Diffstat (limited to 'drivers/gpu/drm/amd')
566 files changed, 25807 insertions, 15707 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 1a11cab741ac..1acfed2f92ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -2,7 +2,7 @@
 config DRM_AMDGPU
 	tristate "AMD GPU"
-	depends on DRM && PCI && MMU
+	depends on DRM && PCI
 	depends on !UML
 	select FW_LOADER
 	select DRM_CLIENT
@@ -68,7 +68,6 @@ config DRM_AMDGPU_CIK
 config DRM_AMDGPU_USERPTR
 	bool "Always enable userptr write support"
 	depends on DRM_AMDGPU
-	depends on MMU
 	select HMM_MIRROR
 	select MMU_NOTIFIER
 	help
@@ -77,7 +76,7 @@
 config DRM_AMD_ISP
 	bool "Enable AMD Image Signal Processor IP support"
-	depends on DRM_AMDGPU
+	depends on DRM_AMDGPU && ACPI
 	select MFD_CORE
 	select PM_GENERIC_DOMAINS if PM
 	help
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index aacc810cabb3..930de203d533 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -66,7 +66,7 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
 	amdgpu_fw_attestation.o amdgpu_securedisplay.o \
 	amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
 	amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o amdgpu_dev_coredump.o \
-	amdgpu_cper.o
+	amdgpu_cper.o amdgpu_userq_fence.o amdgpu_eviction_fence.o amdgpu_ip.o
 
 amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
 
@@ -174,7 +174,10 @@ amdgpu-y += \
 amdgpu-y += \
 	amdgpu_mes.o \
 	mes_v11_0.o \
-	mes_v12_0.o
+	mes_v12_0.o \
+
+# add GFX userqueue support
+amdgpu-y += mes_userqueue.o
 
 # add UVD block
 amdgpu-y += \
@@ -253,6 +256,8 @@ amdgpu-y += \
 # add amdkfd interfaces
 amdgpu-y += amdgpu_amdkfd.o
 
+# add gfx usermode queue
+amdgpu-y += amdgpu_userq.o
 
 ifneq ($(CONFIG_HSA_AMD),)
 AMDKFD_PATH := ../amdkfd
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index e13fbd974141..9569dc16dd3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -71,18 +71,29 @@ aldebaran_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
 	return NULL;
 }
 
+static inline uint32_t aldebaran_get_ip_block_mask(struct amdgpu_device *adev)
+{
+	uint32_t ip_block_mask = BIT(AMD_IP_BLOCK_TYPE_GFX) |
+				 BIT(AMD_IP_BLOCK_TYPE_SDMA);
+
+	if (adev->aid_mask)
+		ip_block_mask |= BIT(AMD_IP_BLOCK_TYPE_IH);
+
+	return ip_block_mask;
+}
+
 static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
 {
+	uint32_t ip_block_mask = aldebaran_get_ip_block_mask(adev);
+	uint32_t ip_block;
 	int r, i;
 
 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-		if (!(adev->ip_blocks[i].version->type ==
-			      AMD_IP_BLOCK_TYPE_GFX ||
-		      adev->ip_blocks[i].version->type ==
-			      AMD_IP_BLOCK_TYPE_SDMA))
+		ip_block = BIT(adev->ip_blocks[i].version->type);
+		if (!(ip_block_mask & ip_block))
 			continue;
 
 		r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
@@ -200,8 +211,10 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
 static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
 {
 	struct amdgpu_firmware_info *ucode_list[AMDGPU_UCODE_ID_MAXIMUM];
+	uint32_t ip_block_mask = aldebaran_get_ip_block_mask(adev);
 	struct amdgpu_firmware_info *ucode;
 	struct amdgpu_ip_block *cmn_block;
+	struct amdgpu_ip_block *ih_block;
 	int ucode_count = 0;
 	int i, r;
 
@@ -243,6 +256,18 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
 	if (r)
 		return r;
 
+	if (ip_block_mask & BIT(AMD_IP_BLOCK_TYPE_IH)) {
+		ih_block = amdgpu_device_ip_get_ip_block(adev,
+							 AMD_IP_BLOCK_TYPE_IH);
+		if (unlikely(!ih_block)) {
+			dev_err(adev->dev, "Failed to get IH handle\n");
+			return -EINVAL;
+		}
+		r = amdgpu_ip_block_resume(ih_block);
+		if (r)
+			return r;
+	}
+
 	/* Reinit GFXHUB */
 	adev->gfxhub.funcs->init(adev);
 	r = adev->gfxhub.funcs->gart_enable(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6d83ccfa42ee..ef3af170dda4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -113,6 +113,8 @@
 #include "amdgpu_xcp.h"
 #include "amdgpu_seq64.h"
 #include "amdgpu_reg_state.h"
+#include "amdgpu_userq.h"
+#include "amdgpu_eviction_fence.h"
 #if defined(CONFIG_DRM_AMD_ISP)
 #include "amdgpu_isp.h"
 #endif
@@ -228,7 +230,7 @@ extern int amdgpu_force_asic_type;
 extern int amdgpu_smartshift_bias;
 extern int amdgpu_use_xgmi_p2p;
 extern int amdgpu_mtype_local;
-extern bool enforce_isolation;
+extern int amdgpu_enforce_isolation;
 #ifdef CONFIG_HSA_AMD
 extern int sched_policy;
 extern bool debug_evictions;
@@ -266,8 +268,10 @@ extern int amdgpu_umsch_mm_fwlog;
 extern int amdgpu_user_partt_mode;
 extern int amdgpu_agp;
+extern int amdgpu_rebar;
 extern int amdgpu_wbrf;
+extern int amdgpu_user_queue;
 
 #define AMDGPU_VM_MAX_NUM_CTX		4096
 #define AMDGPU_SG_THRESHOLD		(256*1024*1024)
@@ -353,7 +357,6 @@ enum amdgpu_kiq_irq {
 	AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
 	AMDGPU_CP_KIQ_IRQ_LAST
 };
-#define SRIOV_USEC_TIMEOUT	1200000 /* wait 12 * 100ms for SRIOV */
 #define MAX_KIQ_REG_WAIT	5000 /* in usecs, 5ms */
 #define MAX_KIQ_REG_BAILOUT_INTERVAL	5 /* in msecs, 5ms */
 #define MAX_KIQ_REG_TRY	1000
@@ -467,9 +470,6 @@ struct amdgpu_sa_manager {
 	void	*cpu_ptr;
 };
 
-int amdgpu_fence_slab_init(void);
-void amdgpu_fence_slab_fini(void);
-
 /*
  * IRQS.
  */
@@ -489,7 +489,6 @@ struct amdgpu_flip_work {
 	bool				async;
 };
 
-
 /*
  * file private structure
  */
@@ -502,6 +501,11 @@ struct amdgpu_fpriv {
 	struct mutex		bo_list_lock;
 	struct idr		bo_list_handles;
 	struct amdgpu_ctx_mgr	ctx_mgr;
+	struct amdgpu_userq_mgr	userq_mgr;
+
+	/* Eviction fence infra */
+	struct amdgpu_eviction_fence_mgr evf_mgr;
+
 	/** GPU partition selection */
 	uint32_t		xcp_id;
 };
@@ -513,12 +517,62 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
  */
 #define AMDGPU_MAX_WB 1024	/* Reserve at most 1024 WB slots for amdgpu-owned rings. */
 
+/**
+ * amdgpu_wb - This struct is used for small GPU memory allocation.
+ *
+ * This struct is used to allocate a small amount of GPU memory that can be
+ * used to shadow certain states into the memory. This is especially useful for
+ * providing easy CPU access to some states without requiring register access
+ * (e.g., if some block is power gated, reading register may be problematic).
+ *
+ * Note: the term writeback was initially used because many of the amdgpu
+ * components had some level of writeback memory, and this struct initially
+ * described those components.
+ */
 struct amdgpu_wb {
+
+	/**
+	 * @wb_obj:
+	 *
+	 * Buffer Object used for the writeback memory.
+	 */
 	struct amdgpu_bo	*wb_obj;
+
+	/**
+	 * @wb:
+	 *
+	 * Pointer to the first writeback slot. In terms of CPU address
+	 * this value can be accessed directly by using the offset as an index.
+	 * For the GPU address, it is necessary to use gpu_addr and the offset.
+	 */
 	volatile uint32_t	*wb;
+
+	/**
+	 * @gpu_addr:
+	 *
+	 * Writeback base address in the GPU.
+	 */
 	uint64_t		gpu_addr;
-	u32			num_wb;	/* Number of wb slots actually reserved for amdgpu. */
+
+	/**
+	 * @num_wb:
+	 *
+	 * Number of writeback slots reserved for amdgpu.
+	 */
+	u32			num_wb;
+
+	/**
+	 * @used:
+	 *
+	 * Track the writeback slot already used.
+	 */
 	unsigned long		used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
+
+	/**
+	 * @lock:
+	 *
+	 * Protects read and write of the used field array.
+	 */
 	spinlock_t		lock;
 };
@@ -552,6 +606,7 @@ struct amdgpu_allowed_register_entry {
  *                    are reset depends on the ASIC. Notably doesn't reset IPs
  *                    shared with the CPU on APUs or the memory controllers (so
  *                    VRAM is not lost). Not available on all ASICs.
+ * @AMD_RESET_LINK: Triggers SW-UP link reset on other GPUs
  * @AMD_RESET_BACO: BACO (Bus Alive, Chip Off) method powers off and on the card
  *                  but without powering off the PCI bus. Suitable only for
  *                  discrete GPUs.
@@ -569,6 +624,7 @@ enum amd_reset_method {
 	AMD_RESET_METHOD_MODE0,
 	AMD_RESET_METHOD_MODE1,
 	AMD_RESET_METHOD_MODE2,
+	AMD_RESET_METHOD_LINK,
 	AMD_RESET_METHOD_BACO,
 	AMD_RESET_METHOD_PCI,
 	AMD_RESET_METHOD_ON_INIT,
@@ -822,6 +878,12 @@ struct amdgpu_mqd_prop {
 	uint32_t hqd_queue_priority;
 	bool allow_tunneling;
 	bool hqd_active;
+	uint64_t shadow_addr;
+	uint64_t gds_bkup_addr;
+	uint64_t csa_addr;
+	uint64_t fence_address;
+	bool tmz_queue;
+	bool kernel_queue;
 };
 
 struct amdgpu_mqd {
@@ -830,6 +892,12 @@ struct amdgpu_mqd {
 			struct amdgpu_mqd_prop *p);
 };
 
+struct amdgpu_pcie_reset_ctx {
+	bool in_link_reset;
+	bool occurs_dpc;
+	bool audio_suspended;
+};
+
 /*
  * Custom Init levels could be defined for different situations where a full
  * initialization of all hardware blocks are not expected. Sample cases are
@@ -854,6 +922,14 @@ struct amdgpu_init_level {
 struct amdgpu_reset_domain;
 struct amdgpu_fru_info;
 
+enum amdgpu_enforce_isolation_mode {
+	AMDGPU_ENFORCE_ISOLATION_DISABLE = 0,
+	AMDGPU_ENFORCE_ISOLATION_ENABLE = 1,
+	AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY = 2,
+	AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER = 3,
+};
+
+
 /*
  * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
  */
@@ -1082,6 +1158,13 @@ struct amdgpu_device {
 	bool                            enable_uni_mes;
 	struct amdgpu_mes               mes;
 	struct amdgpu_mqd               mqds[AMDGPU_HW_IP_NUM];
+	const struct amdgpu_userq_funcs *userq_funcs[AMDGPU_HW_IP_NUM];
+
+	/* xarray used to retrieve the user queue fence driver reference
+	 * in the EOP interrupt handler to signal the particular user
+	 * queue fence.
+	 */
+	struct xarray			userq_xa;
 
 	/* df */
 	struct amdgpu_df                df;
@@ -1124,6 +1207,7 @@ struct amdgpu_device {
 	bool				in_s3;
 	bool				in_s4;
 	bool				in_s0ix;
+	suspend_state_t			last_suspend_state;
 
 	enum pp_mp1_state               mp1_state;
 	struct amdgpu_doorbell_index doorbell_index;
@@ -1160,6 +1244,8 @@ struct amdgpu_device {
 	struct pci_saved_state          *pci_state;
 	pci_channel_state_t		pci_channel_state;
 
+	struct amdgpu_pcie_reset_ctx	pcie_reset_ctx;
+
 	/* Track auto wait count on s_barrier settings */
 	bool				barrier_has_auto_waitcnt;
 
@@ -1193,10 +1279,12 @@ struct amdgpu_device {
 	bool                            debug_enable_ras_aca;
 	bool                            debug_exp_resets;
 	bool                            debug_disable_gpu_ring_reset;
+	bool                            debug_vm_userptr;
+	bool                            debug_disable_ce_logs;
 
 	/* Protection for the following isolation structure */
 	struct mutex                    enforce_isolation_mutex;
-	bool				enforce_isolation[MAX_XCP];
+	enum amdgpu_enforce_isolation_mode	enforce_isolation[MAX_XCP];
 	struct amdgpu_isolation {
 		void			*owner;
 		struct dma_fence	*spearhead;
@@ -1210,6 +1298,10 @@ struct amdgpu_device {
 	 * in KFD: VRAM or GTT.
 	 */
 	bool                            apu_prefer_gtt;
+
+	struct list_head		userq_mgr_list;
+	struct mutex                    userq_mutex;
+	bool                            userq_halt_for_enforce_isolation;
 };
 
 static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
@@ -1243,6 +1335,11 @@ static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
 	return container_of(bdev, struct amdgpu_device, mman.bdev);
 }
 
+static inline bool amdgpu_is_multi_aid(struct amdgpu_device *adev)
+{
+	return !!adev->aid_mask;
+}
+
 int amdgpu_device_init(struct amdgpu_device *adev,
 		       uint32_t flags);
 void amdgpu_device_fini_hw(struct amdgpu_device *adev);
@@ -1294,7 +1391,8 @@ void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
 void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
 				       u64 reg_addr, u64 reg_data);
 u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev);
-bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
+bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
+				       enum amd_asic_type asic_type);
 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
 
 void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev);
@@ -1464,16 +1562,17 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
 					     const u32 array_size);
 
 int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
-bool amdgpu_device_supports_atpx(struct drm_device *dev);
-bool amdgpu_device_supports_px(struct drm_device *dev);
-bool amdgpu_device_supports_boco(struct drm_device *dev);
-bool amdgpu_device_supports_smart_shift(struct drm_device *dev);
-int amdgpu_device_supports_baco(struct drm_device *dev);
+int amdgpu_device_link_reset(struct amdgpu_device *adev);
+bool amdgpu_device_supports_atpx(struct amdgpu_device *adev);
+bool amdgpu_device_supports_px(struct amdgpu_device *adev);
+bool amdgpu_device_supports_boco(struct amdgpu_device *adev);
+bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev);
+int amdgpu_device_supports_baco(struct amdgpu_device *adev);
 
 void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev);
 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
 				      struct amdgpu_device *peer_adev);
-int amdgpu_device_baco_enter(struct drm_device *dev);
-int amdgpu_device_baco_exit(struct drm_device *dev);
+int amdgpu_device_baco_enter(struct amdgpu_device *adev);
+int amdgpu_device_baco_exit(struct amdgpu_device *adev);
 
 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
 		struct amdgpu_ring *ring);
@@ -1525,6 +1624,7 @@ void amdgpu_driver_release_kms(struct drm_device *dev);
 
 int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
 int amdgpu_device_prepare(struct drm_device *dev);
+void amdgpu_device_complete(struct drm_device *dev);
 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
 int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
 u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
@@ -1575,7 +1675,8 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
 						u8 perf_req, bool advertise);
 int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
 				    u8 dev_state, bool drv_state);
-int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state);
+int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+				   enum amdgpu_ss ss_state);
 int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
 int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
 			     u64 *tmr_size);
@@ -1606,19 +1707,24 @@ static inline void amdgpu_acpi_release(void) { }
 static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
 static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
 						  u8 dev_state, bool drv_state) { return 0; }
-static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
-						 enum amdgpu_ss ss_state) { return 0; }
+static inline int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+						 enum amdgpu_ss ss_state)
+{
+	return 0;
+}
 static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps) { }
 #endif
 
 #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
 bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
 bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
-void amdgpu_choose_low_power_state(struct amdgpu_device *adev);
 #else
 static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
 static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
-static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
+#endif
+
+#if defined(CONFIG_DRM_AMD_ISP)
+int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev);
 #endif
 
 void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
@@ -1664,4 +1770,19 @@ extern const struct attribute_group amdgpu_flash_attr_group;
 
 void amdgpu_set_init_level(struct amdgpu_device *adev,
 			   enum amdgpu_init_lvl_id lvl);
+
+static inline int amdgpu_device_bus_status_check(struct amdgpu_device *adev)
+{
+	u32 status;
+	int r;
+
+	r = pci_read_config_dword(adev->pdev, PCI_COMMAND, &status);
+	if (r || PCI_POSSIBLE_ERROR(status)) {
+		dev_err(adev->dev, "device lost from bus!");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index dc47f5fd4ea1..cbc40cad581b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -115,11 +115,19 @@ static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, st
 	u64 event_id = qctx ? qctx->evid.event_id : RAS_EVENT_INVALID_ID;
 	int i;
 
+	if (adev->debug_disable_ce_logs &&
+	    bank->smu_err_type == ACA_SMU_TYPE_CE &&
+	    !ACA_BANK_ERR_IS_DEFFERED(bank))
+		return;
+
 	RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n");
 	/* plus 1 for output format, e.g: ACA[08/08]: xxxx */
 	for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
 		RAS_EVENT_LOG(adev, event_id, HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n",
 			      idx + 1, total, aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]);
+
+	if (ACA_REG__STATUS__SCRUB(bank->regs[ACA_REG_IDX_STATUS]))
+		RAS_EVENT_LOG(adev, event_id, HW_ERR "hardware error logged by the scrubber\n");
 }
 
 static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_type type,
@@ -195,6 +203,10 @@ static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
 {
 	const struct aca_bank_ops *bank_ops = handle->bank_ops;
 
+	/* Parse all deferred errors with UMC aca handle */
+	if (ACA_BANK_ERR_IS_DEFFERED(bank))
+		return handle->hwip == ACA_HWIP_TYPE_UMC;
+
 	if (!aca_bank_hwip_is_matched(bank, handle->hwip))
 		return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
index 6b180f1b33fd..38c88897e1ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
@@ -80,14 +80,6 @@ struct ras_query_context;
 	(ACA_REG__STATUS__POISON((bank)->regs[ACA_REG_IDX_STATUS]) || \
 	 ACA_REG__STATUS__DEFERRED((bank)->regs[ACA_REG_IDX_STATUS]))
 
-#define ACA_BANK_ERR_CE_DE_DECODE(bank) \
-	(ACA_BANK_ERR_IS_DEFFERED(bank) ? ACA_ERROR_TYPE_DEFERRED : \
-					  ACA_ERROR_TYPE_CE)
-
-#define ACA_BANK_ERR_UE_DE_DECODE(bank) \
-	(ACA_BANK_ERR_IS_DEFFERED(bank) ? ACA_ERROR_TYPE_DEFERRED : \
-					  ACA_ERROR_TYPE_UE)
-
 enum aca_reg_idx {
 	ACA_REG_IDX_CTL = 0,
 	ACA_REG_IDX_STATUS = 1,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index b7f8f2ff143d..6c62e27b9800 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -811,18 +811,18 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
 /**
  * amdgpu_acpi_smart_shift_update - update dGPU device state to SBIOS
  *
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
  * @ss_state: current smart shift event
  *
  * returns 0 on success,
  * otherwise return error number.
  */
-int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state)
+int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+				   enum amdgpu_ss ss_state)
 {
-	struct amdgpu_device *adev = drm_to_adev(dev);
 	int r;
 
-	if (!amdgpu_device_supports_smart_shift(dev))
+	if (!amdgpu_device_supports_smart_shift(adev))
 		return 0;
 
 	switch (ss_state) {
@@ -1532,23 +1532,35 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
 	return true;
 #endif /* CONFIG_AMD_PMC */
 }
+#endif /* CONFIG_SUSPEND */
 
-/**
- * amdgpu_choose_low_power_state
- *
- * @adev: amdgpu_device_pointer
- *
- * Choose the target low power state for the GPU
- */
-void amdgpu_choose_low_power_state(struct amdgpu_device *adev)
-{
-	if (adev->in_runpm)
-		return;
+#if IS_ENABLED(CONFIG_DRM_AMD_ISP)
+static const struct acpi_device_id isp_sensor_ids[] = {
+	{ "OMNI5C10" },
+	{ }
+};
 
-	if (amdgpu_acpi_is_s0ix_active(adev))
-		adev->in_s0ix = true;
-	else if (amdgpu_acpi_is_s3_active(adev))
-		adev->in_s3 = true;
+static int isp_match_acpi_device_ids(struct device *dev, const void *data)
+{
+	return acpi_match_device(data, dev) ? 1 : 0;
 }
-#endif /* CONFIG_SUSPEND */
+
+int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev)
+{
+	struct device *pdev __free(put_device) = NULL;
+	struct acpi_device *acpi_pdev;
+
+	pdev = bus_find_device(&platform_bus_type, NULL, isp_sensor_ids,
+			       isp_match_acpi_device_ids);
+	if (!pdev)
+		return -EINVAL;
+
+	acpi_pdev = ACPI_COMPANION(pdev);
+	if (!acpi_pdev)
+		return -ENODEV;
+
+	*dev = acpi_pdev;
+
+	return 0;
+}
+#endif /* CONFIG_DRM_AMD_ISP */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 4cec3a873995..fbe7616555c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -248,18 +248,34 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
 		kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
 }
 
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc)
 {
 	if (adev->kfd.dev)
-		kgd2kfd_suspend(adev->kfd.dev, run_pm);
+		kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
 }
 
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc)
 {
 	int r = 0;
 
 	if (adev->kfd.dev)
-		r = kgd2kfd_resume(adev->kfd.dev, run_pm);
+		r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
+
+	return r;
+}
+
+void amdgpu_amdkfd_suspend_process(struct amdgpu_device *adev)
+{
+	if (adev->kfd.dev)
+		kgd2kfd_suspend_process(adev->kfd.dev);
+}
+
+int amdgpu_amdkfd_resume_process(struct amdgpu_device *adev)
+{
+	int r = 0;
+
+	if (adev->kfd.dev)
+		r = kgd2kfd_resume_process(adev->kfd.dev);
 
 	return r;
 }
@@ -368,6 +384,9 @@ void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj)
 {
 	struct amdgpu_bo **bo = (struct amdgpu_bo **) mem_obj;
 
+	if (!bo || !*bo)
+		return;
+
 	(void)amdgpu_bo_reserve(*bo, true);
 	amdgpu_bo_kunmap(*bo);
 	amdgpu_bo_unpin(*bo);
@@ -639,7 +658,7 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
 		goto err;
 	}
 
-	ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job);
+	ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job, 0);
 	if (ret)
 		goto err;
 
@@ -746,12 +765,12 @@ int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
 
 int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev)
 {
-	return kgd2kfd_check_and_lock_kfd();
+	return kgd2kfd_check_and_lock_kfd(adev->kfd.dev);
 }
 
 void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev)
 {
-	kgd2kfd_unlock_kfd();
+	kgd2kfd_unlock_kfd(adev->kfd.dev);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index b6ca41859b53..33eb4826b58b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -154,8 +154,10 @@ struct amdkfd_process_info {
 
 int amdgpu_amdkfd_init(void);
 void amdgpu_amdkfd_fini(void);
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc);
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc);
+void amdgpu_amdkfd_suspend_process(struct amdgpu_device *adev);
+int amdgpu_amdkfd_resume_process(struct amdgpu_device *adev);
 void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
 			const void *ih_ring_entry);
 void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
@@ -411,16 +413,18 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf);
 bool kgd2kfd_device_init(struct kfd_dev *kfd,
 			 const struct kgd2kfd_shared_resources *gpu_resources);
 void kgd2kfd_device_exit(struct kfd_dev *kfd);
-void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
-int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc);
+int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc);
+void kgd2kfd_suspend_process(struct kfd_dev *kfd);
+int kgd2kfd_resume_process(struct kfd_dev *kfd);
 int kgd2kfd_pre_reset(struct kfd_dev *kfd,
 		      struct amdgpu_reset_context *reset_context);
 int kgd2kfd_post_reset(struct kfd_dev *kfd);
 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
 void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
 void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
-int kgd2kfd_check_and_lock_kfd(void);
-void kgd2kfd_unlock_kfd(void);
+int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd);
+void kgd2kfd_unlock_kfd(struct kfd_dev *kfd);
 int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
 int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
 bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id);
@@ -454,11 +458,20 @@ static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
 {
 }
 
-static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc)
 {
 }
 
-static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc)
+{
+	return 0;
+}
+
+static inline void kgd2kfd_suspend_process(struct kfd_dev *kfd)
+{
+}
+
+static inline int kgd2kfd_resume_process(struct kfd_dev *kfd)
 {
 	return 0;
 }
@@ -489,12 +502,12 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
 {
 }
 
-static inline int kgd2kfd_check_and_lock_kfd(void)
+static inline int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd)
 {
 	return 0;
 }
 
-static inline void kgd2kfd_unlock_kfd(void)
+static inline void kgd2kfd_unlock_kfd(struct kfd_dev *kfd)
 {
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index ffbaa8bc5eea..1105a09e55dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -320,7 +320,7 @@ static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_wai
 	if (!down_read_trylock(&adev->reset_domain->sem))
 		return;
 
-	amdgpu_amdkfd_suspend(adev, false);
+	amdgpu_amdkfd_suspend(adev, true);
 
 	if (suspend_resume_compute_scheduler(adev, true))
 		goto out;
@@ -333,7 +333,7 @@ static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_wai
 out:
 	suspend_resume_compute_scheduler(adev, false);
 
-	amdgpu_amdkfd_resume(adev, false);
+	amdgpu_amdkfd_resume(adev, true);
 
 	up_read(&adev->reset_domain->sem);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index ca4a6b82817f..df77558e03ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -561,6 +561,13 @@ static uint32_t read_vmid_from_vmfault_reg(struct amdgpu_device *adev)
 	return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
 }
 
+static uint32_t kgd_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+					  int engine, int queue)
+
+{
+	return 0;
+}
+
 const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
 	.program_sh_mem_settings = kgd_program_sh_mem_settings,
 	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -578,4 +585,5 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
 	.set_scratch_backing_va = set_scratch_backing_va,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
 	.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
+	.hqd_sdma_get_doorbell = kgd_hqd_sdma_get_doorbell,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 0f3e2944edd7..e68c0fa8d751 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -582,6 +582,13 @@ static void set_vm_context_page_table_base(struct amdgpu_device *adev,
 			lower_32_bits(page_table_base));
 }
 
+static uint32_t kgd_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+					  int engine, int queue)
+
+{
+	return 0;
+}
+
 const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
 	.program_sh_mem_settings = kgd_program_sh_mem_settings,
 	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -599,4 +606,5 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
 			get_atc_vmid_pasid_mapping_info,
 	.set_scratch_backing_va = set_scratch_backing_va,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
+	.hqd_sdma_get_doorbell = kgd_hqd_sdma_get_doorbell,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index d2ec4130a316..260165bbe373 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -2559,6 +2559,18 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
 			if (ret != -EFAULT)
 				return ret;
 
+			/* If applications unmap memory before destroying the userptr
+			 * from the KFD, trigger a segmentation fault in VM debug mode.
+			 */
+			if (amdgpu_ttm_adev(bo->tbo.bdev)->debug_vm_userptr) {
+				pr_err("Pid %d unmapped memory before destroying userptr at GPU addr 0x%llx\n",
+				       pid_nr(process_info->pid), mem->va);
+
+				// Send GPU VM fault to user space
+				kfd_signal_vm_fault_event_with_userptr(kfd_lookup_process_by_pid(process_info->pid),
+								       mem->va);
+			}
+
 			ret = 0;
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index eb015bdda8a7..c7d32fb216e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -281,6 +281,9 @@ static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
 		case ATOM_DGPU_VRAM_TYPE_GDDR6:
 			vram_type = AMDGPU_VRAM_TYPE_GDDR6;
 			break;
+		case ATOM_DGPU_VRAM_TYPE_HBM3E:
+			vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+			break;
 		default:
 			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
 			break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 75fcc521c171..00e96419fcda 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -447,6 +447,13 @@ success:
 	return true;
 }
 
+static bool amdgpu_prefer_rom_resource(struct amdgpu_device *adev)
+{
+	struct resource *res = &adev->pdev->resource[PCI_ROM_RESOURCE];
+
+	return (res->flags & IORESOURCE_ROM_SHADOW);
+}
+
 static bool amdgpu_get_bios_dgpu(struct amdgpu_device *adev)
 {
 	if (amdgpu_atrm_get_bios(adev)) {
@@ -465,14 +472,27 @@ static bool amdgpu_get_bios_dgpu(struct amdgpu_device *adev)
 		goto success;
 	}
 
-	if (amdgpu_read_platform_bios(adev)) {
-		dev_info(adev->dev, "Fetched VBIOS from platform\n");
-		goto success;
-	}
+	if (amdgpu_prefer_rom_resource(adev)) {
+		if (amdgpu_read_bios(adev)) {
+			dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
+			goto success;
+		}
 
-	if (amdgpu_read_bios(adev)) {
-		dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
-		goto success;
+		if (amdgpu_read_platform_bios(adev)) {
+			dev_info(adev->dev, "Fetched VBIOS from platform\n");
+			goto success;
+		}
+
+	} else {
+		if (amdgpu_read_platform_bios(adev)) {
+			dev_info(adev->dev, "Fetched VBIOS from platform\n");
+			goto success;
+		}
+
+		if (amdgpu_read_bios(adev)) {
+			dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
+			goto success;
+		}
 	}
 
 	if (amdgpu_read_bios_from_rom(adev)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 68bce6a6d09d..004a6a9d6b9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -252,83 +252,22 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 
 		if (!adev->pm.fw) {
 			switch (adev->asic_type) {
-			case CHIP_TAHITI:
-				strcpy(fw_name, "radeon/tahiti_smc.bin");
-				break;
-			case CHIP_PITCAIRN:
-				if ((adev->pdev->revision == 0x81) &&
-				    ((adev->pdev->device == 0x6810) ||
-				     (adev->pdev->device == 0x6811))) {
-					info->is_kicker = true;
-					strcpy(fw_name, "radeon/pitcairn_k_smc.bin");
-				} else {
-					strcpy(fw_name, "radeon/pitcairn_smc.bin");
-				}
-				break;
-			case CHIP_VERDE:
-				if (((adev->pdev->device == 0x6820) &&
-				     ((adev->pdev->revision == 0x81) ||
-				      (adev->pdev->revision == 0x83))) ||
-				    ((adev->pdev->device == 0x6821) &&
-				     ((adev->pdev->revision == 0x83) ||
-				      (adev->pdev->revision == 0x87))) ||
-				    ((adev->pdev->revision == 0x87) &&
-				     ((adev->pdev->device == 0x6823) ||
-				      (adev->pdev->device == 0x682b)))) {
-					info->is_kicker = true;
-					strcpy(fw_name, "radeon/verde_k_smc.bin");
-				} else {
-					strcpy(fw_name, "radeon/verde_smc.bin");
-				}
-				break;
-			case CHIP_OLAND:
-				if (((adev->pdev->revision == 0x81) &&
-				     ((adev->pdev->device == 0x6600) ||
-				      (adev->pdev->device == 0x6604) ||
-				      (adev->pdev->device == 0x6605) ||
-				      (adev->pdev->device == 0x6610))) ||
-				    ((adev->pdev->revision == 0x83) &&
-				     (adev->pdev->device == 0x6610))) {
-					info->is_kicker = true;
-					strcpy(fw_name, "radeon/oland_k_smc.bin");
-				} else {
-					strcpy(fw_name, "radeon/oland_smc.bin");
-				}
-				break;
-			case CHIP_HAINAN:
-				if (((adev->pdev->revision == 0x81) &&
-				     (adev->pdev->device == 0x6660)) ||
-				    ((adev->pdev->revision == 0x83) &&
-				     ((adev->pdev->device == 0x6660) ||
-				      (adev->pdev->device == 0x6663) ||
-				      (adev->pdev->device == 0x6665) ||
-				      (adev->pdev->device == 0x6667)))) {
-					info->is_kicker = true;
-					strcpy(fw_name, "radeon/hainan_k_smc.bin");
-				} else if ((adev->pdev->revision == 0xc3) &&
-					   (adev->pdev->device == 0x6665)) {
-					info->is_kicker = true;
-					strcpy(fw_name, "radeon/banks_k_2_smc.bin");
-				} else {
-					strcpy(fw_name, "radeon/hainan_smc.bin");
-				}
-				break;
 			case CHIP_BONAIRE:
 				if ((adev->pdev->revision == 0x80) ||
 				    (adev->pdev->revision == 0x81) ||
 				    (adev->pdev->device == 0x665f)) {
 					info->is_kicker = true;
-					strcpy(fw_name, "amdgpu/bonaire_k_smc.bin");
+					strscpy(fw_name, "amdgpu/bonaire_k_smc.bin");
 				} else {
-					strcpy(fw_name, "amdgpu/bonaire_smc.bin");
+					strscpy(fw_name, "amdgpu/bonaire_smc.bin");
 				}
 				break;
 			case CHIP_HAWAII:
 				if (adev->pdev->revision == 0x80) {
 					info->is_kicker = true;
-					strcpy(fw_name, "amdgpu/hawaii_k_smc.bin");
+					strscpy(fw_name, "amdgpu/hawaii_k_smc.bin");
 				} else {
-					strcpy(fw_name, "amdgpu/hawaii_smc.bin");
+					strscpy(fw_name, "amdgpu/hawaii_smc.bin");
 				}
 				break;
 			case CHIP_TOPAZ:
@@ -338,76 +277,76 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) ||
 				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) {
 					info->is_kicker = true;
-					strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
+					strscpy(fw_name, "amdgpu/topaz_k_smc.bin");
 				} else
-					strcpy(fw_name, "amdgpu/topaz_smc.bin");
+					strscpy(fw_name, "amdgpu/topaz_smc.bin");
 				break;
 			case CHIP_TONGA:
 				if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
 				    ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) {
 					info->is_kicker = true;
-					strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
+					strscpy(fw_name, "amdgpu/tonga_k_smc.bin");
 				} else
-					strcpy(fw_name, "amdgpu/tonga_smc.bin");
+					strscpy(fw_name, "amdgpu/tonga_smc.bin");
 				break;
 			case CHIP_FIJI:
-				strcpy(fw_name, "amdgpu/fiji_smc.bin");
+				strscpy(fw_name, "amdgpu/fiji_smc.bin");
 				break;
 			case CHIP_POLARIS11:
 				if (type == CGS_UCODE_ID_SMU) {
 					if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision)) {
 						info->is_kicker = true;
-						strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
+						strscpy(fw_name, "amdgpu/polaris11_k_smc.bin");
 					} else if (ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
 						info->is_kicker = true;
-						strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
+						strscpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
 					} else {
-						strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+						strscpy(fw_name, "amdgpu/polaris11_smc.bin");
 					}
 				} else if (type == CGS_UCODE_ID_SMU_SK) {
-					strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
+					strscpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
 				}
 				break;
 			case CHIP_POLARIS10:
 				if (type == CGS_UCODE_ID_SMU) {
 					if (ASICID_IS_P20(adev->pdev->device, adev->pdev->revision)) {
 						info->is_kicker = true;
-						strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
+						strscpy(fw_name, "amdgpu/polaris10_k_smc.bin");
 					} else if (ASICID_IS_P30(adev->pdev->device,
 								 adev->pdev->revision)) {
 						info->is_kicker = true;
-						strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
+						strscpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
 					} else {
-						strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+						strscpy(fw_name, "amdgpu/polaris10_smc.bin");
 					}
 				} else if (type == CGS_UCODE_ID_SMU_SK) {
-					strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
+					strscpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
 				}
 				break;
 			case CHIP_POLARIS12:
 				if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
 					info->is_kicker = true;
-					strcpy(fw_name, "amdgpu/polaris12_k_smc.bin");
+					strscpy(fw_name, "amdgpu/polaris12_k_smc.bin");
 				} else {
-					strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+					strscpy(fw_name, "amdgpu/polaris12_smc.bin");
 				}
 				break;
 			case CHIP_VEGAM:
-				strcpy(fw_name, "amdgpu/vegam_smc.bin");
+				strscpy(fw_name, "amdgpu/vegam_smc.bin");
 				break;
 			case CHIP_VEGA10:
 				if ((adev->pdev->device == 0x687f) &&
 				    ((adev->pdev->revision == 0xc0) ||
 				     (adev->pdev->revision == 0xc1) ||
 				     (adev->pdev->revision == 0xc3)))
-					strcpy(fw_name, "amdgpu/vega10_acg_smc.bin");
+					strscpy(fw_name, "amdgpu/vega10_acg_smc.bin");
 				else
-					strcpy(fw_name, "amdgpu/vega10_smc.bin");
+					strscpy(fw_name, "amdgpu/vega10_smc.bin");
 				break;
 			case CHIP_VEGA12:
-				strcpy(fw_name, "amdgpu/vega12_smc.bin");
+				strscpy(fw_name, "amdgpu/vega12_smc.bin");
 				break;
 			case CHIP_VEGA20:
-				strcpy(fw_name, "amdgpu/vega20_smc.bin");
+				strscpy(fw_name, "amdgpu/vega20_smc.bin");
 				break;
 			default:
 				DRM_ERROR("SMC firmware not supported\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
index 360e07a5c7c1..25252231a68a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
@@ -212,7 +212,7 @@ int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev
 				NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
 
 	amdgpu_cper_entry_fill_section_desc(adev, section_desc, true, false,
-					    CPER_SEV_NUM, RUNTIME, NONSTD_SEC_LEN,
+					    CPER_SEV_FATAL, RUNTIME, NONSTD_SEC_LEN,
 					    NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
 
 	section->hdr.valid_bits.err_info_cnt = 1;
@@ -326,7 +326,9 @@ int amdgpu_cper_generate_bp_threshold_record(struct amdgpu_device *adev)
 		return -ENOMEM;
 	}
 
-	amdgpu_cper_entry_fill_hdr(adev, bp_threshold, AMDGPU_CPER_TYPE_BP_THRESHOLD, CPER_SEV_NUM);
+	amdgpu_cper_entry_fill_hdr(adev, bp_threshold,
+				   AMDGPU_CPER_TYPE_BP_THRESHOLD,
+				   CPER_SEV_FATAL);
 	ret = amdgpu_cper_entry_fill_bad_page_threshold_section(adev, bp_threshold, 0);
 	if (ret)
 		return ret;
@@ -457,7 +459,7 @@ calc:
 
 void amdgpu_cper_ring_write(struct amdgpu_ring *ring, void *src, int count)
 {
-	u64 pos, wptr_old, rptr = *ring->rptr_cpu_addr & ring->ptr_mask;
+	u64 pos, wptr_old, rptr;
 	int rec_cnt_dw = count >> 2;
 	u32 chunk, ent_sz;
 	u8 *s = (u8 *)src;
@@ -470,9 +472,11 @@ void amdgpu_cper_ring_write(struct amdgpu_ring *ring, void *src, int count)
 		return;
 	}
 
+	mutex_lock(&ring->adev->cper.ring_lock);
+
 	wptr_old = ring->wptr;
+	rptr = *ring->rptr_cpu_addr & ring->ptr_mask;
 
-	mutex_lock(&ring->adev->cper.ring_lock);
 	while (count) {
 		ent_sz = amdgpu_cper_ring_get_ent_sz(ring, ring->wptr);
 		chunk = umin(ent_sz, count);
@@ -549,7 +553,7 @@ int amdgpu_cper_init(struct amdgpu_device *adev)
 {
 	int r;
 
-	if (!amdgpu_aca_is_enabled(adev))
+	if (!amdgpu_aca_is_enabled(adev) && !amdgpu_sriov_ras_cper_en(adev))
 		return 0;
 
 	r = amdgpu_cper_ring_init(adev);
@@ -568,7 +572,7 @@ int amdgpu_cper_init(struct amdgpu_device *adev)
 
 int amdgpu_cper_fini(struct amdgpu_device *adev)
 {
-	if (!amdgpu_aca_is_enabled(adev))
+	if (!amdgpu_aca_is_enabled(adev) && !amdgpu_sriov_ras_cper_en(adev))
 		return 0;
 
 	adev->cper.enabled = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 82df06a72ee0..a2adaacf6adb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -293,10 +293,29 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
 
 	for (i = 0; i < p->gang_size; ++i) {
 		ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
-				       num_ibs[i], &p->jobs[i]);
+				       num_ibs[i], &p->jobs[i],
+				       p->filp->client_id);
 		if (ret)
 			goto free_all_kdata;
-		p->jobs[i]->enforce_isolation = p->adev->enforce_isolation[fpriv->xcp_id];
+		switch (p->adev->enforce_isolation[fpriv->xcp_id]) {
+		case AMDGPU_ENFORCE_ISOLATION_DISABLE:
+		default:
+			p->jobs[i]->enforce_isolation = false;
+			p->jobs[i]->run_cleaner_shader = false;
+			break;
+		case AMDGPU_ENFORCE_ISOLATION_ENABLE:
+			p->jobs[i]->enforce_isolation = true;
+			p->jobs[i]->run_cleaner_shader = true;
+			break;
+		case AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY:
+			p->jobs[i]->enforce_isolation = true;
+			p->jobs[i]->run_cleaner_shader = false;
+			break;
+		case AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER:
+			p->jobs[i]->enforce_isolation = true;
+			p->jobs[i]->run_cleaner_shader = false;
+			break;
+		}
 	}
 	p->gang_leader = p->jobs[p->gang_leader_idx];
@@ -349,6 +368,10 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
 	ring = amdgpu_job_ring(job);
 	ib = &job->ibs[job->num_ibs++];
 
+	/* submissions to kernel queues are disabled */
+	if (ring->no_user_submission)
+		return -EINVAL;
+
 	/* MM engine doesn't support user fences */
 	if (p->uf_bo && ring->funcs->no_user_fence)
 		return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index cfdf558b48b6..02138aa55793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -109,7 +109,7 @@ int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	struct drm_exec exec;
 	int r;
 
-	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+	drm_exec_init(&exec, 0, 0);
 	drm_exec_until_all_locked(&exec) {
 		r = amdgpu_vm_lock_pd(vm, &exec, 0);
 		if (likely(!r))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index c43d1b6e5d66..f5d5c45ddc0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -919,7 +919,7 @@ long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
 	return timeout;
 }
 
-void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
+static void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 {
 	struct amdgpu_ctx *ctx;
 	struct idr *idp;
@@ -944,24 +944,13 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 				drm_sched_entity_fini(entity);
 			}
 		}
+		kref_put(&ctx->refcount, amdgpu_ctx_fini);
 	}
 }
 
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
 {
-	struct amdgpu_ctx *ctx;
-	struct idr *idp;
-	uint32_t id;
-
 	amdgpu_ctx_mgr_entity_fini(mgr);
-
-	idp = &mgr->ctx_handles;
-
-	idr_for_each_entry(idp, ctx, id) {
-		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
-			DRM_ERROR("ctx %p is still alive\n", ctx);
-	}
-
 	idr_destroy(&mgr->ctx_handles);
 	mutex_destroy(&mgr->lock);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index 85376baaa92f..090dfe86f75b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -92,7 +92,6 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
 
 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
 			 struct amdgpu_device *adev);
-void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
 long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
 void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index a1450f13d963..0e6e2e2acf5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1786,7 +1786,7 @@ static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused)
 		ti = amdgpu_vm_get_task_info_vm(vm);
 		if (ti) {
-			seq_printf(m, "pid:%d\tProcess:%s ----------\n", ti->pid, ti->process_name);
+			seq_printf(m, "pid:%d\tProcess:%s ----------\n", ti->task.pid, ti->process_name);
 			amdgpu_vm_put_task_info(ti);
 		}
@@ -1902,7 +1902,7 @@ no_preempt:
 			continue;
 		}
 		job = to_amdgpu_job(s_job);
-		if (preempted && (&job->hw_fence) == fence)
+		if (preempted && (&job->hw_fence.base) == fence)
 			/* mark the job as preempted */
 			job->preemption_status |= AMDGPU_IB_PREEMPTED;
 	}
@@ -2105,6 +2105,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 	amdgpu_rap_debugfs_init(adev);
 	amdgpu_securedisplay_debugfs_init(adev);
 	amdgpu_fw_attestation_debugfs_init(adev);
+	amdgpu_psp_debugfs_init(adev);
 
 	debugfs_create_file("amdgpu_evict_vram", 0400, root, adev,
 			    &amdgpu_evict_vram_fops);
@@ -2130,6 +2131,55 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 	return 0;
 }
 
+static int amdgpu_pt_info_read(struct seq_file *m, void *unused)
+{
+	struct drm_file *file;
+	struct amdgpu_fpriv *fpriv;
+	struct amdgpu_bo *root_bo;
+	int r;
+
+	file = m->private;
+	if (!file)
+		return -EINVAL;
+
+	fpriv = file->driver_priv;
+	if (!fpriv || !fpriv->vm.root.bo)
+		return -ENODEV;
+
+	root_bo = amdgpu_bo_ref(fpriv->vm.root.bo);
+	r = amdgpu_bo_reserve(root_bo, true);
+	if (r) {
+		amdgpu_bo_unref(&root_bo);
+		return -EINVAL;
+	}
+
+	seq_printf(m, "gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(fpriv->vm.root.bo));
+
+	amdgpu_bo_unreserve(root_bo);
+	amdgpu_bo_unref(&root_bo);
+
+	return 0;
+}
+
+static int amdgpu_pt_info_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, amdgpu_pt_info_read, inode->i_private);
+}
+
+static const struct file_operations amdgpu_pt_info_fops = {
+	.owner = THIS_MODULE,
+	.open = amdgpu_pt_info_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+void amdgpu_debugfs_vm_init(struct drm_file *file)
+{
+	debugfs_create_file("vm_pagetable_info", 0444, file->debugfs_client, file,
+			    &amdgpu_pt_info_fops);
+}
+
 #else
 int amdgpu_debugfs_init(struct amdgpu_device *adev)
 {
@@ -2139,4 +2189,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
 {
 	return 0;
 }
+void amdgpu_debugfs_vm_init(struct drm_file *file)
+{
+}
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index 0425432d8659..e7b3c38e5186 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -33,4 +33,5 @@ void amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
 void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
 void amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
 void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_vm_init(struct drm_file *file);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
index 7b50741dc097..8a026bc9ea44 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -220,10 +220,10 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
 	drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec,
 		   coredump->reset_time.tv_nsec);
 
-	if (coredump->reset_task_info.pid)
+	if (coredump->reset_task_info.task.pid)
 		drm_printf(&p, "process_name: %s PID: %d\n",
 			   coredump->reset_task_info.process_name,
-			   coredump->reset_task_info.pid);
+			   coredump->reset_task_info.task.pid);
 
 	/* SOC Information */
 	drm_printf(&p, "\nSOC Information\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a30111d2c3ea..6f93473436be 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -85,6 +85,7 @@
 
 #if IS_ENABLED(CONFIG_X86)
 #include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
 #endif
 
 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
@@ -231,7 +232,7 @@ static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
 {
 	int ret = 0;
 
-	if (!amdgpu_sriov_vf(adev))
+	if (amdgpu_nbio_is_replay_cnt_supported(adev))
 		ret = sysfs_create_file(&adev->dev->kobj,
 					&dev_attr_pcie_replay_count.attr);
 
@@ -240,7 +241,7 @@ static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
 
 static void amdgpu_device_attr_sysfs_fini(struct amdgpu_device *adev)
 {
-	if (!amdgpu_sriov_vf(adev))
+	if (amdgpu_nbio_is_replay_cnt_supported(adev))
 		sysfs_remove_file(&adev->dev->kobj,
 				  &dev_attr_pcie_replay_count.attr);
 }
@@ -410,19 +411,16 @@ static const struct attribute_group amdgpu_board_attrs_group = {
 
 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
 
-
 /**
  * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
  *
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
  *
  * Returns true if the device is a dGPU with ATPX power control,
  * otherwise return false.
  */
-bool amdgpu_device_supports_px(struct drm_device *dev)
+bool amdgpu_device_supports_px(struct amdgpu_device *adev)
 {
-	struct amdgpu_device *adev = drm_to_adev(dev);
-
 	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
 		return true;
 	return false;
@@ -431,15 +429,13 @@ bool amdgpu_device_supports_px(struct drm_device *dev)
 /**
  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
  *
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
  *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise return false.
 */
-bool amdgpu_device_supports_boco(struct drm_device *dev)
+bool amdgpu_device_supports_boco(struct amdgpu_device *adev)
 {
-	struct amdgpu_device *adev = drm_to_adev(dev);
-
 	if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
 		return false;
 
@@ -452,29 +448,24 @@ bool amdgpu_device_supports_boco(struct drm_device *dev)
 /**
  * amdgpu_device_supports_baco - Does the device support BACO
 *
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
 *
 * Return:
 * 1 if the device supports BACO;
 * 3 if the device supports MACO (only works if BACO is supported)
 * otherwise return 0.
 */
-int amdgpu_device_supports_baco(struct drm_device *dev)
+int amdgpu_device_supports_baco(struct amdgpu_device *adev)
 {
-	struct amdgpu_device *adev = drm_to_adev(dev);
-
 	return amdgpu_asic_supports_baco(adev);
 }
 
 void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
 {
-	struct drm_device *dev;
 	int bamaco_support;
 
-	dev = adev_to_drm(adev);
-
 	adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
-	bamaco_support = amdgpu_device_supports_baco(dev);
+	bamaco_support = amdgpu_device_supports_baco(adev);
 
 	switch (amdgpu_runtime_pm) {
 	case 2:
@@ -494,10 +485,12 @@ void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
 		break;
 	case -1:
 	case -2:
-		if (amdgpu_device_supports_px(dev)) { /* enable PX as runtime mode */
+		if (amdgpu_device_supports_px(adev)) {
+			/* enable PX as runtime mode */
 			adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
 			dev_info(adev->dev, "Using ATPX for runtime pm\n");
-		} else if (amdgpu_device_supports_boco(dev)) { /* enable boco as runtime mode */
+		} else if (amdgpu_device_supports_boco(adev)) {
+			/* enable boco as runtime mode */
 			adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
 			dev_info(adev->dev, "Using BOCO for runtime pm\n");
 		} else {
@@ -511,12 +504,13 @@ void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
 			break;
 		case CHIP_VEGA10:
 			/* enable BACO as runpm mode if noretry=0 */
-			if (!adev->gmc.noretry)
+			if (!adev->gmc.noretry && !amdgpu_passthrough(adev))
 				adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
 			break;
 		default:
 			/* enable BACO as runpm mode on CI+ */
-			adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
+			if (!amdgpu_passthrough(adev))
+				adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
 			break;
 		}
 
@@ -545,14 +539,14 @@ no_runtime_pm:
 * amdgpu_device_supports_smart_shift - Is the device dGPU with
 * smart shift support
 *
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
 *
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
 */
-bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
+bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev)
 {
-	return (amdgpu_device_supports_boco(dev) &&
+	return (amdgpu_device_supports_boco(adev) &&
 		amdgpu_acpi_is_power_shift_control_supported());
 }
@@ -1286,14 +1280,14 @@ u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
 */
 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
 {
-	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
+	dev_err(adev->dev, "Invalid callback to read register 0x%04X\n", reg);
 	BUG();
 	return 0;
 }
 
 static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
 {
-	DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
+	dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg);
 	BUG();
 	return 0;
 }
@@ -1310,15 +1304,17 @@ static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg
 */
 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 {
-	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
-		  reg, v);
+	dev_err(adev->dev,
+		"Invalid callback to write register 0x%04X with 0x%08X\n", reg,
+		v);
 	BUG();
 }
 
 static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
 {
-	DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
-		  reg, v);
+	dev_err(adev->dev,
+		"Invalid callback to write register 0x%llX with 0x%08X\n", reg,
+		v);
 	BUG();
 }
@@ -1334,14 +1330,15 @@ static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, ui
 */
 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
 {
-	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
+	dev_err(adev->dev, "Invalid callback to read 64 bit register 0x%04X\n",
+		reg);
 	BUG();
 	return 0;
 }
 
 static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
 {
-	DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
+	dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg);
 	BUG();
 	return 0;
 }
@@ -1358,15 +1355,17 @@ static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t r
 */
 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
 {
-	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
-		  reg, v);
+	dev_err(adev->dev,
+		"Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
+		reg, v);
 	BUG();
 }
 
 static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
 {
-	DRM_ERROR("Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
-		  reg, v);
+	dev_err(adev->dev,
+		"Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
+		reg, v);
 	BUG();
 }
@@ -1384,8 +1383,9 @@ static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg,
 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
 					  uint32_t block, uint32_t reg)
 {
-	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
-		  reg, block);
+	dev_err(adev->dev,
+		"Invalid callback to read register 0x%04X in block 0x%04X\n",
+		reg, block);
 	BUG();
 	return 0;
 }
@@ -1405,8 +1405,9 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
 				      uint32_t block,
 				      uint32_t reg, uint32_t v)
 {
-	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
-		  reg, block, v);
+	dev_err(adev->dev,
+		"Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
+		reg, block, v);
 	BUG();
 }
@@ -1680,6 +1681,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 	if (amdgpu_sriov_vf(adev))
 		return 0;
 
+	if (!amdgpu_rebar)
+		return 0;
+
 	/* resizing on Dell G5 SE platforms causes problems with runtime pm */
 	if ((amdgpu_runtime_pm != 0) &&
 	    adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
@@ -1689,7 +1693,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 
 	/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
 	if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
-		DRM_WARN("System can't access extended configuration space, please check!!\n");
+		dev_warn(
+			adev->dev,
+			"System can't access extended configuration space, please check!!\n");
 
 	/* skip if the bios has already enabled large BAR */
 	if (adev->gmc.real_vram_size &&
@@ -1729,9 +1735,10 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 
 	r = pci_resize_resource(adev->pdev, 0, rbar_size);
 	if (r == -ENOSPC)
-		DRM_INFO("Not enough PCI address space for a large BAR.");
+		dev_info(adev->dev,
+			 "Not enough PCI address space for a large BAR.");
 	else if (r && r != -ENOTSUPP)
-		DRM_ERROR("Problem resizing BAR0 (%d).", r);
+		dev_err(adev->dev, "Problem resizing BAR0 (%d).", r);
 
 	pci_assign_unassigned_bus_resources(adev->pdev->bus);
 
@@ -1833,8 +1840,8 @@ bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
 	case 0:
 		return false;
 	default:
-		DRM_ERROR("Invalid value for amdgpu.seamless: %d\n",
-			  amdgpu_seamless);
+		dev_err(adev->dev, "Invalid value for amdgpu.seamless: %d\n",
+			amdgpu_seamless);
 		return false;
 	}
 
@@ -1870,6 +1877,35 @@ static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device
 	return true;
 }
 
+static bool amdgpu_device_aspm_support_quirk(struct amdgpu_device *adev)
+{
+#if IS_ENABLED(CONFIG_X86)
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	if (!(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 0) ||
+	      amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 1)))
+		return false;
+
+	if (c->x86 == 6 &&
+	    adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5) {
+		switch (c->x86_model) {
+		case VFM_MODEL(INTEL_ALDERLAKE):
+		case VFM_MODEL(INTEL_ALDERLAKE_L):
+		case VFM_MODEL(INTEL_RAPTORLAKE):
+		case VFM_MODEL(INTEL_RAPTORLAKE_P):
+		case VFM_MODEL(INTEL_RAPTORLAKE_S):
+			return true;
+		default:
+			return false;
+		}
+	} else {
+		return false;
+	}
+#else
+	return false;
+#endif
+}
+
 /**
 * amdgpu_device_should_use_aspm - check if the device should program ASPM
 *
@@ -1894,7 +1930,7 @@ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
 	}
 	if (adev->flags & AMD_IS_APU)
 		return false;
-	if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK))
+	if (amdgpu_device_aspm_support_quirk(adev))
 		return false;
 	return pcie_aspm_enabled(adev->pdev);
 }
@@ -1981,7 +2017,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
 		return;
 
 	if (!is_os_64) {
-		DRM_WARN("Not 64-bit OS, feature not supported\n");
+		dev_warn(adev->dev, "Not 64-bit OS, feature not supported\n");
 		goto def_value;
 	}
 	si_meminfo(&si);
@@ -1996,7 +2032,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
 		if (total_memory < dram_size_seven_GB)
 			goto def_value1;
 	} else {
-		DRM_WARN("Smu memory pool size not supported\n");
+		dev_warn(adev->dev, "Smu memory pool size not supported\n");
 		goto def_value;
 	}
 	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
@@ -2004,7 +2040,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
 	return;
 
 def_value1:
-	DRM_WARN("No enough system memory\n");
+	dev_warn(adev->dev, "No enough system memory\n");
 def_value:
 	adev->pm.smu_prv_buffer_size = 0;
 }
@@ -2112,8 +2148,31 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev,
 							      amdgpu_fw_load_type);
 
-	for (i = 0; i < MAX_XCP; i++)
-		adev->enforce_isolation[i] = !!enforce_isolation;
+	for (i = 0; i < MAX_XCP; i++) {
+		switch (amdgpu_enforce_isolation) {
+		case -1:
+		case 0:
+		default:
+			/* disable */
+			adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
+			break;
+		case 1:
+			/* enable */
+			adev->enforce_isolation[i] =
+				AMDGPU_ENFORCE_ISOLATION_ENABLE;
+			break;
+		case 2:
+			/* enable legacy mode */
+			adev->enforce_isolation[i] =
+				AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
+			break;
+		case 3:
+			/* enable only process isolation without submitting cleaner shader */
+			adev->enforce_isolation[i] =
+				AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
+			break;
+		}
+	}
 
 	return 0;
 }
@@ -2133,7 +2192,8 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	int r;
 
-	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
+	if (amdgpu_device_supports_px(drm_to_adev(dev)) &&
+	    state == VGA_SWITCHEROO_OFF)
 		return;
 
 	if (state == VGA_SWITCHEROO_ON) {
@@ -2145,12 +2205,13 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
 		amdgpu_device_load_pci_state(pdev);
 		r = pci_enable_device(pdev);
 		if (r)
-			DRM_WARN("pci_enable_device failed (%d)\n", r);
+			dev_warn(&pdev->dev, "pci_enable_device failed (%d)\n",
+				 r);
 		amdgpu_device_resume(dev, true);
 
 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
 	} else {
-		pr_info("switched off\n");
+		dev_info(&pdev->dev, "switched off\n");
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		amdgpu_device_prepare(dev);
 		amdgpu_device_suspend(dev, true);
@@ -2217,8 +2278,9 @@ int amdgpu_device_ip_set_clockgating_state(void *dev,
 		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
 			&adev->ip_blocks[i], state);
 		if (r)
-			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
-				  adev->ip_blocks[i].version->funcs->name, r);
+			dev_err(adev->dev,
+				"set_clockgating_state of IP block <%s> failed %d\n",
+				adev->ip_blocks[i].version->funcs->name, r);
 	}
 	return r;
 }
@@ -2251,8 +2313,9 @@ int amdgpu_device_ip_set_powergating_state(void *dev,
 		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
 			&adev->ip_blocks[i], state);
 		if (r)
-			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
-				  adev->ip_blocks[i].version->funcs->name, r);
+			dev_err(adev->dev,
+				"set_powergating_state of IP block <%s> failed %d\n",
+				adev->ip_blocks[i].version->funcs->name, r);
 	}
 	return r;
 }
@@ -2468,9 +2531,11 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
 		}
 	}
 
-	DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
-		 amdgpu_virtual_display, pci_address_name,
-		 adev->enable_virtual_display, adev->mode_info.num_crtc);
+	dev_info(
+		adev->dev,
+		"virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
+		amdgpu_virtual_display, pci_address_name,
+		adev->enable_virtual_display, adev->mode_info.num_crtc);
 
 	kfree(pciaddstr);
 }
@@ -2481,8 +2546,9 @@ void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
 	if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
 		adev->mode_info.num_crtc = 1;
 		adev->enable_virtual_display = true;
-		DRM_INFO("virtual_display:%d, num_crtc:%d\n",
-			 adev->enable_virtual_display, adev->mode_info.num_crtc);
+		dev_info(adev->dev, "virtual_display:%d, num_crtc:%d\n",
+			 adev->enable_virtual_display,
+			 adev->mode_info.num_crtc);
 	}
 }
@@ -2689,6 +2755,13 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 		break;
 	}
 
+	/* Check for IP version 9.4.3 with A0 hardware */
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
+	    !amdgpu_device_get_rev_id(adev)) {
+		dev_err(adev->dev, "Unsupported A0 hardware\n");
+		return -ENODEV;	/* device unsupported - no device error */
+	}
+
 	if (amdgpu_has_atpx() &&
 	    (amdgpu_is_atpx_hybrid() ||
 	     amdgpu_has_atpx_dgpu_power_cntl()) &&
@@ -2701,7 +2774,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 		adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
 	}
 
-
 	adev->pm.pp_feature = amdgpu_pp_feature_mask;
 	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
@@ -2710,21 +2782,29 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 	if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
 		adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
 
+	adev->virt.is_xgmi_node_migrate_enabled = false;
+	if (amdgpu_sriov_vf(adev)) {
+		adev->virt.is_xgmi_node_migrate_enabled =
+			amdgpu_ip_version((adev), GC_HWIP, 0) == IP_VERSION(9, 4, 4);
+	}
+
 	total = true;
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		ip_block = &adev->ip_blocks[i];
 
 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
-			DRM_WARN("disabled ip block: %d <%s>\n",
-				 i, adev->ip_blocks[i].version->funcs->name);
+			dev_warn(adev->dev, "disabled ip block: %d <%s>\n", i,
+				 adev->ip_blocks[i].version->funcs->name);
 			adev->ip_blocks[i].status.valid = false;
 		} else if (ip_block->version->funcs->early_init) {
 			r = ip_block->version->funcs->early_init(ip_block);
 			if (r == -ENOENT) {
 				adev->ip_blocks[i].status.valid = false;
 			} else if (r) {
-				DRM_ERROR("early_init of IP block <%s> failed %d\n",
-					  adev->ip_blocks[i].version->funcs->name, r);
+				dev_err(adev->dev,
+					"early_init of IP block <%s> failed %d\n",
+					adev->ip_blocks[i].version->funcs->name,
+					r);
 				total = false;
 			} else {
 				adev->ip_blocks[i].status.valid = true;
@@ -2805,8 +2885,10 @@ static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
 			r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
 			if (r) {
-				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
-					  adev->ip_blocks[i].version->funcs->name, r);
+				dev_err(adev->dev,
+					"hw_init of IP block <%s> failed %d\n",
+					adev->ip_blocks[i].version->funcs->name,
+					r);
 				return r;
 			}
 			adev->ip_blocks[i].status.hw = true;
@@ -2830,8 +2912,9 @@ static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
 			continue;
 		r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
 		if (r) {
-			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
-				  adev->ip_blocks[i].version->funcs->name, r);
+			dev_err(adev->dev,
+				"hw_init of IP block <%s> failed %d\n",
+				adev->ip_blocks[i].version->funcs->name, r);
 			return r;
 		}
 		adev->ip_blocks[i].status.hw = true;
@@ -2869,8 +2952,11 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
 			} else {
 				r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
 				if (r) {
-					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
-						  adev->ip_blocks[i].version->funcs->name, r);
+					dev_err(adev->dev,
+						"hw_init of IP block <%s> failed %d\n",
+						adev->ip_blocks[i]
+							.version->funcs->name,
+						r);
 					return r;
 				}
 				adev->ip_blocks[i].status.hw = true;
@@ -2925,25 +3011,29 @@ static int
amdgpu_device_init_schedulers(struct amdgpu_device *adev) r = drm_sched_init(&ring->sched, &args); if (r) { - DRM_ERROR("Failed to create scheduler on ring %s.\n", - ring->name); + dev_err(adev->dev, + "Failed to create scheduler on ring %s.\n", + ring->name); return r; } r = amdgpu_uvd_entity_init(adev, ring); if (r) { - DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n", - ring->name); + dev_err(adev->dev, + "Failed to create UVD scheduling entity on ring %s.\n", + ring->name); return r; } r = amdgpu_vce_entity_init(adev, ring); if (r) { - DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n", - ring->name); + dev_err(adev->dev, + "Failed to create VCE scheduling entity on ring %s.\n", + ring->name); return r; } } - amdgpu_xcp_update_partition_sched_list(adev); + if (adev->xcp_mgr) + amdgpu_xcp_update_partition_sched_list(adev); return 0; } @@ -2975,8 +3065,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) if (adev->ip_blocks[i].version->funcs->sw_init) { r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]); if (r) { - DRM_ERROR("sw_init of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + dev_err(adev->dev, + "sw_init of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, + r); goto init_failed; } } @@ -2990,7 +3082,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) /* need to do common hw init early so everything is set up for gmc */ r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); if (r) { - DRM_ERROR("hw_init %d failed %d\n", i, r); + dev_err(adev->dev, "hw_init %d failed %d\n", i, + r); goto init_failed; } adev->ip_blocks[i].status.hw = true; @@ -3002,17 +3095,21 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) r = amdgpu_device_mem_scratch_init(adev); if (r) { - DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r); + dev_err(adev->dev, + "amdgpu_mem_scratch_init failed %d\n", + r); goto init_failed; } r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); if (r) { - DRM_ERROR("hw_init %d failed %d\n", i, r); + dev_err(adev->dev, "hw_init %d failed %d\n", i, + r); goto init_failed; } r = amdgpu_device_wb_init(adev); if (r) { - DRM_ERROR("amdgpu_device_wb_init failed %d\n", r); + dev_err(adev->dev, + "amdgpu_device_wb_init failed %d\n", r); goto init_failed; } adev->ip_blocks[i].status.hw = true; @@ -3024,14 +3121,16 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) AMDGPU_GEM_DOMAIN_GTT, AMDGPU_CSA_SIZE); if (r) { - DRM_ERROR("allocate CSA failed %d\n", r); + dev_err(adev->dev, + "allocate CSA failed %d\n", r); goto init_failed; } } r = amdgpu_seq64_init(adev); if (r) { - DRM_ERROR("allocate seq64 failed %d\n", r); + dev_err(adev->dev, "allocate seq64 failed %d\n", + r); goto init_failed; } } @@ -3172,6 +3271,7 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev) * always assumed to be lost. 
*/ switch (amdgpu_asic_reset_method(adev)) { + case AMD_RESET_METHOD_LINK: case AMD_RESET_METHOD_BACO: case AMD_RESET_METHOD_MODE1: return true; @@ -3220,8 +3320,10 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev, r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i], state); if (r) { - DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + dev_err(adev->dev, + "set_clockgating_state(gate) of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, + r); return r; } } @@ -3257,8 +3359,10 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev, r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i], state); if (r) { - DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + dev_err(adev->dev, + "set_powergating_state(gate) of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, + r); return r; } } @@ -3324,8 +3428,10 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) if (adev->ip_blocks[i].version->funcs->late_init) { r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]); if (r) { - DRM_ERROR("late_init of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + dev_err(adev->dev, + "late_init of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, + r); return r; } } @@ -3334,7 +3440,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) r = amdgpu_ras_late_init(adev); if (r) { - DRM_ERROR("amdgpu_ras_late_init failed %d", r); + dev_err(adev->dev, "amdgpu_ras_late_init failed %d", r); return r; } @@ -3348,7 +3454,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) r = amdgpu_device_enable_mgpu_fan_boost(); if (r) - DRM_ERROR("enable mgpu fan boost failed (%d).\n", r); + dev_err(adev->dev, "enable mgpu fan boost failed (%d).\n", r); /* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */ if (amdgpu_passthrough(adev) && @@ -3381,7 +3487,9 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) r = amdgpu_xgmi_set_pstate(gpu_instance->adev, AMDGPU_XGMI_PSTATE_MIN); if (r) { - DRM_ERROR("pstate setting failed (%d).\n", r); + dev_err(adev->dev, + "pstate setting failed (%d).\n", + r); break; } } @@ -3395,17 +3503,19 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) static void amdgpu_ip_block_hw_fini(struct amdgpu_ip_block *ip_block) { + struct amdgpu_device *adev = ip_block->adev; int r; if (!ip_block->version->funcs->hw_fini) { - DRM_ERROR("hw_fini of IP block <%s> not defined\n", - ip_block->version->funcs->name); + dev_err(adev->dev, "hw_fini of IP block <%s> not defined\n", + ip_block->version->funcs->name); } else { r = ip_block->version->funcs->hw_fini(ip_block); /* XXX handle errors */ if (r) { - DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", - ip_block->version->funcs->name, r); + dev_dbg(adev->dev, + "hw_fini of IP block <%s> failed %d\n", + ip_block->version->funcs->name, r); } } @@ -3446,15 +3556,17 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]); if (r) { - DRM_DEBUG("early_fini of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + dev_dbg(adev->dev, + "early_fini of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); } } 
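
[Editor's note: the hunks above and below all apply one mechanical pattern: DRM_ERROR()/DRM_WARN()/DRM_INFO(), which cannot identify the failing device, become dev_err()/dev_warn()/dev_info() keyed on adev->dev so each log line carries the PCI device name on multi-GPU systems. A minimal illustrative sketch of the conversion (not a hunk from this series):

	/* Before: on a multi-GPU system there is no way to tell
	 * which device this message refers to.
	 */
	DRM_ERROR("hw_init of IP block <%s> failed %d\n", name, r);

	/* After: dev_err() prefixes the message with the device,
	 * e.g. "amdgpu 0000:03:00.0: hw_init of IP block <...> failed -22".
	 */
	dev_err(adev->dev, "hw_init of IP block <%s> failed %d\n", name, r);
]
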
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); - amdgpu_amdkfd_suspend(adev, false); + amdgpu_amdkfd_suspend(adev, true); + amdgpu_userq_suspend(adev); /* Workaround for ASICs need to disable SMC first */ amdgpu_device_smu_fini_early(adev); @@ -3468,7 +3580,8 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) { if (amdgpu_virt_release_full_gpu(adev, false)) - DRM_ERROR("failed to release exclusive mode on fini\n"); + dev_err(adev->dev, + "failed to release exclusive mode on fini\n"); } return 0; @@ -3510,13 +3623,16 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) amdgpu_device_mem_scratch_fini(adev); amdgpu_ib_pool_fini(adev); amdgpu_seq64_fini(adev); + amdgpu_doorbell_fini(adev); } if (adev->ip_blocks[i].version->funcs->sw_fini) { r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]); /* XXX handle errors */ if (r) { - DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + dev_dbg(adev->dev, + "sw_fini of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, + r); } } adev->ip_blocks[i].status.sw = false; @@ -3549,7 +3665,7 @@ static void amdgpu_device_delayed_init_work_handler(struct work_struct *work) r = amdgpu_ib_ring_tests(adev); if (r) - DRM_ERROR("ib ring test failed (%d).\n", r); + dev_err(adev->dev, "ib ring test failed (%d).\n", r); } static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work) @@ -3643,6 +3759,13 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) adev, adev->ip_blocks[i].version->type)) continue; + /* Since we skip suspend for S0i3, we need to cancel the delayed + * idle work here as the suspend callback never gets called. + */ + if (adev->in_s0ix && + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX && + amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0)) + cancel_delayed_work_sync(&adev->gfx.idle_work); /* skip suspend of gfx/mes and psp for S0ix * gfx is in gfxoff state, so on resume it will exit gfxoff just * like at runtime. PSP is also part of the always on hardware @@ -3683,8 +3806,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state); if (r) { - DRM_ERROR("SMC failed to set mp1 state %d, %d\n", - adev->mp1_state, r); + dev_err(adev->dev, + "SMC failed to set mp1 state %d, %d\n", + adev->mp1_state, r); return r; } } @@ -3968,12 +4092,14 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev) /** * amdgpu_device_asic_has_dc_support - determine if DC supports the asic * + * @pdev : pci device context * @asic_type: AMD asic type * * Check if there is DC (new modesetting infrastructre) support for an asic. * returns true if DC has support, false if not. 
*/ -bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) +bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev, + enum amd_asic_type asic_type) { switch (asic_type) { #ifdef CONFIG_DRM_AMDGPU_SI @@ -4016,7 +4142,9 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) #else default: if (amdgpu_dc > 0) - DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n"); + dev_info_once( + &pdev->dev, + "Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n"); return false; #endif } @@ -4035,7 +4163,7 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev) (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)) return false; - return amdgpu_device_asic_has_dc_support(adev->asic_type); + return amdgpu_device_asic_has_dc_support(adev->pdev, adev->asic_type); } static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) @@ -4057,13 +4185,13 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { task_barrier_enter(&hive->tb); - adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev)); + adev->asic_reset_res = amdgpu_device_baco_enter(adev); if (adev->asic_reset_res) goto fail; task_barrier_exit(&hive->tb); - adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev)); + adev->asic_reset_res = amdgpu_device_baco_exit(adev); if (adev->asic_reset_res) goto fail; @@ -4077,7 +4205,8 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) fail: if (adev->asic_reset_res) - DRM_WARN("ASIC reset failed with error, %d for drm dev, %s", + dev_warn(adev->dev, + "ASIC reset failed with error, %d for drm dev, %s", adev->asic_reset_res, adev_to_drm(adev)->unique); amdgpu_put_xgmi_hive(hive); } @@ -4091,18 +4220,10 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) int ret = 0; /* - * By default timeout for non compute jobs is 10000 - * and 60000 for compute jobs. - * In SR-IOV or passthrough mode, timeout for compute - * jobs are 60000 by default. + * By default timeout for jobs is 10 sec */ - adev->gfx_timeout = msecs_to_jiffies(10000); + adev->compute_timeout = adev->gfx_timeout = msecs_to_jiffies(10000); adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; - if (amdgpu_sriov_vf(adev)) - adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ? 
- msecs_to_jiffies(60000) : msecs_to_jiffies(10000); - else - adev->compute_timeout = msecs_to_jiffies(60000); if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { while ((timeout_setting = strsep(&input, ",")) && @@ -4201,7 +4322,7 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev) adev->gfx.mcbp = true; if (adev->gfx.mcbp) - DRM_INFO("MCBP is enabled\n"); + dev_info(adev->dev, "MCBP is enabled\n"); } /** @@ -4217,7 +4338,6 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev) int amdgpu_device_init(struct amdgpu_device *adev, uint32_t flags) { - struct drm_device *ddev = adev_to_drm(adev); struct pci_dev *pdev = adev->pdev; int r, i; bool px = false; @@ -4269,9 +4389,11 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; - DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", - amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, - pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); + dev_info( + adev->dev, + "initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", + amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, + pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); /* mutex initialization are all done here so we * can recall function without having locking issues @@ -4299,9 +4421,10 @@ int amdgpu_device_init(struct amdgpu_device *adev, amdgpu_sync_create(&adev->isolation[i].active); amdgpu_sync_create(&adev->isolation[i].prev); } - mutex_init(&adev->gfx.kfd_sch_mutex); + mutex_init(&adev->gfx.userq_sch_mutex); mutex_init(&adev->gfx.workload_profile_mutex); mutex_init(&adev->vcn.workload_profile_mutex); + mutex_init(&adev->userq_mutex); amdgpu_device_init_apu_flags(adev); @@ -4321,12 +4444,16 @@ int amdgpu_device_init(struct amdgpu_device *adev, spin_lock_init(&adev->virt.rlcg_reg_lock); spin_lock_init(&adev->wb.lock); + xa_init_flags(&adev->userq_xa, XA_FLAGS_LOCK_IRQ); + INIT_LIST_HEAD(&adev->reset_list); INIT_LIST_HEAD(&adev->ras_list); INIT_LIST_HEAD(&adev->pm.od_kobj_list); + INIT_LIST_HEAD(&adev->userq_mgr_list); + INIT_DELAYED_WORK(&adev->delayed_init_work, amdgpu_device_delayed_init_work_handler); INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work, @@ -4383,8 +4510,10 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (!adev->rmmio) return -ENOMEM; - DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); - DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size); + dev_info(adev->dev, "register mmio base: 0x%08X\n", + (uint32_t)adev->rmmio_base); + dev_info(adev->dev, "register mmio size: %u\n", + (unsigned int)adev->rmmio_size); /* * Reset domain needs to be present early, before XGMI hive discovered @@ -4521,7 +4650,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = -EINVAL; goto failed; } - DRM_INFO("GPU posting now...\n"); + dev_info(adev->dev, "GPU posting now...\n"); r = amdgpu_device_asic_init(adev); if (r) { dev_err(adev->dev, "gpu post error!\n"); @@ -4631,12 +4760,12 @@ fence_driver_init: r = amdgpu_pm_sysfs_init(adev); if (r) - DRM_ERROR("registering pm sysfs failed (%d).\n", r); + dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r); r = amdgpu_ucode_sysfs_init(adev); if (r) { adev->ucode_sysfs_en = false; - DRM_ERROR("Creating firmware sysfs failed (%d).\n", r); + dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r); } else adev->ucode_sysfs_en = true; @@ -4651,7 +4780,7 
@@ fence_driver_init: amdgpu_fru_sysfs_init(adev); amdgpu_reg_state_sysfs_init(adev); - amdgpu_xcp_cfg_sysfs_init(adev); + amdgpu_xcp_sysfs_init(adev); if (IS_ENABLED(CONFIG_PERF_EVENTS)) r = amdgpu_pmu_init(adev); @@ -4669,7 +4798,7 @@ fence_driver_init: if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) vga_client_register(adev->pdev, amdgpu_device_vga_set_decode); - px = amdgpu_device_supports_px(ddev); + px = amdgpu_device_supports_px(adev); if (px || (!dev_is_removable(&adev->pdev->dev) && apple_gmux_detect(NULL, NULL))) @@ -4781,7 +4910,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) amdgpu_fru_sysfs_fini(adev); amdgpu_reg_state_sysfs_fini(adev); - amdgpu_xcp_cfg_sysfs_fini(adev); + amdgpu_xcp_sysfs_fini(adev); /* disable ras feature must before hw fini */ amdgpu_ras_pre_fini(adev); @@ -4835,7 +4964,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) kfree(adev->xcp_mgr); adev->xcp_mgr = NULL; - px = amdgpu_device_supports_px(adev_to_drm(adev)); + px = amdgpu_device_supports_px(adev); if (px || (!dev_is_removable(&adev->pdev->dev) && apple_gmux_detect(NULL, NULL))) @@ -4851,7 +4980,6 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) iounmap(adev->rmmio); adev->rmmio = NULL; - amdgpu_doorbell_fini(adev); drm_dev_exit(idx); } @@ -4885,8 +5013,16 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev) return 0; ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); - if (ret) - DRM_WARN("evicting device resources failed\n"); + if (ret) { + dev_warn(adev->dev, "evicting device resources failed\n"); + return ret; + } + + if (adev->in_s4) { + ret = ttm_device_prepare_hibernation(&adev->mman.bdev); + if (ret) + dev_err(adev->dev, "prepare hibernation failed, %d\n", ret); + } return ret; } @@ -4900,28 +5036,20 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev) * @data: data * * This function is called when the system is about to suspend or hibernate. - * It is used to evict resources from the device before the system goes to - * sleep while there is still access to swap. + * It is used to set the appropriate flags so that eviction can be optimized + * in the pm prepare callback. */ static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode, void *data) { struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb); - int r; switch (mode) { case PM_HIBERNATION_PREPARE: adev->in_s4 = true; - fallthrough; - case PM_SUSPEND_PREPARE: - r = amdgpu_device_evict_resources(adev); - /* - * This is considered non-fatal at this time because - * amdgpu_device_prepare() will also fatally evict resources. 
- * See https://gitlab.freedesktop.org/drm/amd/-/issues/3781 - */ - if (r) - drm_warn(adev_to_drm(adev), "Failed to evict resources, freeze active processes if problems occur: %d\n", r); + break; + case PM_POST_HIBERNATION: + adev->in_s4 = false; break; } @@ -4942,15 +5070,13 @@ int amdgpu_device_prepare(struct drm_device *dev) struct amdgpu_device *adev = drm_to_adev(dev); int i, r; - amdgpu_choose_low_power_state(adev); - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; /* Evict the majority of BOs before starting suspend sequence */ r = amdgpu_device_evict_resources(adev); if (r) - goto unprepare; + return r; flush_delayed_work(&adev->gfx.gfx_off_delay_work); @@ -4961,15 +5087,32 @@ int amdgpu_device_prepare(struct drm_device *dev) continue; r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]); if (r) - goto unprepare; + return r; } return 0; +} -unprepare: - adev->in_s0ix = adev->in_s3 = adev->in_s4 = false; +/** + * amdgpu_device_complete - complete power state transition + * + * @dev: drm dev pointer + * + * Undo the changes from amdgpu_device_prepare. This will be + * called on all resume transitions, including those that failed. + */ +void amdgpu_device_complete(struct drm_device *dev) +{ + struct amdgpu_device *adev = drm_to_adev(dev); + int i; - return r; + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_blocks[i].status.valid) + continue; + if (!adev->ip_blocks[i].version->funcs->complete) + continue; + adev->ip_blocks[i].version->funcs->complete(&adev->ip_blocks[i]); + } } /** @@ -4993,14 +5136,16 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients) adev->in_suspend = true; if (amdgpu_sriov_vf(adev)) { + if (!adev->in_s0ix && !adev->in_runpm) + amdgpu_amdkfd_suspend_process(adev); amdgpu_virt_fini_data_exchange(adev); r = amdgpu_virt_request_full_gpu(adev, false); if (r) return r; } - if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3)) - DRM_WARN("smart shift update failed\n"); + if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D3)) + dev_warn(adev->dev, "smart shift update failed\n"); if (notify_clients) drm_client_dev_suspend(adev_to_drm(adev), false); @@ -5011,8 +5156,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients) amdgpu_device_ip_suspend_phase1(adev); - if (!adev->in_s0ix) - amdgpu_amdkfd_suspend(adev, adev->in_runpm); + if (!adev->in_s0ix) { + amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); + amdgpu_userq_suspend(adev); + } r = amdgpu_device_evict_resources(adev); if (r) @@ -5034,6 +5181,32 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients) return 0; } +static inline int amdgpu_virt_resume(struct amdgpu_device *adev) +{ + int r; + unsigned int prev_physical_node_id = adev->gmc.xgmi.physical_node_id; + + /* During VM resume, QEMU programming of VF MSIX table (register GFXMSIX_VECT0_ADDR_LO) + * may not work. The access could be blocked by nBIF protection as VF isn't in + * exclusive access mode. Exclusive access is enabled now, disable/enable MSIX + * so that QEMU reprograms MSIX table. 
+ */ + amdgpu_restore_msix(adev); + + r = adev->gfxhub.funcs->get_xgmi_info(adev); + if (r) + return r; + + dev_info(adev->dev, "xgmi node, old id %d, new id %d\n", + prev_physical_node_id, adev->gmc.xgmi.physical_node_id); + + adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev); + adev->vm_manager.vram_base_offset += + adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; + + return 0; +} + /** * amdgpu_device_resume - initiate device resume * @@ -5055,6 +5228,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients) return r; } + if (amdgpu_virt_xgmi_migrate_enabled(adev)) { + r = amdgpu_virt_resume(adev); + if (r) + goto exit; + } + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; @@ -5076,7 +5255,11 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients) } if (!adev->in_s0ix) { - r = amdgpu_amdkfd_resume(adev, adev->in_runpm); + r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); + if (r) + goto exit; + + r = amdgpu_userq_resume(adev); if (r) goto exit; } @@ -5091,6 +5274,9 @@ exit: if (amdgpu_sriov_vf(adev)) { amdgpu_virt_init_data_exchange(adev); amdgpu_virt_release_full_gpu(adev, true); + + if (!adev->in_s0ix && !r && !adev->in_runpm) + r = amdgpu_amdkfd_resume_process(adev); } if (r) @@ -5125,13 +5311,12 @@ exit: dev->dev->power.disable_depth--; #endif } - adev->in_suspend = false; - if (adev->enable_mes) - amdgpu_mes_self_test(adev); + amdgpu_vram_mgr_clear_reset_blocks(adev); + adev->in_suspend = false; - if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0)) - DRM_WARN("smart shift update failed\n"); + if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0)) + dev_warn(adev->dev, "smart shift update failed\n"); return 0; } @@ -5510,6 +5695,29 @@ mode1_reset_failed: return ret; } +int amdgpu_device_link_reset(struct amdgpu_device *adev) +{ + int ret = 0; + + dev_info(adev->dev, "GPU link reset\n"); + + if (!adev->pcie_reset_ctx.occurs_dpc) + ret = amdgpu_dpm_link_reset(adev); + + if (ret) + goto link_reset_failed; + + ret = amdgpu_psp_wait_for_bootloader(adev); + if (ret) + goto link_reset_failed; + + return 0; + +link_reset_failed: + dev_err(adev->dev, "GPU link reset failed\n"); + return ret; +} + int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, struct amdgpu_reset_context *reset_context) { @@ -5639,7 +5847,9 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context) amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job); if (vram_lost) { - DRM_INFO("VRAM is lost due to GPU reset!\n"); + dev_info( + tmp_adev->dev, + "VRAM is lost due to GPU reset!\n"); amdgpu_inc_vram_lost(tmp_adev); } @@ -5814,6 +6024,7 @@ static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev) switch (amdgpu_asic_reset_method(adev)) { case AMD_RESET_METHOD_MODE1: + case AMD_RESET_METHOD_LINK: adev->mp1_state = PP_MP1_STATE_SHUTDOWN; break; case AMD_RESET_METHOD_MODE2: @@ -5917,117 +6128,85 @@ static int amdgpu_device_health_check(struct list_head *device_list_handle) { struct amdgpu_device *tmp_adev; int ret = 0; - u32 status; list_for_each_entry(tmp_adev, device_list_handle, reset_list) { - pci_read_config_dword(tmp_adev->pdev, PCI_COMMAND, &status); - if (PCI_POSSIBLE_ERROR(status)) { - dev_err(tmp_adev->dev, "device lost from bus!"); - ret = -ENODEV; - } + ret |= amdgpu_device_bus_status_check(tmp_adev); } return ret; } -/** - * amdgpu_device_gpu_recover - reset the asic and recover scheduler - * - * @adev: amdgpu_device pointer - 
* @job: which job trigger hang - * @reset_context: amdgpu reset context pointer - * - * Attempt to reset the GPU if it has hung (all asics). - * Attempt to do soft-reset or full-reset and reinitialize Asic - * Returns 0 for success or an error on failure. - */ - -int amdgpu_device_gpu_recover(struct amdgpu_device *adev, - struct amdgpu_job *job, - struct amdgpu_reset_context *reset_context) +static int amdgpu_device_recovery_prepare(struct amdgpu_device *adev, + struct list_head *device_list, + struct amdgpu_hive_info *hive) { - struct list_head device_list, *device_list_handle = NULL; - bool job_signaled = false; - struct amdgpu_hive_info *hive = NULL; struct amdgpu_device *tmp_adev = NULL; - int i, r = 0; - bool need_emergency_restart = false; - bool audio_suspended = false; - int retry_limit = AMDGPU_MAX_RETRY_LIMIT; - - /* - * If it reaches here because of hang/timeout and a RAS error is - * detected at the same time, let RAS recovery take care of it. - */ - if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) && - !amdgpu_sriov_vf(adev) && - reset_context->src != AMDGPU_RESET_SRC_RAS) { - dev_dbg(adev->dev, - "Gpu recovery from source: %d yielding to RAS error recovery handling", - reset_context->src); - return 0; - } - /* - * Special case: RAS triggered and full reset isn't supported - */ - need_emergency_restart = amdgpu_ras_need_emergency_restart(adev); + int r; /* - * Flush RAM to disk so that after reboot - * the user can read log and see why the system rebooted. - */ - if (need_emergency_restart && amdgpu_ras_get_context(adev) && - amdgpu_ras_get_context(adev)->reboot) { - DRM_WARN("Emergency reboot."); - - ksys_sync_helper(); - emergency_restart(); - } - - dev_info(adev->dev, "GPU %s begin!\n", - need_emergency_restart ? "jobs stop":"reset"); - - if (!amdgpu_sriov_vf(adev)) - hive = amdgpu_get_xgmi_hive(adev); - if (hive) - mutex_lock(&hive->hive_lock); - - reset_context->job = job; - reset_context->hive = hive; - /* * Build list of devices to reset. * In case we are in XGMI hive mode, resort the device list * to put adev in the 1st position. 
*/ - INIT_LIST_HEAD(&device_list); if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) { list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { - list_add_tail(&tmp_adev->reset_list, &device_list); + list_add_tail(&tmp_adev->reset_list, device_list); if (adev->shutdown) tmp_adev->shutdown = true; + if (adev->pcie_reset_ctx.occurs_dpc) + tmp_adev->pcie_reset_ctx.in_link_reset = true; } - if (!list_is_first(&adev->reset_list, &device_list)) - list_rotate_to_front(&adev->reset_list, &device_list); - device_list_handle = &device_list; + if (!list_is_first(&adev->reset_list, device_list)) + list_rotate_to_front(&adev->reset_list, device_list); } else { - list_add_tail(&adev->reset_list, &device_list); - device_list_handle = &device_list; + list_add_tail(&adev->reset_list, device_list); } - if (!amdgpu_sriov_vf(adev)) { - r = amdgpu_device_health_check(device_list_handle); + if (!amdgpu_sriov_vf(adev) && (!adev->pcie_reset_ctx.occurs_dpc)) { + r = amdgpu_device_health_check(device_list); if (r) - goto end_reset; + return r; } - /* We need to lock reset domain only once both for XGMI and single device */ - tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, - reset_list); + return 0; +} + +static void amdgpu_device_recovery_get_reset_lock(struct amdgpu_device *adev, + struct list_head *device_list) +{ + struct amdgpu_device *tmp_adev = NULL; + + if (list_empty(device_list)) + return; + tmp_adev = + list_first_entry(device_list, struct amdgpu_device, reset_list); amdgpu_device_lock_reset_domain(tmp_adev->reset_domain); +} - /* block all schedulers and reset given job's ring */ - list_for_each_entry(tmp_adev, device_list_handle, reset_list) { +static void amdgpu_device_recovery_put_reset_lock(struct amdgpu_device *adev, + struct list_head *device_list) +{ + struct amdgpu_device *tmp_adev = NULL; + + if (list_empty(device_list)) + return; + tmp_adev = + list_first_entry(device_list, struct amdgpu_device, reset_list); + amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain); +} +static void amdgpu_device_halt_activities(struct amdgpu_device *adev, + struct amdgpu_job *job, + struct amdgpu_reset_context *reset_context, + struct list_head *device_list, + struct amdgpu_hive_info *hive, + bool need_emergency_restart) +{ + struct amdgpu_device *tmp_adev = NULL; + int i; + + /* block all schedulers and reset given job's ring */ + list_for_each_entry(tmp_adev, device_list, reset_list) { amdgpu_device_set_mp1_state(tmp_adev); /* @@ -6041,7 +6220,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, * some audio codec errors. */ if (!amdgpu_device_suspend_display_audio(tmp_adev)) - audio_suspended = true; + tmp_adev->pcie_reset_ctx.audio_suspended = true; amdgpu_ras_set_error_query_ready(tmp_adev, false); @@ -6059,6 +6238,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, /* disable ras on ALL IPs */ if (!need_emergency_restart && + (!adev->pcie_reset_ctx.occurs_dpc) && amdgpu_device_ip_need_full_reset(tmp_adev)) amdgpu_ras_suspend(tmp_adev); @@ -6075,25 +6255,23 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, } atomic_inc(&tmp_adev->gpu_reset_counter); } +} - if (need_emergency_restart) - goto skip_sched_resume; - - /* - * Must check guilty signal here since after this point all old - * HW fences are force signaled. 
- * - * job->base holds a reference to parent fence - */ - if (job && dma_fence_is_signaled(&job->hw_fence)) { - job_signaled = true; - dev_info(adev->dev, "Guilty job already signaled, skipping HW reset"); - goto skip_hw_reset; - } +static int amdgpu_device_asic_reset(struct amdgpu_device *adev, + struct list_head *device_list, + struct amdgpu_reset_context *reset_context) +{ + struct amdgpu_device *tmp_adev = NULL; + int retry_limit = AMDGPU_MAX_RETRY_LIMIT; + int r = 0; retry: /* Rest of adevs pre asic reset from XGMI hive. */ - list_for_each_entry(tmp_adev, device_list_handle, reset_list) { + list_for_each_entry(tmp_adev, device_list, reset_list) { + if (adev->pcie_reset_ctx.occurs_dpc) + tmp_adev->no_hw_access = true; r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context); + if (adev->pcie_reset_ctx.occurs_dpc) + tmp_adev->no_hw_access = false; /*TODO Should we stop ?*/ if (r) { dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ", @@ -6105,6 +6283,11 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ /* Actual ASIC resets if needed.*/ /* Host driver will handle XGMI hive reset for SRIOV */ if (amdgpu_sriov_vf(adev)) { + + /* Bail out of reset early */ + if (amdgpu_ras_is_rma(adev)) + return -ENODEV; + if (amdgpu_ras_get_fed_status(adev) || amdgpu_virt_rcvd_ras_interrupt(adev)) { dev_dbg(adev->dev, "Detected RAS error, wait for FLR completion\n"); amdgpu_ras_set_fed(adev, true); @@ -6119,12 +6302,12 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ if (r) adev->asic_reset_res = r; } else { - r = amdgpu_do_asic_reset(device_list_handle, reset_context); + r = amdgpu_do_asic_reset(device_list, reset_context); if (r && r == -EAGAIN) goto retry; } - list_for_each_entry(tmp_adev, device_list_handle, reset_list) { + list_for_each_entry(tmp_adev, device_list, reset_list) { /* * Drop any pending non scheduler resets queued before reset is done. * Any reset scheduled after this point would be valid. Scheduler resets @@ -6134,10 +6317,18 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. 
*/ amdgpu_device_stop_pending_resets(tmp_adev); } -skip_hw_reset: + return r; +} + +static int amdgpu_device_sched_resume(struct list_head *device_list, + struct amdgpu_reset_context *reset_context, + bool job_signaled) +{ + struct amdgpu_device *tmp_adev = NULL; + int i, r = 0; /* Post ASIC reset for all devs .*/ - list_for_each_entry(tmp_adev, device_list_handle, reset_list) { + list_for_each_entry(tmp_adev, device_list, reset_list) { for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = tmp_adev->rings[i]; @@ -6168,13 +6359,23 @@ skip_hw_reset: amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); } else { dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter)); - if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0)) - DRM_WARN("smart shift update failed\n"); + if (amdgpu_acpi_smart_shift_update(tmp_adev, + AMDGPU_SS_DEV_D0)) + dev_warn(tmp_adev->dev, + "smart shift update failed\n"); } } -skip_sched_resume: - list_for_each_entry(tmp_adev, device_list_handle, reset_list) { + return r; +} + +static void amdgpu_device_gpu_resume(struct amdgpu_device *adev, + struct list_head *device_list, + bool need_emergency_restart) +{ + struct amdgpu_device *tmp_adev = NULL; + + list_for_each_entry(tmp_adev, device_list, reset_list) { /* unlock kfd: SRIOV would do it separately */ if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev)) amdgpu_amdkfd_post_reset(tmp_adev); @@ -6185,18 +6386,114 @@ skip_sched_resume: if (!adev->kfd.init_complete) amdgpu_amdkfd_device_init(adev); - if (audio_suspended) + if (tmp_adev->pcie_reset_ctx.audio_suspended) amdgpu_device_resume_display_audio(tmp_adev); amdgpu_device_unset_mp1_state(tmp_adev); amdgpu_ras_set_error_query_ready(tmp_adev, true); + } +} - tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, - reset_list); - amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain); +/** + * amdgpu_device_gpu_recover - reset the asic and recover scheduler + * + * @adev: amdgpu_device pointer + * @job: which job trigger hang + * @reset_context: amdgpu reset context pointer + * + * Attempt to reset the GPU if it has hung (all asics). + * Attempt to do soft-reset or full-reset and reinitialize Asic + * Returns 0 for success or an error on failure. + */ + +int amdgpu_device_gpu_recover(struct amdgpu_device *adev, + struct amdgpu_job *job, + struct amdgpu_reset_context *reset_context) +{ + struct list_head device_list; + bool job_signaled = false; + struct amdgpu_hive_info *hive = NULL; + int r = 0; + bool need_emergency_restart = false; + + /* + * If it reaches here because of hang/timeout and a RAS error is + * detected at the same time, let RAS recovery take care of it. + */ + if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) && + !amdgpu_sriov_vf(adev) && + reset_context->src != AMDGPU_RESET_SRC_RAS) { + dev_dbg(adev->dev, + "Gpu recovery from source: %d yielding to RAS error recovery handling", + reset_context->src); + return 0; + } + + /* + * Special case: RAS triggered and full reset isn't supported + */ + need_emergency_restart = amdgpu_ras_need_emergency_restart(adev); + + /* + * Flush RAM to disk so that after reboot + * the user can read log and see why the system rebooted. + */ + if (need_emergency_restart && amdgpu_ras_get_context(adev) && + amdgpu_ras_get_context(adev)->reboot) { + dev_warn(adev->dev, "Emergency reboot."); + + ksys_sync_helper(); + emergency_restart(); + } + + dev_info(adev->dev, "GPU %s begin!\n", + need_emergency_restart ? 
"jobs stop":"reset"); + + if (!amdgpu_sriov_vf(adev)) + hive = amdgpu_get_xgmi_hive(adev); + if (hive) + mutex_lock(&hive->hive_lock); + + reset_context->job = job; + reset_context->hive = hive; + INIT_LIST_HEAD(&device_list); + + if (amdgpu_device_recovery_prepare(adev, &device_list, hive)) + goto end_reset; + + /* We need to lock reset domain only once both for XGMI and single device */ + amdgpu_device_recovery_get_reset_lock(adev, &device_list); + + amdgpu_device_halt_activities(adev, job, reset_context, &device_list, + hive, need_emergency_restart); + if (need_emergency_restart) + goto skip_sched_resume; + /* + * Must check guilty signal here since after this point all old + * HW fences are force signaled. + * + * job->base holds a reference to parent fence + */ + if (job && dma_fence_is_signaled(&job->hw_fence.base)) { + job_signaled = true; + dev_info(adev->dev, "Guilty job already signaled, skipping HW reset"); + goto skip_hw_reset; + } + + r = amdgpu_device_asic_reset(adev, &device_list, reset_context); + if (r) + goto reset_unlock; +skip_hw_reset: + r = amdgpu_device_sched_resume(&device_list, reset_context, job_signaled); + if (r) + goto reset_unlock; +skip_sched_resume: + amdgpu_device_gpu_resume(adev, &device_list, need_emergency_restart); +reset_unlock: + amdgpu_device_recovery_put_reset_lock(adev, &device_list); end_reset: if (hive) { mutex_unlock(&hive->hive_lock); @@ -6208,8 +6505,17 @@ end_reset: atomic_set(&adev->reset_domain->reset_res, r); - if (!r) - drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE); + if (!r) { + struct amdgpu_task_info *ti = NULL; + + if (job) + ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid); + + drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, + ti ? &ti->task : NULL); + + amdgpu_vm_put_task_info(ti); + } return r; } @@ -6528,12 +6834,11 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, #endif } -int amdgpu_device_baco_enter(struct drm_device *dev) +int amdgpu_device_baco_enter(struct amdgpu_device *adev) { - struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); - if (!amdgpu_device_supports_baco(dev)) + if (!amdgpu_device_supports_baco(adev)) return -ENOTSUPP; if (ras && adev->ras_enabled && @@ -6543,13 +6848,12 @@ int amdgpu_device_baco_enter(struct drm_device *dev) return amdgpu_dpm_baco_enter(adev); } -int amdgpu_device_baco_exit(struct drm_device *dev) +int amdgpu_device_baco_exit(struct amdgpu_device *adev) { - struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); int ret = 0; - if (!amdgpu_device_supports_baco(dev)) + if (!amdgpu_device_supports_baco(adev)) return -ENOTSUPP; ret = amdgpu_dpm_baco_exit(adev); @@ -6580,12 +6884,14 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta { struct drm_device *dev = pci_get_drvdata(pdev); struct amdgpu_device *adev = drm_to_adev(dev); - int i; + struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); + struct amdgpu_reset_context reset_context; + struct list_head device_list; - DRM_INFO("PCI error: detected callback, state(%d)!!\n", state); + dev_info(adev->dev, "PCI error: detected callback!!\n"); - if (adev->gmc.xgmi.num_physical_nodes > 1) { - DRM_WARN("No support for XGMI hive yet..."); + if (!amdgpu_dpm_is_link_reset_supported(adev)) { + dev_warn(adev->dev, "No support for XGMI hive yet...\n"); return PCI_ERS_RESULT_DISCONNECT; } @@ -6593,32 +6899,30 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev 
*pdev, pci_channel_sta switch (state) { case pci_channel_io_normal: + dev_info(adev->dev, "pci_channel_io_normal: state(%d)!!\n", state); return PCI_ERS_RESULT_CAN_RECOVER; - /* Fatal error, prepare for slot reset */ case pci_channel_io_frozen: - /* - * Locking adev->reset_domain->sem will prevent any external access - * to GPU during PCI error recovery - */ - amdgpu_device_lock_reset_domain(adev->reset_domain); - amdgpu_device_set_mp1_state(adev); - - /* - * Block any work scheduling as we do for regular GPU reset - * for the duration of the recovery - */ - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - struct amdgpu_ring *ring = adev->rings[i]; - - if (!amdgpu_ring_sched_ready(ring)) - continue; - - drm_sched_stop(&ring->sched, NULL); + /* Fatal error, prepare for slot reset */ + dev_info(adev->dev, "pci_channel_io_frozen: state(%d)!!\n", state); + + if (hive) + mutex_lock(&hive->hive_lock); + adev->pcie_reset_ctx.occurs_dpc = true; + memset(&reset_context, 0, sizeof(reset_context)); + INIT_LIST_HEAD(&device_list); + + amdgpu_device_recovery_prepare(adev, &device_list, hive); + amdgpu_device_recovery_get_reset_lock(adev, &device_list); + amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list, + hive, false); + if (hive) { + mutex_unlock(&hive->hive_lock); + amdgpu_put_xgmi_hive(hive); } - atomic_inc(&adev->gpu_reset_counter); return PCI_ERS_RESULT_NEED_RESET; case pci_channel_io_perm_failure: /* Permanent error, prepare for device removal */ + dev_info(adev->dev, "pci_channel_io_perm_failure: state(%d)!!\n", state); return PCI_ERS_RESULT_DISCONNECT; } @@ -6631,8 +6935,10 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta */ pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev) { + struct drm_device *dev = pci_get_drvdata(pdev); + struct amdgpu_device *adev = drm_to_adev(dev); - DRM_INFO("PCI error: mmio enabled callback!!\n"); + dev_info(adev->dev, "PCI error: mmio enabled callback!!\n"); /* TODO - dump whatever for debugging purposes */ @@ -6656,10 +6962,12 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); struct amdgpu_device *adev = drm_to_adev(dev); - int r, i; struct amdgpu_reset_context reset_context; - u32 memsize; + struct amdgpu_device *tmp_adev; + struct amdgpu_hive_info *hive; struct list_head device_list; + int r = 0, i; + u32 memsize; /* PCI error slot reset should be skipped During RAS recovery */ if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) || @@ -6667,15 +6975,12 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev) amdgpu_ras_in_recovery(adev)) return PCI_ERS_RESULT_RECOVERED; - DRM_INFO("PCI error: slot reset callback!!\n"); + dev_info(adev->dev, "PCI error: slot reset callback!!\n"); memset(&reset_context, 0, sizeof(reset_context)); - INIT_LIST_HEAD(&device_list); - list_add_tail(&adev->reset_list, &device_list); - /* wait for asic to come out of reset */ - msleep(500); + msleep(700); /* Restore PCI confspace */ amdgpu_device_load_pci_state(pdev); @@ -6696,26 +7001,40 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev) reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags); - - adev->no_hw_access = true; - r = amdgpu_device_pre_asic_reset(adev, &reset_context); - adev->no_hw_access = false; - if (r) - goto out; + set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags); + 
INIT_LIST_HEAD(&device_list); - r = amdgpu_do_asic_reset(&device_list, &reset_context); + hive = amdgpu_get_xgmi_hive(adev); + if (hive) { + mutex_lock(&hive->hive_lock); + reset_context.hive = hive; + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { + tmp_adev->pcie_reset_ctx.in_link_reset = true; + list_add_tail(&tmp_adev->reset_list, &device_list); + } + } else { + set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags); + list_add_tail(&adev->reset_list, &device_list); + } + r = amdgpu_device_asic_reset(adev, &device_list, &reset_context); out: if (!r) { if (amdgpu_device_cache_pci_state(adev->pdev)) pci_restore_state(adev->pdev); - - DRM_INFO("PCIe error recovery succeeded\n"); + dev_info(adev->dev, "PCIe error recovery succeeded\n"); } else { - DRM_ERROR("PCIe error recovery failed, err:%d", r); - amdgpu_device_unset_mp1_state(adev); - amdgpu_device_unlock_reset_domain(adev->reset_domain); + dev_err(adev->dev, "PCIe error recovery failed, err:%d\n", r); + if (hive) { + list_for_each_entry(tmp_adev, &device_list, reset_list) + amdgpu_device_unset_mp1_state(tmp_adev); + } + amdgpu_device_recovery_put_reset_lock(adev, &device_list); + } + + if (hive) { + mutex_unlock(&hive->hive_lock); + amdgpu_put_xgmi_hive(hive); } return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; @@ -6732,26 +7051,37 @@ void amdgpu_pci_resume(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); struct amdgpu_device *adev = drm_to_adev(dev); - int i; - + struct list_head device_list; + struct amdgpu_hive_info *hive = NULL; + struct amdgpu_device *tmp_adev = NULL; - DRM_INFO("PCI error: resume callback!!\n"); + dev_info(adev->dev, "PCI error: resume callback!!\n"); /* Only continue execution for the case of pci_channel_io_frozen */ if (adev->pci_channel_state != pci_channel_io_frozen) return; - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - struct amdgpu_ring *ring = adev->rings[i]; + INIT_LIST_HEAD(&device_list); - if (!amdgpu_ring_sched_ready(ring)) - continue; + hive = amdgpu_get_xgmi_hive(adev); + if (hive) { + mutex_lock(&hive->hive_lock); + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { + tmp_adev->pcie_reset_ctx.in_link_reset = false; + list_add_tail(&tmp_adev->reset_list, &device_list); + } + } else + list_add_tail(&adev->reset_list, &device_list); - drm_sched_start(&ring->sched, 0); - } + amdgpu_device_sched_resume(&device_list, NULL, NULL); + amdgpu_device_gpu_resume(adev, &device_list, false); + amdgpu_device_recovery_put_reset_lock(adev, &device_list); + adev->pcie_reset_ctx.occurs_dpc = false; - amdgpu_device_unset_mp1_state(adev); - amdgpu_device_unlock_reset_domain(adev->reset_domain); + if (hive) { + mutex_unlock(&hive->hive_lock); + amdgpu_put_xgmi_hive(hive); + } } bool amdgpu_device_cache_pci_state(struct pci_dev *pdev) @@ -6770,11 +7100,11 @@ bool amdgpu_device_cache_pci_state(struct pci_dev *pdev) adev->pci_state = pci_store_saved_state(pdev); if (!adev->pci_state) { - DRM_ERROR("Failed to store PCI saved state"); + dev_err(adev->dev, "Failed to store PCI saved state"); return false; } } else { - DRM_WARN("Failed to save PCI state, err:%d\n", r); + dev_warn(adev->dev, "Failed to save PCI state, err:%d\n", r); return false; } @@ -6795,7 +7125,7 @@ bool amdgpu_device_load_pci_state(struct pci_dev *pdev) if (!r) { pci_restore_state(pdev); } else { - DRM_WARN("Failed to load PCI state, err:%d\n", r); + dev_warn(adev->dev, "Failed to load PCI state, err:%d\n", r); return false; } @@ -7041,7 +7371,7 @@ struct dma_fence 
*amdgpu_device_enforce_isolation(struct amdgpu_device *adev, dep = amdgpu_sync_peek_fence(&isolation->prev, ring); r = amdgpu_sync_fence(&isolation->active, &f->finished, GFP_NOWAIT); if (r) - DRM_WARN("OOM tracking isolation\n"); + dev_warn(adev->dev, "OOM tracking isolation\n"); out_grab_ref: dma_fence_get(dep); @@ -7109,9 +7439,11 @@ uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev, tmp_ = RREG32(reg_addr); loop--; if (!loop) { - DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08xn", - inst, reg_name, (uint32_t)expected_value, - (uint32_t)(tmp_ & (mask))); + dev_warn( + adev->dev, + "Register(%d) [%s] failed to reach value 0x%08x != 0x%08xn", + inst, reg_name, (uint32_t)expected_value, + (uint32_t)(tmp_ & (mask))); ret = -ETIMEDOUT; break; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index dc2713ec95a5..81b3443c8d7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -120,6 +120,8 @@ MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin"); MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin"); MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin"); MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin"); +MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin"); #define mmIP_DISCOVERY_VERSION 0x16A00 #define mmRCC_CONFIG_MEMSIZE 0xde3 @@ -268,9 +270,10 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, uint8_t *binary) { + bool sz_valid = true; uint64_t vram_size; - u32 msg; int i, ret = 0; + u32 msg; if (!amdgpu_sriov_vf(adev)) { /* It can take up to a second for IFWI init to complete on some dGPUs, @@ -289,9 +292,13 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, } } - vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; + vram_size = RREG32(mmRCC_CONFIG_MEMSIZE); + if (!vram_size || vram_size == U32_MAX) + sz_valid = false; + else + vram_size <<= 20; - if (vram_size) { + if (sz_valid) { uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET; amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, adev->mman.discovery_tmr_size, false); @@ -299,6 +306,11 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary); } + if (ret) + dev_err(adev->dev, + "failed to read discovery info from memory, vram size read: %llx", + vram_size); + return ret; } @@ -309,10 +321,12 @@ static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, const struct firmware *fw; int r; - r = request_firmware(&fw, fw_name, adev->dev); + r = firmware_request_nowarn(&fw, fw_name, adev->dev); if (r) { - dev_err(adev->dev, "can't load firmware \"%s\"\n", - fw_name); + if (amdgpu_discovery == 2) + dev_err(adev->dev, "can't load firmware \"%s\"\n", fw_name); + else + drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fw_name); return r; } @@ -447,16 +461,12 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) /* Read from file if it is the preferred option */ fw_name = amdgpu_discovery_get_fw_name(adev); if (fw_name != NULL) { - dev_info(adev->dev, "use ip discovery information from file"); + drm_dbg(&adev->ddev, "use ip discovery information from file"); r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name); - - if (r) { - dev_err(adev->dev, "failed 
to read ip discovery binary from file\n"); - r = -EINVAL; + if (r) goto out; - } - } else { + drm_dbg(&adev->ddev, "use ip discovery information from memory"); r = amdgpu_discovery_read_binary_from_mem( adev, adev->mman.discovery_bin); if (r) @@ -1326,10 +1336,8 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) int r; r = amdgpu_discovery_init(adev); - if (r) { - DRM_ERROR("amdgpu_discovery_init failed\n"); + if (r) return r; - } wafl_ver = 0; adev->gfx.xcc_mask = 0; @@ -2567,8 +2575,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) break; default: r = amdgpu_discovery_reg_base_init(adev); - if (r) - return -EINVAL; + if (r) { + drm_err(&adev->ddev, "discovery failed: %d\n", r); + return r; + } amdgpu_discovery_harvest_ip(adev); amdgpu_discovery_get_gfx_info(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 35c778426a7c..51bab32fd8c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -1196,13 +1196,14 @@ static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev, struct amdgpu_framebuffer *rfb, struct drm_file *file_priv, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { int ret; rfb->base.obj[0] = obj; - drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, &rfb->base, info, mode_cmd); /* Verify that the modifier is supported. */ if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format, mode_cmd->modifier[0])) { @@ -1297,6 +1298,7 @@ static int amdgpu_display_framebuffer_init(struct drm_device *dev, struct drm_framebuffer * amdgpu_display_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { struct amdgpu_framebuffer *amdgpu_fb; @@ -1317,7 +1319,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */ bo = gem_to_amdgpu_bo(obj); domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags); - if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) { + if (drm_gem_is_imported(obj) && !(domains & AMDGPU_GEM_DOMAIN_GTT)) { drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n"); drm_gem_object_put(obj); return ERR_PTR(-EINVAL); @@ -1330,7 +1332,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, } ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv, - mode_cmd, obj); + info, mode_cmd, obj); if (ret) { kfree(amdgpu_fb); drm_gem_object_put(obj); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h index dfa0d642ac16..930c171473b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h @@ -44,6 +44,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev, struct drm_framebuffer * amdgpu_display_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd); const struct drm_format_info * amdgpu_lookup_format_info(u32 format, uint64_t modifier); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 9f627caedc3f..ff98c87b2e0b 
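As an aside on the discovery change above: reading mmRCC_CONFIG_MEMSIZE now validates the raw register before scaling, since a dead or unpowered device typically reads back as all zeroes or all ones over MMIO. A minimal sketch of the same validate-then-scale step, with a hypothetical helper name:

/* Sketch: validate a MEMSIZE-style register before scaling MB -> bytes. */
static bool sketch_vram_size_bytes(u32 memsize_mb, u64 *out_bytes)
{
	/* 0x0 and 0xffffffff are the usual results of a failed MMIO read */
	if (!memsize_mb || memsize_mb == U32_MAX)
		return false;

	*out_bytes = (u64)memsize_mb << 20;	/* 1 MB = 2^20 bytes */
	return true;
}

With a reported 16384 MB this yields 16384 << 20 = 0x400000000 (16 GiB); an unvalidated zero would instead underflow the later vram_size - DISCOVERY_TMR_OFFSET read position.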
100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -43,6 +43,29 @@ #include <linux/dma-fence-array.h> #include <linux/pci-p2pdma.h> +static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops; + +/** + * dma_buf_attach_adev - Helper to get adev of an attachment + * + * @attach: attachment + * + * Returns: + * A struct amdgpu_device * if the attaching device is an amdgpu device or + * partition, NULL otherwise. + */ +static struct amdgpu_device *dma_buf_attach_adev(struct dma_buf_attachment *attach) +{ + if (attach->importer_ops == &amdgpu_dma_buf_attach_ops) { + struct drm_gem_object *obj = attach->importer_priv; + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + + return amdgpu_ttm_adev(bo->tbo.bdev); + } + + return NULL; +} + /** * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation * @@ -54,11 +77,13 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) { + struct amdgpu_device *attach_adev = dma_buf_attach_adev(attach); struct drm_gem_object *obj = dmabuf->priv; struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0) + if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) && + pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0) attach->peer2peer = false; amdgpu_vm_bo_update_shared(bo); @@ -75,11 +100,35 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf, */ static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach) { - struct drm_gem_object *obj = attach->dmabuf->priv; - struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + struct dma_buf *dmabuf = attach->dmabuf; + struct amdgpu_bo *bo = gem_to_amdgpu_bo(dmabuf->priv); + u32 domains = bo->allowed_domains; + + dma_resv_assert_held(dmabuf->resv); + + /* Try pinning into VRAM to allow P2P with RDMA NICs without ODP + * support if all attachments can do P2P. If any attachment can't do + * P2P just pin into GTT instead. + * + * To avoid conflicting pinnings between GPUs and RDMA when move + * notifiers are disabled, only allow pinning in VRAM when move + * notifiers are enabled.
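Distilled, the placement policy this comment describes is a pure function of three inputs; a sketch with hypothetical names, assuming the caller holds the reservation lock:

/* Sketch: choose pin domains for an exported BO. */
static u32 sketch_pin_domains(u32 allowed_domains, bool move_notify,
			      bool all_importers_p2p)
{
	u32 domains = allowed_domains;

	/* without move notifiers, a VRAM pin could conflict with RDMA */
	if (!move_notify || !all_importers_p2p)
		domains &= ~AMDGPU_GEM_DOMAIN_VRAM;

	return domains;	/* the real code WARNs and fails if this ends up 0 */
}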
+ */ + if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) { + domains &= ~AMDGPU_GEM_DOMAIN_VRAM; + } else { + list_for_each_entry(attach, &dmabuf->attachments, node) + if (!attach->peer2peer) + domains &= ~AMDGPU_GEM_DOMAIN_VRAM; + } - /* pin buffer into GTT */ - return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT); + if (domains & AMDGPU_GEM_DOMAIN_VRAM) + bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + + if (WARN_ON(!domains)) + return -EINVAL; + + return amdgpu_bo_pin(bo, domains); } /** @@ -134,9 +183,6 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach, r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); if (r) return ERR_PTR(r); - - } else if (bo->tbo.resource->mem_type != TTM_PL_TT) { - return ERR_PTR(-EBUSY); } switch (bo->tbo.resource->mem_type) { @@ -153,6 +199,11 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach, break; case TTM_PL_VRAM: + /* XGMI-accessible memory should never be DMA-mapped */ + if (WARN_ON(amdgpu_dmabuf_is_xgmi_accessible( + dma_buf_attach_adev(attach), bo))) + return ERR_PTR(-EINVAL); + r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0, bo->tbo.base.size, attach->dev, dir, &sgt); @@ -184,7 +235,7 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach, struct sg_table *sgt, enum dma_data_direction dir) { - if (sgt->sgl->page_link) { + if (sg_page(sgt->sgl)) { dma_unmap_sgtable(attach->dev, sgt, dir, 0); sg_free_table(sgt); kfree(sgt); @@ -459,8 +510,11 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev, struct drm_gem_object *obj = &bo->tbo.base; struct drm_gem_object *gobj; - if (obj->import_attach) { - struct dma_buf *dma_buf = obj->import_attach->dmabuf; + if (!adev) + return false; + + if (drm_gem_is_imported(obj)) { + struct dma_buf *dma_buf = obj->dma_buf; if (dma_buf->ops != &amdgpu_dmabuf_ops) /* No XGMI with non AMD GPUs */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c index 3f3662e8b871..3040437d99c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c @@ -41,7 +41,8 @@ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index) if (index < adev->doorbell.num_kernel_doorbells) return readl(adev->doorbell.cpu_addr + index); - DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); + dev_err(adev->dev, "reading beyond doorbell aperture: 0x%08x!\n", + index); return 0; } @@ -63,7 +64,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v) if (index < adev->doorbell.num_kernel_doorbells) writel(v, adev->doorbell.cpu_addr + index); else - DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); + dev_err(adev->dev, + "writing beyond doorbell aperture: 0x%08x!\n", index); } /** @@ -83,7 +85,8 @@ u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index) if (index < adev->doorbell.num_kernel_doorbells) return atomic64_read((atomic64_t *)(adev->doorbell.cpu_addr + index)); - DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); + dev_err(adev->dev, "reading beyond doorbell aperture: 0x%08x!\n", + index); return 0; } @@ -105,7 +108,8 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v) if (index < adev->doorbell.num_kernel_doorbells) atomic64_set((atomic64_t *)(adev->doorbell.cpu_addr + index), v); else - DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); + dev_err(adev->dev, + "writing beyond doorbell aperture: 0x%08x!\n", index); } /** @@ -166,7 
+170,8 @@ int amdgpu_doorbell_create_kernel_doorbells(struct amdgpu_device *adev) NULL, (void **)&adev->doorbell.cpu_addr); if (r) { - DRM_ERROR("Failed to allocate kernel doorbells, err=%d\n", r); + dev_err(adev->dev, + "Failed to allocate kernel doorbells, err=%d\n", r); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 23cfce5aa1fc..395c6be901ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -51,6 +51,8 @@ #include "amdgpu_reset.h" #include "amdgpu_sched.h" #include "amdgpu_xgmi.h" +#include "amdgpu_userq.h" +#include "amdgpu_userq_fence.h" #include "../amdxcp/amdgpu_xcp_drv.h" /* @@ -123,9 +125,10 @@ * - 3.61.0 - Contains fix for RV/PCO compute queues * - 3.62.0 - Add AMDGPU_IDS_FLAGS_MODE_PF, AMDGPU_IDS_FLAGS_MODE_VF & AMDGPU_IDS_FLAGS_MODE_PT * - 3.63.0 - GFX12 display DCC supports 256B max compressed block size + * - 3.64.0 - Userq IP support query */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 63 +#define KMS_DRIVER_MINOR 64 #define KMS_DRIVER_PATCHLEVEL 0 /* @@ -140,6 +143,8 @@ enum AMDGPU_DEBUG_MASK { AMDGPU_DEBUG_ENABLE_EXP_RESETS = BIT(5), AMDGPU_DEBUG_DISABLE_GPU_RING_RESET = BIT(6), AMDGPU_DEBUG_SMU_POOL = BIT(7), + AMDGPU_DEBUG_VM_USERPTR = BIT(8), + AMDGPU_DEBUG_DISABLE_RAS_CE_LOG = BIT(9) }; unsigned int amdgpu_vram_limit = UINT_MAX; @@ -176,7 +181,7 @@ uint amdgpu_pg_mask = 0xffffffff; uint amdgpu_sdma_phase_quantum = 32; char *amdgpu_disable_cu; char *amdgpu_virtual_display; -bool enforce_isolation; +int amdgpu_enforce_isolation = -1; int amdgpu_modeset = -1; /* Specifies the default granularity for SVM, used in buffer @@ -238,6 +243,8 @@ int amdgpu_agp = -1; /* auto */ int amdgpu_wbrf = -1; int amdgpu_damage_clips = -1; /* auto */ int amdgpu_umsch_mm_fwlog; +int amdgpu_rebar = -1; /* auto */ +int amdgpu_user_queue = -1; DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0, "DRM_UT_CORE", @@ -355,12 +362,12 @@ module_param_named(svm_default_granularity, amdgpu_svm_default_granularity, uint * The second one is for Compute. The third and fourth ones are * for SDMA and Video. * - * By default(with no lockup_timeout settings), the timeout for all non-compute(GFX, SDMA and Video) - * jobs is 10000. The timeout for compute is 60000. + * By default(with no lockup_timeout settings), the timeout for all jobs is 10000. */ -MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and 60000 for compute jobs; " - "for passthrough or sriov, 10000 for all jobs. 0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; " - "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video]."); +MODULE_PARM_DESC(lockup_timeout, + "GPU lockup timeout in ms (default: 10000 for all jobs. " + "0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; " + "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video]."); module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444); /** @@ -1033,11 +1040,13 @@ module_param_named(user_partt_mode, amdgpu_user_partt_mode, uint, 0444); /** - * DOC: enforce_isolation (bool) - * enforce process isolation between graphics and compute via using the same reserved vmid. + * DOC: enforce_isolation (int) + * enforce process isolation between graphics and compute. 
+ * (-1 = auto, 0 = disable, 1 = enable, 2 = enable legacy mode, 3 = enable without cleaner shader) */ -module_param(enforce_isolation, bool, 0444); -MODULE_PARM_DESC(enforce_isolation, "enforce process isolation between graphics and compute . enforce_isolation = on"); +module_param_named(enforce_isolation, amdgpu_enforce_isolation, int, 0444); +MODULE_PARM_DESC(enforce_isolation, +"enforce process isolation between graphics and compute. (-1 = auto, 0 = disable, 1 = enable, 2 = enable legacy mode, 3 = enable without cleaner shader)"); /** * DOC: modeset (int) @@ -1096,6 +1105,28 @@ MODULE_PARM_DESC(wbrf, "Enable Wifi RFI interference mitigation (0 = disabled, 1 = enabled, -1 = auto(default)"); module_param_named(wbrf, amdgpu_wbrf, int, 0444); +/** + * DOC: rebar (int) + * Allow BAR resizing. Disable this to prevent the driver from attempting + * to resize the BAR if the GPU supports it and there is available MMIO space. + * Note that this just prevents the driver from resizing the BAR. The BIOS + * may have already resized the BAR at boot time. + */ +MODULE_PARM_DESC(rebar, "Resizable BAR (-1 = auto (default), 0 = disable, 1 = enable)"); +module_param_named(rebar, amdgpu_rebar, int, 0444); + +/** + * DOC: user_queue (int) + * Enable user queues on systems that support user queues. Possible values: + * + * - -1 = auto (ASIC specific default) + * - 0 = user queues disabled + * - 1 = user queues enabled and kernel queues enabled (if supported) + * - 2 = user queues enabled and kernel queues disabled + */ +MODULE_PARM_DESC(user_queue, "Enable user queues (-1 = auto (default), 0 = disable, 1 = enable, 2 = enable UQs and disable KQs)"); +module_param_named(user_queue, amdgpu_user_queue, int, 0444); + /* These devices are not supported by amdgpu. * They are supported by the mach64, r128, radeon drivers */ @@ -1809,7 +1840,6 @@ static const u16 amdgpu_unsupported_pciidlist[] = { }; static const struct pci_device_id pciidlist[] = { -#ifdef CONFIG_DRM_AMDGPU_SI {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, {0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, @@ -1882,8 +1912,6 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, {0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, {0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, -#endif -#ifdef CONFIG_DRM_AMDGPU_CIK /* Kaveri */ {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, @@ -1966,7 +1994,6 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, -#endif /* topaz */ {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, @@ -2248,6 +2275,15 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev) pr_info("debug: use vram for smu pool\n"); adev->pm.smu_debug_mask |= SMU_DEBUG_POOL_USE_VRAM; } + if (amdgpu_debug_mask & AMDGPU_DEBUG_VM_USERPTR) { + pr_info("debug: VM mode debug for userptr is enabled\n"); + adev->debug_vm_userptr = true; + } + + if (amdgpu_debug_mask & 
AMDGPU_DEBUG_DISABLE_RAS_CE_LOG) { + pr_info("debug: disable kernel logs of correctable errors\n"); + adev->debug_disable_ce_logs = true; + } } static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags) @@ -2291,7 +2327,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, amdgpu_aspm = 0; if (amdgpu_virtual_display || - amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK)) + amdgpu_device_asic_has_dc_support(pdev, flags & AMD_ASIC_MASK)) supports_atomic = true; if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) { @@ -2313,14 +2349,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, return -ENOTSUPP; } + switch (flags & AMD_ASIC_MASK) { + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + case CHIP_OLAND: + case CHIP_HAINAN: #ifdef CONFIG_DRM_AMDGPU_SI - if (!amdgpu_si_support) { - switch (flags & AMD_ASIC_MASK) { - case CHIP_TAHITI: - case CHIP_PITCAIRN: - case CHIP_VERDE: - case CHIP_OLAND: - case CHIP_HAINAN: + if (!amdgpu_si_support) { dev_info(&pdev->dev, "SI support provided by radeon.\n"); dev_info(&pdev->dev, @@ -2328,16 +2364,18 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, ); return -ENODEV; } - } + break; +#else + dev_info(&pdev->dev, "amdgpu is built without SI support.\n"); + return -ENODEV; #endif + case CHIP_KAVERI: + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_KABINI: + case CHIP_MULLINS: #ifdef CONFIG_DRM_AMDGPU_CIK - if (!amdgpu_cik_support) { - switch (flags & AMD_ASIC_MASK) { - case CHIP_KAVERI: - case CHIP_BONAIRE: - case CHIP_HAWAII: - case CHIP_KABINI: - case CHIP_MULLINS: + if (!amdgpu_cik_support) { dev_info(&pdev->dev, "CIK support provided by radeon.\n"); dev_info(&pdev->dev, @@ -2345,8 +2383,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, ); return -ENODEV; } - } + break; +#else + dev_info(&pdev->dev, "amdgpu is built without CIK support.\n"); + return -ENODEV; #endif + default: + break; + } adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev); if (IS_ERR(adev)) @@ -2413,10 +2457,10 @@ retry_init: if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) { /* only need to skip on ATPX */ - if (amdgpu_device_supports_px(ddev)) + if (amdgpu_device_supports_px(adev)) dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); /* we want direct complete for BOCO */ - if (amdgpu_device_supports_boco(ddev)) + if (amdgpu_device_supports_boco(adev)) dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_SMART_PREPARE | DPM_FLAG_SMART_SUSPEND | DPM_FLAG_MAY_SKIP_RESUME); @@ -2449,9 +2493,9 @@ retry_init: * into D0 state. Then there will be a PMFW-aware D-state * transition(D0->D3) on runpm suspend. */ - if (amdgpu_device_supports_baco(ddev) && + if (amdgpu_device_supports_baco(adev) && !(adev->flags & AMD_IS_APU) && - (adev->asic_type >= CHIP_NAVI10)) + adev->asic_type >= CHIP_NAVI10 amdgpu_get_secondary_funcs(adev); } @@ -2468,6 +2512,7 @@ amdgpu_pci_remove(struct pci_dev *pdev) struct drm_device *dev = pci_get_drvdata(pdev); struct amdgpu_device *adev = drm_to_adev(dev); + amdgpu_ras_eeprom_check_and_recover(adev); amdgpu_xcp_dev_unplug(adev); amdgpu_gmc_prepare_nps_mode_change(adev); drm_dev_unplug(dev); @@ -2497,6 +2542,10 @@ amdgpu_pci_shutdown(struct pci_dev *pdev) if (amdgpu_ras_intr_triggered()) return; + /* device may not be resumed here, return immediately in this case */ + if (adev->in_s4 && adev->in_suspend) + return; + /* if we are running in a VM, make sure the device * torn down properly on reboot/shutdown.
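Stepping back to the probe-path rework above: the ASIC switch is now always compiled in, and each legacy family's case either defers to radeon when the support module option is unset or reports that the support was configured out. A compressed sketch of the SI half (CIK mirrors it):

/* Sketch: per-family Kconfig gating in the probe path. */
static int sketch_gate_legacy_asic(struct pci_dev *pdev, unsigned long flags)
{
	switch (flags & AMD_ASIC_MASK) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#ifdef CONFIG_DRM_AMDGPU_SI
		if (!amdgpu_si_support)
			return -ENODEV;	/* radeon owns the device */
		break;
#else
		return -ENODEV;		/* built without SI support */
#endif
	default:
		break;
	}
	return 0;
}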
* unfortunately we can't detect certain @@ -2513,11 +2562,14 @@ static int amdgpu_pmops_prepare(struct device *dev) struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(drm_dev); + /* device may not be resumed here, return immediately in this case */ + if (adev->in_s4 && adev->in_suspend) + return 0; + /* Return a positive number here so * DPM_FLAG_SMART_SUSPEND works properly */ - if (amdgpu_device_supports_boco(drm_dev) && - pm_runtime_suspended(dev)) + if (amdgpu_device_supports_boco(adev) && pm_runtime_suspended(dev)) return 1; /* if we will not support s3 or s2i for the device @@ -2532,7 +2584,7 @@ static int amdgpu_pmops_prepare(struct device *dev) static void amdgpu_pmops_complete(struct device *dev) { - /* nothing to do */ + amdgpu_device_complete(dev_get_drvdata(dev)); } static int amdgpu_pmops_suspend(struct device *dev) @@ -2544,8 +2596,20 @@ static int amdgpu_pmops_suspend(struct device *dev) adev->in_s0ix = true; else if (amdgpu_acpi_is_s3_active(adev)) adev->in_s3 = true; - if (!adev->in_s0ix && !adev->in_s3) + if (!adev->in_s0ix && !adev->in_s3) { + /* don't allow going deep first time followed by s2idle the next time */ + if (adev->last_suspend_state != PM_SUSPEND_ON && + adev->last_suspend_state != pm_suspend_target_state) { + drm_err_once(drm_dev, "Unsupported suspend state %d\n", + pm_suspend_target_state); + return -EINVAL; + } return 0; + } + + /* cache the state last used for suspend */ + adev->last_suspend_state = pm_suspend_target_state; + return amdgpu_device_suspend(drm_dev, true); } @@ -2599,18 +2663,22 @@ static int amdgpu_pmops_freeze(struct device *dev) static int amdgpu_pmops_thaw(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - struct amdgpu_device *adev = drm_to_adev(drm_dev); - int r; - r = amdgpu_device_resume(drm_dev, true); - adev->in_s4 = false; + /* do not resume device if it's normal hibernation */ + if (!pm_hibernate_is_recovering()) + return 0; - return r; + return amdgpu_device_resume(drm_dev, true); } static int amdgpu_pmops_poweroff(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(drm_dev); + + /* device may not be resumed here, return immediately in this case */ + if (adev->in_s4 && adev->in_suspend) + return 0; return amdgpu_device_suspend(drm_dev, true); } @@ -2618,9 +2686,6 @@ static int amdgpu_pmops_restore(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - struct amdgpu_device *adev = drm_to_adev(drm_dev); - - adev->in_s4 = false; return amdgpu_device_resume(drm_dev, true); } @@ -2692,6 +2757,29 @@ static int amdgpu_runtime_idle_check_display(struct device *dev) return 0; } +static int amdgpu_runtime_idle_check_userq(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct amdgpu_device *adev = drm_to_adev(drm_dev); + struct amdgpu_usermode_queue *queue; + struct amdgpu_userq_mgr *uqm, *tmp; + int queue_id; + int ret = 0; + + mutex_lock(&adev->userq_mutex); + list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { + idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { + ret = -EBUSY; + goto done; + } + } +done: + mutex_unlock(&adev->userq_mutex); + + return ret; +} + static int amdgpu_pmops_runtime_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); @@ -2707,6 +2795,9 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) ret =
amdgpu_runtime_idle_check_display(dev); if (ret) return ret; + ret = amdgpu_runtime_idle_check_userq(dev); + if (ret) + return ret; /* wait for all rings to drain before suspending */ for (i = 0; i < AMDGPU_MAX_RINGS; i++) { @@ -2760,7 +2851,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) /* nothing to do */ } else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) { - amdgpu_device_baco_enter(drm_dev); + amdgpu_device_baco_enter(adev); } dev_dbg(&pdev->dev, "asic/device is runtime suspended\n"); @@ -2801,7 +2892,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) pci_set_master(pdev); } else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) { - amdgpu_device_baco_exit(drm_dev); + amdgpu_device_baco_exit(adev); } ret = amdgpu_device_resume(drm_dev, false); if (ret) { @@ -2828,12 +2919,30 @@ static int amdgpu_pmops_runtime_idle(struct device *dev) } ret = amdgpu_runtime_idle_check_display(dev); + if (ret) + goto done; + ret = amdgpu_runtime_idle_check_userq(dev); +done: pm_runtime_mark_last_busy(dev); pm_runtime_autosuspend(dev); return ret; } +static int amdgpu_drm_release(struct inode *inode, struct file *filp) +{ + struct drm_file *file_priv = filp->private_data; + struct amdgpu_fpriv *fpriv = file_priv->driver_priv; + + if (fpriv) { + fpriv->evf_mgr.fd_closing = true; + amdgpu_eviction_fence_destroy(&fpriv->evf_mgr); + amdgpu_userq_mgr_fini(&fpriv->userq_mgr); + } + + return drm_release(inode, filp); +} + long amdgpu_drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { @@ -2885,7 +2994,7 @@ static const struct file_operations amdgpu_driver_kms_fops = { .owner = THIS_MODULE, .open = drm_open, .flush = amdgpu_flush, - .release = drm_release, + .release = amdgpu_drm_release, .unlocked_ioctl = amdgpu_drm_ioctl, .mmap = drm_gem_mmap, .poll = drm_poll, @@ -2932,6 +3041,9 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = { DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_USERQ, amdgpu_userq_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_SIGNAL, amdgpu_userq_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_WAIT, amdgpu_userq_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), }; static const struct drm_driver amdgpu_kms_driver = { @@ -3018,7 +3130,7 @@ static int __init amdgpu_init(void) if (r) goto error_sync; - r = amdgpu_fence_slab_init(); + r = amdgpu_userq_fence_slab_init(); if (r) goto error_fence; @@ -3052,7 +3164,7 @@ static void __exit amdgpu_exit(void) amdgpu_unregister_atpx_handler(); amdgpu_acpi_release(); amdgpu_sync_fini(); - amdgpu_fence_slab_fini(); + amdgpu_userq_fence_slab_fini(); mmu_notifier_synchronize(); amdgpu_xcp_drv_release(); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c new file mode 100644 index 000000000000..23d7d0b0d625 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c @@ -0,0 +1,241 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2024 Advanced Micro Devices, Inc. 
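Before the new eviction-fence file begins in earnest, one note on the runtime-PM changes above: both the suspend and idle paths now apply the same two vetoes, display first, then user queues. A small sketch of the combined gate, reusing the two checkers added by this patch:

/* Sketch: combined runtime-idle gate; nonzero return vetoes suspend. */
static int sketch_runtime_idle_gate(struct device *dev)
{
	int ret;

	ret = amdgpu_runtime_idle_check_display(dev);
	if (ret)
		return ret;	/* display still busy */

	/* returns -EBUSY as soon as any user queue exists */
	return amdgpu_runtime_idle_check_userq(dev);
}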
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include <linux/sched.h> +#include <drm/drm_exec.h> +#include "amdgpu.h" + +#define work_to_evf_mgr(w, name) container_of(w, struct amdgpu_eviction_fence_mgr, name) +#define evf_mgr_to_fpriv(e) container_of(e, struct amdgpu_fpriv, evf_mgr) + +static const char * +amdgpu_eviction_fence_get_driver_name(struct dma_fence *fence) +{ + return "amdgpu_eviction_fence"; +} + +static const char * +amdgpu_eviction_fence_get_timeline_name(struct dma_fence *f) +{ + struct amdgpu_eviction_fence *ef; + + ef = container_of(f, struct amdgpu_eviction_fence, base); + return ef->timeline_name; +} + +int +amdgpu_eviction_fence_replace_fence(struct amdgpu_eviction_fence_mgr *evf_mgr, + struct drm_exec *exec) +{ + struct amdgpu_eviction_fence *old_ef, *new_ef; + struct drm_gem_object *obj; + unsigned long index; + int ret; + + if (evf_mgr->ev_fence && + !dma_fence_is_signaled(&evf_mgr->ev_fence->base)) + return 0; + /* + * Steps to replace eviction fence: + * * lock all objects in exec (caller) + * * create a new eviction fence + * * update new eviction fence in evf_mgr + * * attach the new eviction fence to BOs + * * release the old fence + * * unlock the objects (caller) + */ + new_ef = amdgpu_eviction_fence_create(evf_mgr); + if (!new_ef) { + DRM_ERROR("Failed to create new eviction fence\n"); + return -ENOMEM; + } + + /* Update the eviction fence now */ + spin_lock(&evf_mgr->ev_fence_lock); + old_ef = evf_mgr->ev_fence; + evf_mgr->ev_fence = new_ef; + spin_unlock(&evf_mgr->ev_fence_lock); + + /* Attach the new fence */ + drm_exec_for_each_locked_object(exec, index, obj) { + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + + if (!bo) + continue; + ret = amdgpu_eviction_fence_attach(evf_mgr, bo); + if (ret) { + DRM_ERROR("Failed to attach new eviction fence\n"); + goto free_err; + } + } + + /* Free old fence */ + if (old_ef) + dma_fence_put(&old_ef->base); + return 0; + +free_err: + kfree(new_ef); + return ret; +} + +static void +amdgpu_eviction_fence_suspend_worker(struct work_struct *work) +{ + struct amdgpu_eviction_fence_mgr *evf_mgr = work_to_evf_mgr(work, suspend_work.work); + struct amdgpu_fpriv *fpriv = evf_mgr_to_fpriv(evf_mgr); + struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr; + struct amdgpu_eviction_fence *ev_fence; + + mutex_lock(&uq_mgr->userq_mutex); + spin_lock(&evf_mgr->ev_fence_lock); + ev_fence = evf_mgr->ev_fence; + if (ev_fence) + dma_fence_get(&ev_fence->base); + else + goto unlock; +
spin_unlock(&evf_mgr->ev_fence_lock); + + amdgpu_userq_evict(uq_mgr, ev_fence); + + mutex_unlock(&uq_mgr->userq_mutex); + dma_fence_put(&ev_fence->base); + return; + +unlock: + spin_unlock(&evf_mgr->ev_fence_lock); + mutex_unlock(&uq_mgr->userq_mutex); +} + +static bool amdgpu_eviction_fence_enable_signaling(struct dma_fence *f) +{ + struct amdgpu_eviction_fence_mgr *evf_mgr; + struct amdgpu_eviction_fence *ev_fence; + + if (!f) + return true; + + ev_fence = to_ev_fence(f); + evf_mgr = ev_fence->evf_mgr; + + schedule_delayed_work(&evf_mgr->suspend_work, 0); + return true; +} + +static const struct dma_fence_ops amdgpu_eviction_fence_ops = { + .get_driver_name = amdgpu_eviction_fence_get_driver_name, + .get_timeline_name = amdgpu_eviction_fence_get_timeline_name, + .enable_signaling = amdgpu_eviction_fence_enable_signaling, +}; + +void amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr, + struct amdgpu_eviction_fence *ev_fence) +{ + spin_lock(&evf_mgr->ev_fence_lock); + dma_fence_signal(&ev_fence->base); + spin_unlock(&evf_mgr->ev_fence_lock); +} + +struct amdgpu_eviction_fence * +amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr) +{ + struct amdgpu_eviction_fence *ev_fence; + + ev_fence = kzalloc(sizeof(*ev_fence), GFP_KERNEL); + if (!ev_fence) + return NULL; + + ev_fence->evf_mgr = evf_mgr; + get_task_comm(ev_fence->timeline_name, current); + spin_lock_init(&ev_fence->lock); + dma_fence_init64(&ev_fence->base, &amdgpu_eviction_fence_ops, + &ev_fence->lock, evf_mgr->ev_fence_ctx, + atomic_inc_return(&evf_mgr->ev_fence_seq)); + return ev_fence; +} + +void amdgpu_eviction_fence_destroy(struct amdgpu_eviction_fence_mgr *evf_mgr) +{ + struct amdgpu_eviction_fence *ev_fence; + + /* Wait for any pending work to execute */ + flush_delayed_work(&evf_mgr->suspend_work); + + spin_lock(&evf_mgr->ev_fence_lock); + ev_fence = evf_mgr->ev_fence; + spin_unlock(&evf_mgr->ev_fence_lock); + + if (!ev_fence) + return; + + dma_fence_wait(&ev_fence->base, false); + + /* Last unref of ev_fence */ + dma_fence_put(&ev_fence->base); +} + +int amdgpu_eviction_fence_attach(struct amdgpu_eviction_fence_mgr *evf_mgr, + struct amdgpu_bo *bo) +{ + struct amdgpu_eviction_fence *ev_fence; + struct dma_resv *resv = bo->tbo.base.resv; + int ret; + + if (!resv) + return 0; + + ret = dma_resv_reserve_fences(resv, 1); + if (ret) { + DRM_DEBUG_DRIVER("Failed to resv fence space\n"); + return ret; + } + + spin_lock(&evf_mgr->ev_fence_lock); + ev_fence = evf_mgr->ev_fence; + if (ev_fence) + dma_resv_add_fence(resv, &ev_fence->base, DMA_RESV_USAGE_BOOKKEEP); + spin_unlock(&evf_mgr->ev_fence_lock); + + return 0; +} + +void amdgpu_eviction_fence_detach(struct amdgpu_eviction_fence_mgr *evf_mgr, + struct amdgpu_bo *bo) +{ + struct dma_fence *stub = dma_fence_get_stub(); + + dma_resv_replace_fences(bo->tbo.base.resv, evf_mgr->ev_fence_ctx, + stub, DMA_RESV_USAGE_BOOKKEEP); + dma_fence_put(stub); +} + +int amdgpu_eviction_fence_init(struct amdgpu_eviction_fence_mgr *evf_mgr) +{ + /* This needs to be done one time per open */ + atomic_set(&evf_mgr->ev_fence_seq, 0); + evf_mgr->ev_fence_ctx = dma_fence_context_alloc(1); + spin_lock_init(&evf_mgr->ev_fence_lock); + + INIT_DELAYED_WORK(&evf_mgr->suspend_work, amdgpu_eviction_fence_suspend_worker); + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h new file mode 100644 index 000000000000..fcd867b7147d --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h 
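With amdgpu_eviction_fence.c complete, the intended per-fd life cycle can be read off its exports: init once per open, attach as BOs join the VM, let enable_signaling kick the suspend worker that evicts the fd's user queues, and destroy on close. A sketch of that flow under those assumptions, error handling elided:

/* Sketch: per-fd eviction fence flow (error handling elided). */
static int sketch_fd_open(struct amdgpu_fpriv *fpriv)
{
	return amdgpu_eviction_fence_init(&fpriv->evf_mgr);
}

static int sketch_bo_added(struct amdgpu_fpriv *fpriv, struct amdgpu_bo *bo)
{
	/* reserves a fence slot and adds the current fence as BOOKKEEP */
	return amdgpu_eviction_fence_attach(&fpriv->evf_mgr, bo);
}

static void sketch_fd_close(struct amdgpu_fpriv *fpriv)
{
	fpriv->evf_mgr.fd_closing = true;
	amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);	/* flushes worker */
}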
@@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef AMDGPU_EV_FENCE_H_ +#define AMDGPU_EV_FENCE_H_ + +struct amdgpu_eviction_fence { + struct dma_fence base; + spinlock_t lock; + char timeline_name[TASK_COMM_LEN]; + struct amdgpu_eviction_fence_mgr *evf_mgr; +}; + +struct amdgpu_eviction_fence_mgr { + u64 ev_fence_ctx; + atomic_t ev_fence_seq; + spinlock_t ev_fence_lock; + struct amdgpu_eviction_fence *ev_fence; + struct delayed_work suspend_work; + uint8_t fd_closing; +}; + +/* Eviction fence helper functions */ +struct amdgpu_eviction_fence * +amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr); + +void +amdgpu_eviction_fence_destroy(struct amdgpu_eviction_fence_mgr *evf_mgr); + +int +amdgpu_eviction_fence_attach(struct amdgpu_eviction_fence_mgr *evf_mgr, + struct amdgpu_bo *bo); + +void +amdgpu_eviction_fence_detach(struct amdgpu_eviction_fence_mgr *evf_mgr, + struct amdgpu_bo *bo); + +int +amdgpu_eviction_fence_init(struct amdgpu_eviction_fence_mgr *evf_mgr); + +void +amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr, + struct amdgpu_eviction_fence *ev_fence); + +int +amdgpu_eviction_fence_replace_fence(struct amdgpu_eviction_fence_mgr *evf_mgr, + struct drm_exec *exec); +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 2f24a6aa13bf..9e7506965cab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -42,37 +42,6 @@ #include "amdgpu_reset.h" /* - * Fences mark an event in the GPUs pipeline and are used - * for GPU/CPU synchronization. When the fence is written, - * it is expected that all buffers associated with that fence - * are no longer in use by the associated ring on the GPU and - * that the relevant GPU caches have been flushed. - */ - -struct amdgpu_fence { - struct dma_fence base; - - /* RB, DMA, etc. 
*/ - struct amdgpu_ring *ring; - ktime_t start_timestamp; -}; - -static struct kmem_cache *amdgpu_fence_slab; - -int amdgpu_fence_slab_init(void) -{ - amdgpu_fence_slab = KMEM_CACHE(amdgpu_fence, SLAB_HWCACHE_ALIGN); - if (!amdgpu_fence_slab) - return -ENOMEM; - return 0; -} - -void amdgpu_fence_slab_fini(void) -{ - rcu_barrier(); - kmem_cache_destroy(amdgpu_fence_slab); -} -/* * Cast helper */ static const struct dma_fence_ops amdgpu_fence_ops; @@ -130,14 +99,14 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring) * * @ring: ring the fence is associated with * @f: resulting fence object - * @job: job the fence is embedded in + * @af: amdgpu fence input * @flags: flags to pass into the subordinate .emit_fence() call * * Emits a fence command on the requested ring (all asics). * Returns 0 on success, -ENOMEM on failure. */ -int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job, - unsigned int flags) +int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, + struct amdgpu_fence *af, unsigned int flags) { struct amdgpu_device *adev = ring->adev; struct dma_fence *fence; @@ -146,40 +115,35 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd uint32_t seq; int r; - if (job == NULL) { - /* create a sperate hw fence */ - am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC); - if (am_fence == NULL) + if (!af) { + /* create a separate hw fence */ + am_fence = kzalloc(sizeof(*am_fence), GFP_KERNEL); + if (!am_fence) return -ENOMEM; - fence = &am_fence->base; - am_fence->ring = ring; + am_fence->context = 0; } else { - /* take use of job-embedded fence */ - fence = &job->hw_fence; + am_fence = af; } + fence = &am_fence->base; + am_fence->ring = ring; seq = ++ring->fence_drv.sync_seq; - if (job && job->job_run_counter) { - /* reinit seq for resubmitted jobs */ - fence->seqno = seq; - /* TO be inline with external fence creation and other drivers */ + am_fence->seq = seq; + if (af) { + dma_fence_init(fence, &amdgpu_job_fence_ops, + &ring->fence_drv.lock, + adev->fence_context + ring->idx, seq); + /* Against remove in amdgpu_job_{free, free_cb} */ dma_fence_get(fence); } else { - if (job) { - dma_fence_init(fence, &amdgpu_job_fence_ops, - &ring->fence_drv.lock, - adev->fence_context + ring->idx, seq); - /* Against remove in amdgpu_job_{free, free_cb} */ - dma_fence_get(fence); - } else { - dma_fence_init(fence, &amdgpu_fence_ops, - &ring->fence_drv.lock, - adev->fence_context + ring->idx, seq); - } + dma_fence_init(fence, &amdgpu_fence_ops, + &ring->fence_drv.lock, + adev->fence_context + ring->idx, seq); } amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, seq, flags | AMDGPU_FENCE_FLAG_INT); + amdgpu_fence_save_wptr(fence); pm_runtime_get_noresume(adev_to_drm(adev)->dev); ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; if (unlikely(rcu_dereference_protected(*ptr, 1))) { @@ -280,7 +244,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring) } while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq); - if (del_timer(&ring->fence_drv.fallback_timer) && + if (timer_delete(&ring->fence_drv.fallback_timer) && seq != ring->fence_drv.sync_seq) amdgpu_fence_schedule_fallback(ring); @@ -292,6 +256,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring) do { struct dma_fence *fence, **ptr; + struct amdgpu_fence *am_fence; ++last_seq; last_seq &= drv->num_fences_mask; @@ -304,6 +269,12 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring) if (!fence) continue; + /* Save the wptr in 
the fence driver so we know what the last processed + * wptr was. This is required for re-emitting the ring state for + * queues that are reset but are not guilty and thus have no guilty fence. + */ + am_fence = container_of(fence, struct amdgpu_fence, base); + drv->signalled_wptr = am_fence->wptr; dma_fence_signal(fence); dma_fence_put(fence); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); @@ -322,11 +293,13 @@ */ static void amdgpu_fence_fallback(struct timer_list *t) { - struct amdgpu_ring *ring = from_timer(ring, t, - fence_drv.fallback_timer); + struct amdgpu_ring *ring = timer_container_of(ring, t, + fence_drv.fallback_timer); if (amdgpu_fence_process(ring)) - DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name); + dev_warn(ring->adev->dev, + "Fence fallback timer expired on ring %s\n", + ring->name); } /** @@ -618,7 +591,7 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev) amdgpu_irq_put(adev, ring->fence_drv.irq_src, ring->fence_drv.irq_type); - del_timer_sync(&ring->fence_drv.fallback_timer); + timer_delete_sync(&ring->fence_drv.fallback_timer); } } @@ -718,7 +691,7 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring) * it right here or we won't be able to track them in fence_drv * and they will remain unsignaled during sa_bo free. */ - job = container_of(old, struct amdgpu_job, hw_fence); + job = container_of(old, struct amdgpu_job, hw_fence.base); if (!job->base.s_fence && !dma_fence_is_signaled(old)) dma_fence_signal(old); RCU_INIT_POINTER(*ptr, NULL); @@ -764,6 +737,86 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring) amdgpu_fence_process(ring); } + +/** + * Kernel queue reset handling + * + * The driver can reset individual queues for most engines, but those queues + * may contain work from multiple contexts. Resetting the queue will + * lose all of that state. In order to minimize the collateral damage, the + * driver will save the ring contents which are not associated with the guilty + * context prior to resetting the queue. After resetting the queue the queue + * contents from the other contexts are re-emitted to the rings so that they can + * be processed by the engine. To handle this, we save the queue's write + * pointer (wptr) in the fences associated with each context. If we get a + * queue timeout, we can then use the wptrs from the fences to determine + * which data needs to be saved out of the queue's ring buffer. + */ + +/** + * amdgpu_fence_driver_guilty_force_completion - force signal of specified sequence + * + * @fence: fence of the ring to signal + * + */ +void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence) +{ + dma_fence_set_error(&fence->base, -ETIME); + amdgpu_fence_write(fence->ring, fence->seq); + amdgpu_fence_process(fence->ring); +} + +void amdgpu_fence_save_wptr(struct dma_fence *fence) +{ + struct amdgpu_fence *am_fence = container_of(fence, struct amdgpu_fence, base); + + am_fence->wptr = am_fence->ring->wptr; +} + +static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring, + u64 start_wptr, u32 end_wptr) +{ + unsigned int first_idx = start_wptr & ring->buf_mask; + unsigned int last_idx = end_wptr & ring->buf_mask; + unsigned int i; + + /* Backup the contents of the ring buffer.
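To make the masking in this helper concrete: ring indices are masked, never bounds-checked, so the copy walks from first_idx and wraps through zero until it meets last_idx. With a hypothetical 256-entry ring (buf_mask = 0xff), start_wptr = 0x1fa and end_wptr = 0x204 give first_idx = 0xfa and last_idx = 0x04, so entries 0xfa..0xff then 0x00..0x03 are copied, ten in all. A standalone sketch:

/* Sketch: copy [start, end) out of a power-of-two ring with wraparound. */
static unsigned int sketch_ring_copy(const u32 *ring, u32 buf_mask,
				     u64 start_wptr, u32 end_wptr, u32 *out)
{
	unsigned int first = start_wptr & buf_mask;
	unsigned int last = end_wptr & buf_mask;
	unsigned int n = 0, i;

	for (i = first; i != last; ++i, i &= buf_mask)
		out[n++] = ring[i];	/* the mask folds i back to 0 at the top */

	return n;
}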
*/ + for (i = first_idx; i != last_idx; ++i, i &= ring->buf_mask) + ring->ring_backup[ring->ring_backup_entries_to_copy++] = ring->ring[i]; +} + +void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring, + struct amdgpu_fence *guilty_fence) +{ + struct dma_fence *unprocessed; + struct dma_fence __rcu **ptr; + struct amdgpu_fence *fence; + u64 wptr, i, seqno; + + seqno = amdgpu_fence_read(ring); + wptr = ring->fence_drv.signalled_wptr; + ring->ring_backup_entries_to_copy = 0; + + for (i = seqno + 1; i <= ring->fence_drv.sync_seq; ++i) { + ptr = &ring->fence_drv.fences[i & ring->fence_drv.num_fences_mask]; + rcu_read_lock(); + unprocessed = rcu_dereference(*ptr); + + if (unprocessed && !dma_fence_is_signaled(unprocessed)) { + fence = container_of(unprocessed, struct amdgpu_fence, base); + + /* save everything if the ring is not guilty, otherwise + * just save the content from other contexts. + */ + if (!guilty_fence || (fence->context != guilty_fence->context)) + amdgpu_ring_backup_unprocessed_command(ring, wptr, + fence->wptr); + wptr = fence->wptr; + } + rcu_read_unlock(); + } +} + /* * Common fence implementation */ @@ -780,7 +833,7 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f) static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f) { - struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence); + struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base); return (const char *)to_amdgpu_ring(job->base.sched)->name; } @@ -810,7 +863,7 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f) */ static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f) { - struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence); + struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base); if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer)) amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched)); @@ -830,7 +883,7 @@ static void amdgpu_fence_free(struct rcu_head *rcu) struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); /* free fence_slab if it's separated fence*/ - kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f)); + kfree(to_amdgpu_fence(f)); } /** @@ -845,7 +898,7 @@ static void amdgpu_job_fence_free(struct rcu_head *rcu) struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); /* free job if fence has a parent job */ - kfree(container_of(f, struct amdgpu_job, hw_fence)); + kfree(container_of(f, struct amdgpu_job, hw_fence.base)); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c index 1ae88c459da5..b0082aa7f3c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c @@ -144,7 +144,8 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) /* If algo exists, it means that the i2c_adapter's initialized */ if (!adev->pm.fru_eeprom_i2c_bus || !adev->pm.fru_eeprom_i2c_bus->algo) { - DRM_WARN("Cannot access FRU, EEPROM accessor not initialized"); + dev_warn(adev->dev, + "Cannot access FRU, EEPROM accessor not initialized"); return -ENODEV; } @@ -152,19 +153,22 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, fru_addr, buf, sizeof(buf)); if (len != 8) { - DRM_ERROR("Couldn't read the IPMI Common Header: %d", len); + dev_err(adev->dev, "Couldn't read the IPMI Common Header: %d", + len); return len < 0 ? 
len : -EIO; } if (buf[0] != 1) { - DRM_ERROR("Bad IPMI Common Header version: 0x%02x", buf[0]); + dev_err(adev->dev, "Bad IPMI Common Header version: 0x%02x", + buf[0]); return -EIO; } for (csum = 0; len > 0; len--) csum += buf[len - 1]; if (csum) { - DRM_ERROR("Bad IPMI Common Header checksum: 0x%02x", csum); + dev_err(adev->dev, "Bad IPMI Common Header checksum: 0x%02x", + csum); return -EIO; } @@ -179,12 +183,14 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) /* Read the header of the PIA. */ len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, buf, 3); if (len != 3) { - DRM_ERROR("Couldn't read the Product Info Area header: %d", len); + dev_err(adev->dev, + "Couldn't read the Product Info Area header: %d", len); return len < 0 ? len : -EIO; } if (buf[0] != 1) { - DRM_ERROR("Bad IPMI Product Info Area version: 0x%02x", buf[0]); + dev_err(adev->dev, "Bad IPMI Product Info Area version: 0x%02x", + buf[0]); return -EIO; } @@ -197,14 +203,16 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, pia, size); if (len != size) { kfree(pia); - DRM_ERROR("Couldn't read the Product Info Area: %d", len); + dev_err(adev->dev, "Couldn't read the Product Info Area: %d", + len); return len < 0 ? len : -EIO; } for (csum = 0; size > 0; size--) csum += pia[size - 1]; if (csum) { - DRM_ERROR("Bad Product Info Area checksum: 0x%02x", csum); + dev_err(adev->dev, "Bad Product Info Area checksum: 0x%02x", + csum); kfree(pia); return -EIO; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 69429df09477..6626a6e64ff5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -36,6 +36,7 @@ #include <drm/drm_exec.h> #include <drm/drm_gem_ttm_helper.h> #include <drm/ttm/ttm_tt.h> +#include <drm/drm_syncobj.h> #include "amdgpu.h" #include "amdgpu_display.h" @@ -44,6 +45,114 @@ #include "amdgpu_xgmi.h" #include "amdgpu_vm.h" +static int +amdgpu_gem_add_input_fence(struct drm_file *filp, + uint64_t syncobj_handles_array, + uint32_t num_syncobj_handles) +{ + struct dma_fence *fence; + uint32_t *syncobj_handles; + int ret, i; + + if (!num_syncobj_handles) + return 0; + + syncobj_handles = memdup_user(u64_to_user_ptr(syncobj_handles_array), + size_mul(sizeof(uint32_t), num_syncobj_handles)); + if (IS_ERR(syncobj_handles)) + return PTR_ERR(syncobj_handles); + + for (i = 0; i < num_syncobj_handles; i++) { + + if (!syncobj_handles[i]) { + ret = -EINVAL; + goto free_memdup; + } + + ret = drm_syncobj_find_fence(filp, syncobj_handles[i], 0, 0, &fence); + if (ret) + goto free_memdup; + + dma_fence_wait(fence, false); + + /* TODO: optimize async handling */ + dma_fence_put(fence); + } + +free_memdup: + kfree(syncobj_handles); + return ret; +} + +static int +amdgpu_gem_update_timeline_node(struct drm_file *filp, + uint32_t syncobj_handle, + uint64_t point, + struct drm_syncobj **syncobj, + struct dma_fence_chain **chain) +{ + if (!syncobj_handle) + return 0; + + /* Find the sync object */ + *syncobj = drm_syncobj_find(filp, syncobj_handle); + if (!*syncobj) + return -ENOENT; + + if (!point) + return 0; + + /* Allocate the chain node */ + *chain = dma_fence_chain_alloc(); + if (!*chain) { + drm_syncobj_put(*syncobj); + return -ENOMEM; + } + + return 0; +} + +static void +amdgpu_gem_update_bo_mapping(struct drm_file *filp, + struct amdgpu_bo_va *bo_va, + uint32_t operation, + uint64_t point, + struct dma_fence *fence, + struct drm_syncobj *syncobj, 
+ struct dma_fence_chain *chain) +{ + struct amdgpu_bo *bo = bo_va ? bo_va->base.bo : NULL; + struct amdgpu_fpriv *fpriv = filp->driver_priv; + struct amdgpu_vm *vm = &fpriv->vm; + struct dma_fence *last_update; + + if (!syncobj) + return; + + /* Find the last update fence */ + switch (operation) { + case AMDGPU_VA_OP_MAP: + case AMDGPU_VA_OP_REPLACE: + if (bo && (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)) + last_update = vm->last_update; + else + last_update = bo_va->last_pt_update; + break; + case AMDGPU_VA_OP_UNMAP: + case AMDGPU_VA_OP_CLEAR: + last_update = fence; + break; + default: + return; + } + + /* Add fence to timeline */ + if (!point) + drm_syncobj_replace_fence(syncobj, last_update); + else + drm_syncobj_add_point(syncobj, chain, last_update, point); +} + static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf) { struct ttm_buffer_object *bo = vmf->vma->vm_private_data; @@ -184,6 +293,15 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj, bo_va = amdgpu_vm_bo_add(adev, vm, abo); else ++bo_va->ref_count; + + /* attach gfx eviction fence */ + r = amdgpu_eviction_fence_attach(&fpriv->evf_mgr, abo); + if (r) { + DRM_DEBUG_DRIVER("Failed to attach eviction fence to BO\n"); + amdgpu_bo_unreserve(abo); + return r; + } + amdgpu_bo_unreserve(abo); /* Validate and add eviction fence to DMABuf imports with dynamic @@ -199,8 +317,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj, */ if (!vm->is_compute_context || !vm->process_info) return 0; - if (!obj->import_attach || - !dma_buf_is_dynamic(obj->import_attach->dmabuf)) + if (!drm_gem_is_imported(obj) || !dma_buf_is_dynamic(obj->dma_buf)) return 0; mutex_lock_nested(&vm->process_info->lock, 1); if (!WARN_ON(!vm->process_info->eviction_fence)) { @@ -211,7 +328,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj, dev_warn(adev->dev, "validate_and_fence failed: %d\n", r); if (ti) { - dev_warn(adev->dev, "pid %d\n", ti->pid); + dev_warn(adev->dev, "pid %d\n", ti->task.pid); amdgpu_vm_put_task_info(ti); } } @@ -247,6 +364,9 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj, goto out_unlock; } + if (!amdgpu_vm_is_bo_always_valid(vm, bo)) + amdgpu_eviction_fence_detach(&fpriv->evf_mgr, bo); + bo_va = amdgpu_vm_bo_find(vm, bo); if (!bo_va || --bo_va->ref_count) goto out_unlock; @@ -321,10 +441,6 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, uint32_t handle, initial_domain; int r; - /* reject DOORBELLs until userspace code to use it is available */ - if (args->in.domains & AMDGPU_GEM_DOMAIN_DOORBELL) - return -EINVAL; - /* reject invalid gem flags */ if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_NO_CPU_ACCESS | @@ -638,18 +754,23 @@ out: * * Update the bo_va directly after setting its address. Errors are not * vital here, so they are not reported back to userspace. + * + * Returns resulting fence if freed BO(s) got cleared from the PT. + * otherwise stub fence in case of error. 
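The timeline helpers above follow the standard DRM syncobj rules: point 0 denotes a binary syncobj, so the fence simply replaces the current one, while a nonzero point requires the dma_fence_chain node that was pre-allocated in amdgpu_gem_update_timeline_node() so the add cannot fail at publish time. In sketch form:

/* Sketch: publish a fence to a syncobj, binary or timeline. */
static void sketch_publish_fence(struct drm_syncobj *syncobj,
				 struct dma_fence_chain *chain,
				 struct dma_fence *fence, u64 point)
{
	if (!point)
		drm_syncobj_replace_fence(syncobj, fence);	/* binary */
	else
		drm_syncobj_add_point(syncobj, chain, fence, point);
}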
*/ -static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct amdgpu_bo_va *bo_va, - uint32_t operation) +static struct dma_fence * +amdgpu_gem_va_update_vm(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + struct amdgpu_bo_va *bo_va, + uint32_t operation) { + struct dma_fence *fence = dma_fence_get_stub(); int r; if (!amdgpu_vm_ready(vm)) - return; + return fence; - r = amdgpu_vm_clear_freed(adev, vm, NULL); + r = amdgpu_vm_clear_freed(adev, vm, &fence); if (r) goto error; @@ -665,6 +786,8 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, error: if (r && r != -ERESTARTSYS) DRM_ERROR("Couldn't update BO_VA (%d)\n", r); + + return fence; } /** @@ -713,6 +836,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, struct amdgpu_fpriv *fpriv = filp->driver_priv; struct amdgpu_bo *abo; struct amdgpu_bo_va *bo_va; + struct drm_syncobj *timeline_syncobj = NULL; + struct dma_fence_chain *timeline_chain = NULL; + struct dma_fence *fence; struct drm_exec exec; uint64_t va_flags; uint64_t vm_size; @@ -774,6 +900,12 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, abo = NULL; } + r = amdgpu_gem_add_input_fence(filp, + args->input_fence_syncobj_handles, + args->num_syncobj_handles); + if (r) + goto error_put_gobj; + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES, 0); drm_exec_until_all_locked(&exec) { @@ -802,6 +934,14 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, bo_va = NULL; } + r = amdgpu_gem_update_timeline_node(filp, + args->vm_timeline_syncobj_out, + args->vm_timeline_point, + &timeline_syncobj, + &timeline_chain); + if (r) + goto error; + switch (args->operation) { case AMDGPU_VA_OP_MAP: va_flags = amdgpu_gem_va_map_flags(adev, args->flags); @@ -827,12 +967,24 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, default: break; } - if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm) - amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, - args->operation); + if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm) { + fence = amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, + args->operation); + + if (timeline_syncobj) + amdgpu_gem_update_bo_mapping(filp, bo_va, + args->operation, + args->vm_timeline_point, + fence, timeline_syncobj, + timeline_chain); + else + dma_fence_put(fence); + + } error: drm_exec_fini(&exec); +error_put_gobj: drm_gem_object_put(gobj); return r; } @@ -871,7 +1023,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, break; } case AMDGPU_GEM_OP_SET_PLACEMENT: - if (robj->tbo.base.import_attach && + if (drm_gem_is_imported(&robj->tbo.base) && args->value & AMDGPU_GEM_DOMAIN_VRAM) { r = -EINVAL; amdgpu_bo_unreserve(robj); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 72af5e5a894a..c80c8f543532 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -33,6 +33,7 @@ #include "amdgpu_reset.h" #include "amdgpu_xcp.h" #include "amdgpu_xgmi.h" +#include "nvd.h" /* delay 0.1 second to enable gfx off feature */ #define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100) @@ -74,14 +75,15 @@ bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, adev->gfx.mec_bitmap[xcc_id].queue_bitmap); } -int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, - int me, int pipe, int queue) +static int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, + int me, int pipe, int queue) { + int num_queue_per_pipe = 1; 
/* we only enable 1 KGQ per pipe */ int bit = 0; bit += me * adev->gfx.me.num_pipe_per_me - * adev->gfx.me.num_queue_per_pipe; - bit += pipe * adev->gfx.me.num_queue_per_pipe; + * num_queue_per_pipe; + bit += pipe * num_queue_per_pipe; bit += queue; return bit; @@ -147,7 +149,7 @@ static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev) static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev) { if (amdgpu_compute_multipipe != -1) { - DRM_INFO("amdgpu: forcing compute pipe policy %d\n", + dev_info(adev->dev, "amdgpu: forcing compute pipe policy %d\n", amdgpu_compute_multipipe); return amdgpu_compute_multipipe == 1; } @@ -238,8 +240,8 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev) { int i, queue, pipe; bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev); - int max_queues_per_me = adev->gfx.me.num_pipe_per_me * - adev->gfx.me.num_queue_per_pipe; + int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */ + int max_queues_per_me = adev->gfx.me.num_pipe_per_me * num_queue_per_pipe; if (multipipe_policy) { /* policy: amdgpu owns the first queue per pipe at this stage @@ -247,9 +249,9 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev) for (i = 0; i < max_queues_per_me; i++) { pipe = i % adev->gfx.me.num_pipe_per_me; queue = (i / adev->gfx.me.num_pipe_per_me) % - adev->gfx.me.num_queue_per_pipe; + num_queue_per_pipe; - set_bit(pipe * adev->gfx.me.num_queue_per_pipe + queue, + set_bit(pipe * num_queue_per_pipe + queue, adev->gfx.me.queue_bitmap); } } else { @@ -258,8 +260,9 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev) } /* update the number of active graphics rings */ - adev->gfx.num_gfx_rings = - bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES); + if (adev->gfx.num_gfx_rings) + adev->gfx.num_gfx_rings = + bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES); } static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev, @@ -671,7 +674,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id) * generation exposes more than 64 queues. 
If so, the * definition of queue_mask needs updating */ if (WARN_ON(i > (sizeof(queue_mask)*8))) { - DRM_ERROR("Invalid KCQ enabled: %d\n", i); + dev_err(adev->dev, "Invalid KCQ enabled: %d\n", i); break; } @@ -680,15 +683,15 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id) amdgpu_device_flush_hdp(adev, NULL); - DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe, - kiq_ring->queue); + dev_info(adev->dev, "kiq ring mec %d pipe %d q %d\n", kiq_ring->me, + kiq_ring->pipe, kiq_ring->queue); spin_lock(&kiq->ring_lock); r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * adev->gfx.num_compute_rings + kiq->pmf->set_resources_size); if (r) { - DRM_ERROR("Failed to lock KIQ (%d).\n", r); + dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r); spin_unlock(&kiq->ring_lock); return r; } @@ -709,7 +712,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id) r = amdgpu_ring_test_helper(kiq_ring); spin_unlock(&kiq->ring_lock); if (r) - DRM_ERROR("KCQ enable failed\n"); + dev_err(adev->dev, "KCQ enable failed\n"); return r; } @@ -731,7 +734,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id) r = amdgpu_mes_map_legacy_queue(adev, &adev->gfx.gfx_ring[j]); if (r) { - DRM_ERROR("failed to map gfx queue\n"); + dev_err(adev->dev, "failed to map gfx queue\n"); return r; } } @@ -745,7 +748,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id) r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * adev->gfx.num_gfx_rings); if (r) { - DRM_ERROR("Failed to lock KIQ (%d).\n", r); + dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r); spin_unlock(&kiq->ring_lock); return r; } @@ -766,7 +769,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id) r = amdgpu_ring_test_helper(kiq_ring); spin_unlock(&kiq->ring_lock); if (r) - DRM_ERROR("KGQ enable failed\n"); + dev_err(adev->dev, "KGQ enable failed\n"); return r; } @@ -1027,7 +1030,7 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, ih_data.head = *ras_if; - DRM_ERROR("CP ECC ERROR IRQ\n"); + dev_err(adev->dev, "CP ECC ERROR IRQ\n"); amdgpu_ras_interrupt_dispatch(adev, &ih_data); return 0; } @@ -1351,6 +1354,10 @@ static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev, struct amdgpu_device *adev = drm_to_adev(ddev); int mode; + /* Only minimal precaution taken to reject requests while in reset.*/ + if (amdgpu_in_reset(adev)) + return -EPERM; + mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr, AMDGPU_XCP_FL_NONE); @@ -1394,8 +1401,14 @@ static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev, return -EINVAL; } + /* Don't allow a switch while under reset */ + if (!down_read_trylock(&adev->reset_domain->sem)) + return -EPERM; + ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode); + up_read(&adev->reset_domain->sem); + if (ret) return ret; @@ -1438,9 +1451,11 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; struct drm_gpu_scheduler *sched = &ring->sched; struct drm_sched_entity entity; + static atomic_t counter; struct dma_fence *f; struct amdgpu_job *job; struct amdgpu_ib *ib; + void *owner; int i, r; /* Initialize the scheduler entity */ @@ -1451,13 +1466,21 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring) goto err; } - r = amdgpu_job_alloc_with_ib(ring->adev, &entity, NULL, - 64, 0, - &job); + /* + * Use some unique dummy value as the owner to make sure we execute + * the cleaner shader on each submission. 
The value just needs to change + * for each submission and is otherwise meaningless. + */ + owner = (void *)(unsigned long)atomic_inc_return(&counter); + + r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner, + 64, 0, &job); if (r) goto err; job->enforce_isolation = true; + /* always run the cleaner shader */ + job->run_cleaner_shader = true; ib = &job->ibs[0]; for (i = 0; i <= ring->funcs->align_mask; ++i) @@ -1544,6 +1567,9 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; + if (adev->gfx.disable_kq) + return -EPERM; + ret = kstrtol(buf, 0, &value); if (ret) @@ -1586,7 +1612,8 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev, * Provides the sysfs read interface to get the current settings of the 'enforce_isolation' * feature for each GPU partition. Reading from the 'enforce_isolation' * sysfs file returns the isolation settings for all partitions, where '0' - * indicates disabled and '1' indicates enabled. + * indicates disabled, '1' indicates enabled, '2' indicates enabled in legacy mode, + * and '3' indicates enabled without the cleaner shader. * * Return: The number of bytes read from the sysfs file. */ @@ -1621,9 +1648,12 @@ static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev, * @count: The size of the input data * * This function allows control over the 'enforce_isolation' feature, which - * serializes access to the graphics engine. Writing '1' or '0' to the - * 'enforce_isolation' sysfs file enables or disables process isolation for - * each partition. The input should specify the setting for all partitions. + * serializes access to the graphics engine. Writing to the 'enforce_isolation' + * sysfs file sets the isolation mode for each partition: '0' disables + * isolation, '1' enables isolation with the cleaner shader, '2' enables + * legacy isolation without the cleaner shader, and '3' enables process + * isolation without submitting the cleaner shader. The input should specify + * the setting for all partitions. * * Return: The number of bytes written to the sysfs file. */ @@ -1660,13 +1690,34 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev, return -EINVAL; for (i = 0; i < num_partitions; i++) { - if (partition_values[i] != 0 && partition_values[i] != 1) + if (partition_values[i] != 0 && + partition_values[i] != 1 && + partition_values[i] != 2 && + partition_values[i] != 3) return -EINVAL; } mutex_lock(&adev->enforce_isolation_mutex); - for (i = 0; i < num_partitions; i++) - adev->enforce_isolation[i] = partition_values[i]; + for (i = 0; i < num_partitions; i++) { + switch (partition_values[i]) { + case 0: + default: + adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE; + break; + case 1: + adev->enforce_isolation[i] = + AMDGPU_ENFORCE_ISOLATION_ENABLE; + break; + case 2: + adev->enforce_isolation[i] = + AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY; + break; + case 3: + adev->enforce_isolation[i] = + AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER; + break; + } + } mutex_unlock(&adev->enforce_isolation_mutex); amdgpu_mes_update_enforce_isolation(adev); @@ -1915,39 +1966,41 @@ void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev, static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx, bool enable) { - mutex_lock(&adev->gfx.kfd_sch_mutex); + mutex_lock(&adev->gfx.userq_sch_mutex); if (enable) { /* If the count is already 0, it means there's an imbalance bug somewhere.
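The gating that amdgpu_gfx_kfd_sch_ctrl() implements is easier to follow in isolation. A kernel-style model of the counting, with stop_schedulers()/queue_reenable() as hypothetical stand-ins for the userq/KFD stop calls and the enforce_isolation delayed work (cancelling a pending re-enable is elided):

/* Disable requests stop the schedulers on the 0 -> 1 edge of the
 * request count; enable requests queue the delayed re-enable on the
 * 1 -> 0 edge. */
struct sch_gate {
	struct mutex lock;
	u64 req_count;	/* outstanding "keep the schedulers off" requests */
	bool inactive;	/* schedulers currently stopped */
};

static void stop_schedulers(void);	/* hypothetical */
static void queue_reenable(void);	/* hypothetical */

static void sch_gate_ctrl(struct sch_gate *g, bool enable)
{
	mutex_lock(&g->lock);
	if (enable) {
		/* more enables than disables means a refcount bug */
		if (!WARN_ON_ONCE(g->req_count == 0) &&
		    --g->req_count == 0 && g->inactive)
			queue_reenable();	/* work flips inactive back */
	} else {
		if (g->req_count++ == 0 && !g->inactive) {
			stop_schedulers();
			g->inactive = true;
		}
	}
	mutex_unlock(&g->lock);
}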
* Note that the bug may be in a different caller than the one which triggers the * WARN_ON_ONCE. */ - if (WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx] == 0)) { + if (WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx] == 0)) { dev_err(adev->dev, "Attempted to enable KFD scheduler when reference count is already zero\n"); goto unlock; } - adev->gfx.kfd_sch_req_count[idx]--; + adev->gfx.userq_sch_req_count[idx]--; - if (adev->gfx.kfd_sch_req_count[idx] == 0 && - adev->gfx.kfd_sch_inactive[idx]) { + if (adev->gfx.userq_sch_req_count[idx] == 0 && + adev->gfx.userq_sch_inactive[idx]) { schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work, msecs_to_jiffies(adev->gfx.enforce_isolation_time[idx])); } } else { - if (adev->gfx.kfd_sch_req_count[idx] == 0) { + if (adev->gfx.userq_sch_req_count[idx] == 0) { cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work); - if (!adev->gfx.kfd_sch_inactive[idx]) { - amdgpu_amdkfd_stop_sched(adev, idx); - adev->gfx.kfd_sch_inactive[idx] = true; + if (!adev->gfx.userq_sch_inactive[idx]) { + amdgpu_userq_stop_sched_for_enforce_isolation(adev, idx); + if (adev->kfd.init_complete) + amdgpu_amdkfd_stop_sched(adev, idx); + adev->gfx.userq_sch_inactive[idx] = true; } } - adev->gfx.kfd_sch_req_count[idx]++; + adev->gfx.userq_sch_req_count[idx]++; } unlock: - mutex_unlock(&adev->gfx.kfd_sch_mutex); + mutex_unlock(&adev->gfx.userq_sch_mutex); } /** @@ -1992,12 +2045,13 @@ void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work) msecs_to_jiffies(1)); } else { /* Tell KFD to resume the runqueue */ - if (adev->kfd.init_complete) { - WARN_ON_ONCE(!adev->gfx.kfd_sch_inactive[idx]); - WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx]); + WARN_ON_ONCE(!adev->gfx.userq_sch_inactive[idx]); + WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx]); + + amdgpu_userq_start_sched_for_enforce_isolation(adev, idx); + if (adev->kfd.init_complete) amdgpu_amdkfd_start_sched(adev, idx); - adev->gfx.kfd_sch_inactive[idx] = false; - } + adev->gfx.userq_sch_inactive[idx] = false; } mutex_unlock(&adev->enforce_isolation_mutex); } @@ -2021,7 +2075,7 @@ amdgpu_gfx_enforce_isolation_wait_for_kfd(struct amdgpu_device *adev, bool wait = false; mutex_lock(&adev->enforce_isolation_mutex); - if (adev->enforce_isolation[idx]) { + if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) { /* set the initial values if nothing is set */ if (!adev->gfx.enforce_isolation_jiffies[idx]) { adev->gfx.enforce_isolation_jiffies[idx] = jiffies; @@ -2088,7 +2142,7 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring) amdgpu_gfx_enforce_isolation_wait_for_kfd(adev, idx); mutex_lock(&adev->enforce_isolation_mutex); - if (adev->enforce_isolation[idx]) { + if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) { if (adev->kfd.init_complete) sched_work = true; } @@ -2125,7 +2179,7 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring) return; mutex_lock(&adev->enforce_isolation_mutex); - if (adev->enforce_isolation[idx]) { + if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) { if (adev->kfd.init_complete) sched_work = true; } @@ -2174,6 +2228,9 @@ void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring) enum PP_SMC_POWER_PROFILE profile; int r; + if (amdgpu_dpm_is_overdrive_enabled(adev)) + return; + if (adev->gfx.num_gfx_rings) profile = PP_SMC_POWER_PROFILE_FULLSCREEN3D; else @@ -2204,11 +2261,84 @@ void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring) void amdgpu_gfx_profile_ring_end_use(struct 
amdgpu_ring *ring) { + struct amdgpu_device *adev = ring->adev; + + if (amdgpu_dpm_is_overdrive_enabled(adev)) + return; + atomic_dec(&ring->adev->gfx.total_submission_cnt); schedule_delayed_work(&ring->adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT); } +/** + * amdgpu_gfx_csb_preamble_start - Set CSB preamble start + * + * @buffer: This is an output variable that gets the PACKET3 preamble setup. + * + * Return: + * The updated index into @buffer. + */ +u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer) +{ + u32 count = 0; + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1)); + buffer[count++] = cpu_to_le32(0x80000000); + buffer[count++] = cpu_to_le32(0x80000000); + + return count; +} + +/** + * amdgpu_gfx_csb_data_parser - Parse CS data + * + * @adev: amdgpu_device pointer used to get the CS data and other gfx info. + * @buffer: Output buffer that receives the PACKET3 SET_CONTEXT_REG packets. + * @count: Index in @buffer at which to start writing. + * + * Return: + * The updated index into @buffer. + */ +u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count) +{ + const struct cs_section_def *sect = NULL; + const struct cs_extent_def *ext = NULL; + u32 i; + + for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { + for (ext = sect->section; ext->extent != NULL; ++ext) { + if (sect->id == SECT_CONTEXT) { + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); + buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START); + + for (i = 0; i < ext->reg_count; i++) + buffer[count++] = cpu_to_le32(ext->extent[i]); + } + } + } + + return count; +} + +/** + * amdgpu_gfx_csb_preamble_end - Set CSB preamble end + * + * @buffer: This is an output variable that gets the PACKET3 preamble end. + * @count: Index in @buffer at which to start writing the preamble end. + */ +void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count) +{ + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0)); + buffer[count++] = cpu_to_le32(0); +} + /* * debugfs for to enable/disable gfx job submission to specific core. */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 87e862188766..08f268dab8f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -170,10 +170,46 @@ struct amdgpu_kiq { #define AMDGPU_GFX_MAX_SE 4 #define AMDGPU_GFX_MAX_SH_PER_SE 2 +/** + * amdgpu_rb_config - Configure a single Render Backend (RB) + * + * Bad RBs are fused off and there is a harvest register the driver reads to + * determine which RB(s) are fused off so that the driver can configure the + * hardware state so that nothing gets sent to them. There are also user + * harvest registers that the driver can program to disable additional RBs, + * etc., for testing purposes. + */ struct amdgpu_rb_config { + /** + * @rb_backend_disable: + * + * The value captured from register RB_BACKEND_DISABLE indicates if the + * RB backend is disabled or not. + */ uint32_t rb_backend_disable; + + /** + * @user_rb_backend_disable: + * + * The value captured from register USER_RB_BACKEND_DISABLE indicates + * if the User RB backend is disabled or not.
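The three amdgpu_gfx_csb_* helpers above factor out the clear-state-buffer construction that each per-generation gfx file used to open-code. A sketch of how a gfx IP version might use them; the function name and the "IP-specific registers" step are illustrative, the helper calls are the real API:

static void gfx_vN_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count;

	if (adev->gfx.rlc.cs_data == NULL || buffer == NULL)
		return;

	/* PREAMBLE_BEGIN_CLEAR_STATE + CONTEXT_CONTROL */
	count = amdgpu_gfx_csb_preamble_start(buffer);

	/* one SET_CONTEXT_REG packet per SECT_CONTEXT extent */
	count = amdgpu_gfx_csb_data_parser(adev, buffer, count);

	/* IP-specific context registers (e.g. raster config) go here */

	/* PREAMBLE_END_CLEAR_STATE + CLEAR_STATE */
	amdgpu_gfx_csb_preamble_end(buffer, count);
}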
+ */ uint32_t user_rb_backend_disable; + + /** + * @raster_config: + * + * To set up all of the states, it is necessary to have two registers + * to keep all of the states. This field holds the first register. + */ uint32_t raster_config; + + /** + * @raster_config_1: + * + * To set up all of the states, it is necessary to have two registers + * to keep all of the states. This field holds the second register. + */ uint32_t raster_config_1; }; @@ -221,6 +257,13 @@ struct amdgpu_gfx_config { uint32_t macrotile_mode_array[16]; struct gb_addr_config gb_addr_config_fields; + + /** + * @rb_config: + * + * Matrix that keeps all the Render Backend (color and depth buffer + * handling) configuration on the 3D engine. + */ struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE]; /* gfx configure feature */ @@ -305,7 +348,8 @@ struct amdgpu_gfx_funcs { void (*init_spm_golden)(struct amdgpu_device *adev); void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable); int (*get_gfx_shadow_info)(struct amdgpu_device *adev, - struct amdgpu_gfx_shadow_info *shadow_info); + struct amdgpu_gfx_shadow_info *shadow_info, + bool skip_check); enum amdgpu_gfx_partition (*query_partition_mode)(struct amdgpu_device *adev); int (*switch_partition_mode)(struct amdgpu_device *adev, @@ -474,9 +518,9 @@ struct amdgpu_gfx { bool enable_cleaner_shader; struct amdgpu_isolation_work enforce_isolation[MAX_XCP]; /* Mutex for synchronizing KFD scheduler operations */ - struct mutex kfd_sch_mutex; - u64 kfd_sch_req_count[MAX_XCP]; - bool kfd_sch_inactive[MAX_XCP]; + struct mutex userq_sch_mutex; + u64 userq_sch_req_count[MAX_XCP]; + bool userq_sch_inactive[MAX_XCP]; unsigned long enforce_isolation_jiffies[MAX_XCP]; unsigned long enforce_isolation_time[MAX_XCP]; @@ -484,6 +528,9 @@ struct amdgpu_gfx { struct delayed_work idle_work; bool workload_profile_active; struct mutex workload_profile_mutex; + + bool disable_kq; + bool disable_uq; }; struct amdgpu_gfx_ras_reg_entry { @@ -503,7 +550,7 @@ struct amdgpu_gfx_ras_mem_id_entry { #define amdgpu_gfx_select_se_sh(adev, se, sh, instance, xcc_id) ((adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance), (xcc_id))) #define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid, xcc_id) ((adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid), (xcc_id))) #define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev)) -#define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si))) +#define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si), false)) /** * amdgpu_gfx_create_bitmask - create a bitmask @@ -550,8 +597,6 @@ bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, struct amdgpu_ring *ring); bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev, struct amdgpu_ring *ring); -int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me, - int pipe, int queue); bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me, int pipe, int queue); void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable); @@ -597,6 +642,9 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring); void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work); void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring); void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring); +u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer); +u32 
amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count); +void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count); void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev); void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 464625282872..97b562a79ea8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -38,6 +38,13 @@ #include <drm/drm_drv.h> #include <drm/ttm/ttm_tt.h> +static const u64 four_gb = 0x100000000ULL; + +bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev) +{ + return adev->gmc.xgmi.connected_to_cpu || amdgpu_virt_xgmi_migrate_enabled(adev); +} + /** * amdgpu_gmc_pdb0_alloc - allocate vram for pdb0 * @@ -251,10 +258,20 @@ void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1; mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id; mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1; - mc->gart_start = hive_vram_end + 1; + /* node_segment_size may not be 4GB aligned on SRIOV, so aligning up is needed. */ + mc->gart_start = ALIGN(hive_vram_end + 1, four_gb); mc->gart_end = mc->gart_start + mc->gart_size - 1; - mc->fb_start = hive_vram_start; - mc->fb_end = hive_vram_end; + if (amdgpu_virt_xgmi_migrate_enabled(adev)) { + /* set mc->vram_start to 0 to switch the returned GPU address of + * amdgpu_bo_create_reserved() from FB aperture to GART aperture. + */ + mc->vram_start = 0; + mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; + mc->visible_vram_size = min(mc->visible_vram_size, mc->real_vram_size); + } else { + mc->fb_start = hive_vram_start; + mc->fb_end = hive_vram_end; + } dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", mc->mc_vram_size >> 20, mc->vram_start, mc->vram_end, mc->real_vram_size >> 20); @@ -276,7 +293,6 @@ void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc, enum amdgpu_gart_placement gart_placement) { - const uint64_t four_gb = 0x100000000ULL; u64 size_af, size_bf; /*To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START*/ u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1); @@ -699,12 +715,10 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid, uint32_t flush_type, bool all_hub, uint32_t inst) { - u32 usec_timeout = amdgpu_sriov_vf(adev) ?
SRIOV_USEC_TIMEOUT : - adev->usec_timeout; struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring; struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst]; unsigned int ndw; - int r; + int r, cnt = 0; uint32_t seq; /* @@ -761,10 +775,21 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid, amdgpu_ring_commit(ring); spin_unlock(&adev->gfx.kiq[inst].ring_lock); - if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) { + + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + + might_sleep(); + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY && + !amdgpu_reset_pending(adev->reset_domain)) { + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + } + + if (cnt > MAX_KIQ_REG_TRY) { dev_err(adev->dev, "timeout waiting for kiq fence\n"); r = -ETIME; - } + } else + r = 0; } error_unlock_reset: @@ -1032,9 +1057,7 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev) */ u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes; u64 pde0_page_size = (1ULL<<adev->gmc.vmid0_page_table_block_size)<<21; - u64 vram_addr = adev->vm_manager.vram_base_offset - - adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; - u64 vram_end = vram_addr + vram_size; + u64 vram_addr, vram_end; u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo); int idx; @@ -1047,6 +1070,11 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev) flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1)); flags |= AMDGPU_PDE_PTE_FLAG(adev); + vram_addr = adev->vm_manager.vram_base_offset; + if (!amdgpu_virt_xgmi_migrate_enabled(adev)) + vram_addr -= adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; + vram_end = vram_addr + vram_size; + /* The first n PDE0 entries are used as PTE, * pointing to vram */ @@ -1221,6 +1249,10 @@ static ssize_t current_memory_partition_show( struct amdgpu_device *adev = drm_to_adev(ddev); enum amdgpu_memory_partition mode; + /* Only minimal precaution taken to reject requests while in reset */ + if (amdgpu_in_reset(adev)) + return -EPERM; + mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev); if ((mode >= ARRAY_SIZE(nps_desc)) || (BIT(mode) & AMDGPU_ALL_NPS_MASK) != BIT(mode)) @@ -1416,3 +1448,232 @@ bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev) return false; } + +enum amdgpu_memory_partition +amdgpu_gmc_get_vf_memory_partition(struct amdgpu_device *adev) +{ + switch (adev->gmc.num_mem_partitions) { + case 0: + return UNKNOWN_MEMORY_PARTITION_MODE; + case 1: + return AMDGPU_NPS1_PARTITION_MODE; + case 2: + return AMDGPU_NPS2_PARTITION_MODE; + case 4: + return AMDGPU_NPS4_PARTITION_MODE; + case 8: + return AMDGPU_NPS8_PARTITION_MODE; + default: + return AMDGPU_NPS1_PARTITION_MODE; + } +} + +enum amdgpu_memory_partition +amdgpu_gmc_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes) +{ + enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE; + + if (adev->nbio.funcs && + adev->nbio.funcs->get_memory_partition_mode) + mode = adev->nbio.funcs->get_memory_partition_mode(adev, + supp_modes); + else + dev_warn(adev->dev, "memory partition mode query is not supported\n"); + + return mode; +} + +enum amdgpu_memory_partition +amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev) +{ + if (amdgpu_sriov_vf(adev)) + return amdgpu_gmc_get_vf_memory_partition(adev); + else + return amdgpu_gmc_get_memory_partition(adev, NULL); +} + +static bool amdgpu_gmc_validate_partition_info(struct amdgpu_device *adev) +{ + enum 
amdgpu_memory_partition mode; + u32 supp_modes; + bool valid; + + mode = amdgpu_gmc_get_memory_partition(adev, &supp_modes); + + /* Mode detected by hardware not present in supported modes */ + if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) && + !(BIT(mode - 1) & supp_modes)) + return false; + + switch (mode) { + case UNKNOWN_MEMORY_PARTITION_MODE: + case AMDGPU_NPS1_PARTITION_MODE: + valid = (adev->gmc.num_mem_partitions == 1); + break; + case AMDGPU_NPS2_PARTITION_MODE: + valid = (adev->gmc.num_mem_partitions == 2); + break; + case AMDGPU_NPS4_PARTITION_MODE: + valid = (adev->gmc.num_mem_partitions == 3 || + adev->gmc.num_mem_partitions == 4); + break; + case AMDGPU_NPS8_PARTITION_MODE: + valid = (adev->gmc.num_mem_partitions == 8); + break; + default: + valid = false; + } + + return valid; +} + +static bool amdgpu_gmc_is_node_present(int *node_ids, int num_ids, int nid) +{ + int i; + + /* Check if node with id 'nid' is present in 'node_ids' array */ + for (i = 0; i < num_ids; ++i) + if (node_ids[i] == nid) + return true; + + return false; +} + +static void +amdgpu_gmc_init_acpi_mem_ranges(struct amdgpu_device *adev, + struct amdgpu_mem_partition_info *mem_ranges) +{ + struct amdgpu_numa_info numa_info; + int node_ids[AMDGPU_MAX_MEM_RANGES]; + int num_ranges = 0, ret; + int num_xcc, xcc_id; + uint32_t xcc_mask; + + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + xcc_mask = (1U << num_xcc) - 1; + + for_each_inst(xcc_id, xcc_mask) { + ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info); + if (ret) + continue; + + if (numa_info.nid == NUMA_NO_NODE) { + mem_ranges[0].size = numa_info.size; + mem_ranges[0].numa.node = numa_info.nid; + num_ranges = 1; + break; + } + + if (amdgpu_gmc_is_node_present(node_ids, num_ranges, + numa_info.nid)) + continue; + + node_ids[num_ranges] = numa_info.nid; + mem_ranges[num_ranges].numa.node = numa_info.nid; + mem_ranges[num_ranges].size = numa_info.size; + ++num_ranges; + } + + adev->gmc.num_mem_partitions = num_ranges; +} + +void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev, + struct amdgpu_mem_partition_info *mem_ranges) +{ + enum amdgpu_memory_partition mode; + u32 start_addr = 0, size; + int i, r, l; + + mode = amdgpu_gmc_query_memory_partition(adev); + + switch (mode) { + case UNKNOWN_MEMORY_PARTITION_MODE: + adev->gmc.num_mem_partitions = 0; + break; + case AMDGPU_NPS1_PARTITION_MODE: + adev->gmc.num_mem_partitions = 1; + break; + case AMDGPU_NPS2_PARTITION_MODE: + adev->gmc.num_mem_partitions = 2; + break; + case AMDGPU_NPS4_PARTITION_MODE: + if (adev->flags & AMD_IS_APU) + adev->gmc.num_mem_partitions = 3; + else + adev->gmc.num_mem_partitions = 4; + break; + case AMDGPU_NPS8_PARTITION_MODE: + adev->gmc.num_mem_partitions = 8; + break; + default: + adev->gmc.num_mem_partitions = 1; + break; + } + + /* Use NPS range info, if populated */ + r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges, + &adev->gmc.num_mem_partitions); + if (!r) { + l = 0; + for (i = 1; i < adev->gmc.num_mem_partitions; ++i) { + if (mem_ranges[i].range.lpfn > + mem_ranges[i - 1].range.lpfn) + l = i; + } + + } else { + if (!adev->gmc.num_mem_partitions) { + dev_warn(adev->dev, + "Not able to detect NPS mode, fall back to NPS1\n"); + adev->gmc.num_mem_partitions = 1; + } + /* Fallback to sw based calculation */ + size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT; + size /= adev->gmc.num_mem_partitions; + + for (i = 0; i < adev->gmc.num_mem_partitions; ++i) { + mem_ranges[i].range.fpfn = start_addr; + mem_ranges[i].size = + ((u64)size << 
AMDGPU_GPU_PAGE_SHIFT); + mem_ranges[i].range.lpfn = start_addr + size - 1; + start_addr += size; + } + + l = adev->gmc.num_mem_partitions - 1; + } + + /* Adjust the last one */ + mem_ranges[l].range.lpfn = + (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1; + mem_ranges[l].size = + adev->gmc.real_vram_size - + ((u64)mem_ranges[l].range.fpfn << AMDGPU_GPU_PAGE_SHIFT); +} + +int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev) +{ + bool valid; + + adev->gmc.mem_partitions = kcalloc(AMDGPU_MAX_MEM_RANGES, + sizeof(struct amdgpu_mem_partition_info), + GFP_KERNEL); + if (!adev->gmc.mem_partitions) + return -ENOMEM; + + if (adev->gmc.is_app_apu) + amdgpu_gmc_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions); + else + amdgpu_gmc_init_sw_mem_ranges(adev, adev->gmc.mem_partitions); + + if (amdgpu_sriov_vf(adev)) + valid = true; + else + valid = amdgpu_gmc_validate_partition_info(adev); + if (!valid) { + /* TODO: handle invalid case */ + dev_warn(adev->dev, + "Mem ranges not matching with hardware config\n"); + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index bd7fc123b8f9..397c6ccdb903 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -62,6 +62,9 @@ */ #define AMDGPU_GMC_FAULT_TIMEOUT 5000ULL +/* XNACK flags */ +#define AMDGPU_GMC_XNACK_FLAG_CHAIN BIT(0) + struct firmware; enum amdgpu_memory_partition { @@ -81,6 +84,8 @@ enum amdgpu_memory_partition { #define AMDGPU_GMC_INIT_RESET_NPS BIT(0) +#define AMDGPU_MAX_MEM_RANGES 8 + /* * GMC page fault information */ @@ -301,6 +306,7 @@ struct amdgpu_gmc { struct amdgpu_xgmi xgmi; struct amdgpu_irq_src ecc_irq; int noretry; + uint32_t xnack_flags; uint32_t vmid0_page_table_block_size; uint32_t vmid0_page_table_depth; @@ -390,6 +396,7 @@ static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr) return addr; } +bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev); int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev); void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level, uint64_t *addr, uint64_t *flags); @@ -451,5 +458,13 @@ int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev, int nps_mode); void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev); bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev); - +enum amdgpu_memory_partition +amdgpu_gmc_get_vf_memory_partition(struct amdgpu_device *adev); +enum amdgpu_memory_partition +amdgpu_gmc_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes); +enum amdgpu_memory_partition +amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev); +int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev); +void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev, + struct amdgpu_mem_partition_info *mem_ranges); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c index b6cf801939aa..6e02fb9ac2f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c @@ -22,6 +22,7 @@ */ #include "amdgpu.h" #include "amdgpu_ras.h" +#include <uapi/linux/kfd_ioctl.h> int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev) { @@ -46,3 +47,22 @@ int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev) /* hdp ras follows amdgpu_ras_block_late_init_default for late init */ return 0; } + +void amdgpu_hdp_generic_flush(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ + if (!ring || !ring->funcs->emit_wreg) { + 
WREG32((adev->rmmio_remap.reg_offset + + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> + 2, + 0); + if (adev->nbio.funcs->get_memsize) + adev->nbio.funcs->get_memsize(adev); + } else { + amdgpu_ring_emit_wreg(ring, + (adev->rmmio_remap.reg_offset + + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> + 2, + 0); + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h index 7b8a6152dc8d..4cfd932b7e91 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h @@ -44,4 +44,6 @@ struct amdgpu_hdp { }; int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev); +void amdgpu_hdp_generic_flush(struct amdgpu_device *adev, + struct amdgpu_ring *ring); #endif /* __AMDGPU_HDP_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c index 8179d0814db9..57101d24422f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c @@ -24,7 +24,6 @@ * Alex Deucher */ -#include <linux/export.h> #include <linux/pci.h> #include <drm/drm_edid.h> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 2ea98ec60220..7d9bcb72e8dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -128,6 +128,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, struct amdgpu_device *adev = ring->adev; struct amdgpu_ib *ib = &ibs[0]; struct dma_fence *tmp = NULL; + struct amdgpu_fence *af; bool need_ctx_switch; struct amdgpu_vm *vm; uint64_t fence_ctx; @@ -138,7 +139,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, int vmid = AMDGPU_JOB_GET_VMID(job); bool need_pipe_sync = false; unsigned int cond_exec; - unsigned int i; int r = 0; @@ -154,6 +154,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, csa_va = job->csa_va; gds_va = job->gds_va; init_shadow = job->init_shadow; + af = &job->hw_fence; + /* Save the context of the job for reset handling. + * The driver needs this so it can skip the ring + * contents for guilty contexts. + */ + af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0; } else { vm = NULL; fence_ctx = 0; csa_va = 0; gds_va = 0; init_shadow = false; + af = NULL; } - if (!ring->sched.ready && !ring->is_mes_queue) { + if (!ring->sched.ready) { dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name); return -EINVAL; } - if (vm && !job->vmid && !ring->is_mes_queue) { + if (vm && !job->vmid) { dev_err(adev->dev, "VM IB without ID\n"); return -EINVAL; } @@ -282,7 +289,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr); } - r = amdgpu_fence_emit(ring, f, job, fence_flags); + r = amdgpu_fence_emit(ring, f, af, fence_flags); if (r) { dev_err(adev->dev, "failed to emit fence (%d)\n", r); if (job && job->vmid) @@ -304,8 +311,17 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH) ring->funcs->emit_wave_limit(ring, false); + /* Save the wptr associated with this fence. + * This must be done last so that resets work + * properly: the saved wptr tells us which ring + * contents to back up after we reset + * the queue.
+ */ + amdgpu_fence_save_wptr(*f); + amdgpu_ring_ib_end(ring); amdgpu_ring_commit(ring); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 4c4e087230ac..5dd78a9cb12d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -576,8 +576,16 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev) INIT_LIST_HEAD(&id_mgr->ids_lru); id_mgr->reserved_use_count = 0; - /* manage only VMIDs not used by KFD */ - id_mgr->num_ids = adev->vm_manager.first_kfd_vmid; + /* for GC <10, SDMA uses MMHUB so use first_kfd_vmid for both GC and MM */ + if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0)) + /* manage only VMIDs not used by KFD */ + id_mgr->num_ids = adev->vm_manager.first_kfd_vmid; + else if (AMDGPU_IS_MMHUB0(i) || + AMDGPU_IS_MMHUB1(i)) + id_mgr->num_ids = 16; + else + /* manage only VMIDs not used by KFD */ + id_mgr->num_ids = adev->vm_manager.first_kfd_vmid; /* skip over VMID 0, since it is the system VM */ for (j = 1; j < id_mgr->num_ids; ++j) { @@ -588,7 +596,7 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev) } /* alloc a default reserved vmid to enforce isolation */ for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) { - if (adev->enforce_isolation[i]) + if (adev->enforce_isolation[i] != AMDGPU_ENFORCE_ISOLATION_DISABLE) amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i)); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index 901f8b12c672..a6419246e9c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -25,6 +25,7 @@ #include "amdgpu.h" #include "amdgpu_ih.h" +#include "amdgpu_reset.h" /** * amdgpu_ih_ring_init - initialize the IH state @@ -217,7 +218,7 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) restart_ih: count = AMDGPU_IH_MAX_NUM_IVS; - DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr); + dev_dbg(adev->dev, "%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr); /* Order reading of wptr vs. reading of IH ring data */ rmb(); @@ -227,13 +228,23 @@ restart_ih: ih->rptr &= ih->ptr_mask; } - amdgpu_ih_set_rptr(adev, ih); + if (!ih->overflow) + amdgpu_ih_set_rptr(adev, ih); + wake_up_all(&ih->wait_process); /* make sure wptr hasn't changed while processing */ wptr = amdgpu_ih_get_wptr(adev, ih); if (wptr != ih->rptr) - goto restart_ih; + if (!ih->overflow) + goto restart_ih; + + if (ih->overflow) + if (amdgpu_sriov_runtime(adev)) + WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain, + &adev->virt.flr_work), + "Failed to queue work! at %s", + __func__); return IRQ_HANDLED; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h index b0a88f92cd82..7f7ea046e209 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h @@ -72,6 +72,7 @@ struct amdgpu_ih_ring { /* For waiting on IH processing at checkpoint. */ wait_queue_head_t wait_process; uint64_t processed_timestamp; + bool overflow; }; /* return true if time stamp t2 is after t1 with 48bit wrap around */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c new file mode 100644 index 000000000000..99e1cf4fc955 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c @@ -0,0 +1,96 @@ +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" +#include "amdgpu_ip.h" + +static int8_t amdgpu_logical_to_dev_inst(struct amdgpu_device *adev, + enum amd_hw_ip_block_type block, + int8_t inst) +{ + int8_t dev_inst; + + switch (block) { + case GC_HWIP: + case SDMA0_HWIP: + /* Covers both JPEG and VCN, as JPEG is only an alias of VCN */ + case VCN_HWIP: + dev_inst = adev->ip_map.dev_inst[block][inst]; + break; + default: + /* For the rest of the IPs, no lookup is required. + * Assume 'logical instance == physical instance' for all configs. */ + dev_inst = inst; + break; + } + + return dev_inst; +} + +static uint32_t amdgpu_logical_to_dev_mask(struct amdgpu_device *adev, + enum amd_hw_ip_block_type block, + uint32_t mask) +{ + uint32_t dev_mask = 0; + int8_t log_inst, dev_inst; + + while (mask) { + log_inst = ffs(mask) - 1; + dev_inst = amdgpu_logical_to_dev_inst(adev, block, log_inst); + dev_mask |= (1 << dev_inst); + mask &= ~(1 << log_inst); + } + + return dev_mask; +} + +static void amdgpu_populate_ip_map(struct amdgpu_device *adev, + enum amd_hw_ip_block_type ip_block, + uint32_t inst_mask) +{ + int l = 0, i; + + while (inst_mask) { + i = ffs(inst_mask) - 1; + adev->ip_map.dev_inst[ip_block][l++] = i; + inst_mask &= ~(1 << i); + } + for (; l < HWIP_MAX_INSTANCE; l++) + adev->ip_map.dev_inst[ip_block][l] = -1; +} + +void amdgpu_ip_map_init(struct amdgpu_device *adev) +{ + u32 ip_map[][2] = { + { GC_HWIP, adev->gfx.xcc_mask }, + { SDMA0_HWIP, adev->sdma.sdma_mask }, + { VCN_HWIP, adev->vcn.inst_mask }, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(ip_map); ++i) + amdgpu_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]); + + adev->ip_map.logical_to_dev_inst = amdgpu_logical_to_dev_inst; + adev->ip_map.logical_to_dev_mask = amdgpu_logical_to_dev_mask; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h new file mode 100644 index 000000000000..2490fd322aec --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h @@ -0,0 +1,29 @@ +/* + * Copyright 2025 Advanced Micro Devices, Inc.
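amdgpu_logical_to_dev_mask() simply re-bases each set bit of a logical instance mask through the dev_inst table that amdgpu_populate_ip_map() builds. A standalone, runnable model of the translation (the example mapping is made up):

#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

static uint32_t to_dev_mask(const int8_t *dev_inst, uint32_t mask)
{
	uint32_t dev_mask = 0;

	while (mask) {
		int log_inst = ffs(mask) - 1;	/* lowest set logical bit */
		dev_mask |= 1u << dev_inst[log_inst];
		mask &= ~(1u << log_inst);
	}
	return dev_mask;
}

int main(void)
{
	/* xcc_mask = 0b101: logical 0 -> device 0, logical 1 -> device 2 */
	const int8_t map[] = { 0, 2, -1, -1 };

	printf("0x%x\n", to_dev_mask(map, 0x3));	/* prints 0x5 */
	return 0;
}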
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __AMDGPU_IP_H__ +#define __AMDGPU_IP_H__ + +void amdgpu_ip_map_init(struct amdgpu_device *adev); + +#endif /* __AMDGPU_IP_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 19ce4da285e8..8112ffc85995 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -142,8 +142,9 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev) r = src->funcs->set(adev, src, k, AMDGPU_IRQ_STATE_DISABLE); if (r) - DRM_ERROR("error disabling interrupt (%d)\n", - r); + dev_err(adev->dev, + "error disabling interrupt (%d)\n", + r); } } } @@ -242,7 +243,7 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev) return true; } -static void amdgpu_restore_msix(struct amdgpu_device *adev) +void amdgpu_restore_msix(struct amdgpu_device *adev) { u16 ctrl; @@ -315,7 +316,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev) adev->irq.irq = irq; adev_to_drm(adev)->max_vblank_count = 0x00ffffff; - DRM_DEBUG("amdgpu: irq initialized.\n"); + dev_dbg(adev->dev, "amdgpu: irq initialized.\n"); return 0; free_vectors: @@ -461,10 +462,10 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev, src_id = entry.src_id; if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) { - DRM_DEBUG("Invalid client_id in IV: %d\n", client_id); + dev_dbg(adev->dev, "Invalid client_id in IV: %d\n", client_id); } else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) { - DRM_DEBUG("Invalid src_id in IV: %d\n", src_id); + dev_dbg(adev->dev, "Invalid src_id in IV: %d\n", src_id); } else if (((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) || (client_id == SOC15_IH_CLIENTID_ISP)) && @@ -472,18 +473,21 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev, generic_handle_domain_irq(adev->irq.domain, src_id); } else if (!adev->irq.client[client_id].sources) { - DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n", - client_id, src_id); + dev_dbg(adev->dev, + "Unregistered interrupt client_id: %d src_id: %d\n", + client_id, src_id); } else if ((src = adev->irq.client[client_id].sources[src_id])) { r = src->funcs->process(adev, src, &entry); if (r < 0) - DRM_ERROR("error processing interrupt (%d)\n", r); + dev_err(adev->dev, "error processing interrupt (%d)\n", + r); else if (r) handled = true; } else { - DRM_DEBUG("Unregistered interrupt src_id: %d of client_id:%d\n", + dev_dbg(adev->dev, + "Unregistered interrupt src_id: %d of client_id:%d\n", src_id, client_id); } @@ -619,6 
+623,10 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src, int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned int type) { + /* When the threshold is reached, the interrupt source may not be enabled. Return -EINVAL. */ + if (amdgpu_ras_is_rma(adev) && !amdgpu_irq_enabled(adev, src, type)) + return -EINVAL; + if (!adev->irq.installed) return -ENOENT; @@ -725,10 +733,10 @@ static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = { */ int amdgpu_irq_add_domain(struct amdgpu_device *adev) { - adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID, - &amdgpu_hw_irqdomain_ops, adev); + adev->irq.domain = irq_domain_create_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID, + &amdgpu_hw_irqdomain_ops, adev); if (!adev->irq.domain) { - DRM_ERROR("GPU irq add domain failed\n"); + dev_err(adev->dev, "GPU irq add domain failed\n"); return -ENODEV; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h index 04c0b4fa17a4..9f0417456abd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h @@ -146,5 +146,6 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev); int amdgpu_irq_add_domain(struct amdgpu_device *adev); void amdgpu_irq_remove_domain(struct amdgpu_device *adev); unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id); +void amdgpu_restore_msix(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c index 43fc941dfa57..9cddbf50442a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c @@ -33,6 +33,8 @@ #include "isp_v4_1_0.h" #include "isp_v4_1_1.h" +#define ISP_MC_ADDR_ALIGN (1024 * 32) + /** * isp_hw_init - start and test isp block * @@ -141,6 +143,179 @@ static int isp_set_powergating_state(struct amdgpu_ip_block *ip_block, return 0; } +static int is_valid_isp_device(struct device *isp_parent, struct device *amdgpu_dev) +{ + if (isp_parent != amdgpu_dev) + return -EINVAL; + + return 0; +} + +/** + * isp_user_buffer_alloc - create user buffer object (BO) for isp + * + * @dev: isp device handle + * @dmabuf: DMABUF handle for isp buffer allocated in system memory + * @buf_obj: GPU buffer object handle to initialize + * @buf_addr: GPU addr of the pinned BO to initialize + * + * Imports the isp DMABUF to allocate and pin a user BO for isp internal use. It + * does a GART allocation to generate a GPU address for the BO so that it is + * accessible through the GART aperture for the ISP HW. + * + * This function is exported to allow the V4L2 isp device, which is external to + * the drm device, to create and access the isp user BO. + * + * Returns: + * 0 on success, negative error code otherwise.
*/ +int isp_user_buffer_alloc(struct device *dev, void *dmabuf, + void **buf_obj, u64 *buf_addr) +{ + struct platform_device *ispdev = to_platform_device(dev); + const struct isp_platform_data *isp_pdata; + struct amdgpu_device *adev; + struct mfd_cell *mfd_cell; + struct amdgpu_bo *bo; + u64 gpu_addr; + int ret; + + if (WARN_ON(!ispdev)) + return -ENODEV; + + if (WARN_ON(!buf_obj)) + return -EINVAL; + + if (WARN_ON(!buf_addr)) + return -EINVAL; + + mfd_cell = &ispdev->mfd_cell[0]; + if (!mfd_cell) + return -ENODEV; + + isp_pdata = mfd_cell->platform_data; + adev = isp_pdata->adev; + + ret = is_valid_isp_device(ispdev->dev.parent, adev->dev); + if (ret) + return ret; + + ret = amdgpu_bo_create_isp_user(adev, dmabuf, + AMDGPU_GEM_DOMAIN_GTT, &bo, &gpu_addr); + if (ret) { + drm_err(&adev->ddev, "failed to alloc gart user buffer (%d)", ret); + return ret; + } + + *buf_obj = (void *)bo; + *buf_addr = gpu_addr; + + return 0; +} +EXPORT_SYMBOL(isp_user_buffer_alloc); + +/** + * isp_user_buffer_free - free isp user buffer object (BO) + * + * @buf_obj: amdgpu isp user BO to free + * + * Unpins and unrefs the BO used for isp internal use. + * + * This function is exported to allow the V4L2 isp device, which is + * external to the drm device, to free the isp user BO. + */ +void isp_user_buffer_free(void *buf_obj) +{ + amdgpu_bo_free_isp_user(buf_obj); +} +EXPORT_SYMBOL(isp_user_buffer_free); + +/** + * isp_kernel_buffer_alloc - create kernel buffer object (BO) for isp + * + * @dev: isp device handle + * @size: size for the new BO + * @buf_obj: GPU BO handle to initialize + * @gpu_addr: GPU addr of the pinned BO + * @cpu_addr: CPU address mapping of BO + * + * Allocates and pins a kernel BO for internal isp firmware use. + * + * This function is exported to allow the V4L2 isp device, which is + * external to the drm device, to create and access the kernel BO. + * + * Returns: + * 0 on success, negative error code otherwise. + */ +int isp_kernel_buffer_alloc(struct device *dev, u64 size, + void **buf_obj, u64 *gpu_addr, void **cpu_addr) +{ + struct platform_device *ispdev = to_platform_device(dev); + struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj; + const struct isp_platform_data *isp_pdata; + struct amdgpu_device *adev; + struct mfd_cell *mfd_cell; + int ret; + + if (WARN_ON(!ispdev)) + return -ENODEV; + + if (WARN_ON(!buf_obj)) + return -EINVAL; + + if (WARN_ON(!gpu_addr)) + return -EINVAL; + + if (WARN_ON(!cpu_addr)) + return -EINVAL; + + mfd_cell = &ispdev->mfd_cell[0]; + if (!mfd_cell) + return -ENODEV; + + isp_pdata = mfd_cell->platform_data; + adev = isp_pdata->adev; + + ret = is_valid_isp_device(ispdev->dev.parent, adev->dev); + if (ret) + return ret; + + ret = amdgpu_bo_create_kernel(adev, + size, + ISP_MC_ADDR_ALIGN, + AMDGPU_GEM_DOMAIN_GTT, + bo, + gpu_addr, + cpu_addr); + if (!cpu_addr || ret) { + drm_err(&adev->ddev, "failed to alloc gart kernel buffer (%d)", ret); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(isp_kernel_buffer_alloc); + +/** + * isp_kernel_buffer_free - free isp kernel buffer object (BO) + * + * @buf_obj: amdgpu isp kernel BO to free + * @gpu_addr: GPU addr of isp kernel BO + * @cpu_addr: CPU addr of isp kernel BO + * + * Unmaps and unpins an isp kernel BO. + * + * This function is exported to allow the V4L2 isp device, which is + * external to the drm device, to free the kernel BO.
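Taken together, the four EXPORT_SYMBOL()ed helpers give the V4L2 isp driver a complete buffer lifecycle without reaching into amdgpu internals. A hedged sketch of a caller; everything except the four exported functions and SZ_1M is hypothetical, and teardown is shown inline for brevity:

/* isp_dev is the MFD platform device whose parent is the amdgpu PCI
 * device; dbuf is a dma-buf handle from the capture queue. */
static int isp_setup_buffers(struct device *isp_dev, void *dbuf)
{
	void *fw_bo, *user_bo;
	u64 fw_gpu_addr, user_gpu_addr;
	void *fw_cpu_addr;
	int ret;

	/* GART-backed, 32K-aligned allocation for the isp firmware */
	ret = isp_kernel_buffer_alloc(isp_dev, SZ_1M, &fw_bo,
				      &fw_gpu_addr, &fw_cpu_addr);
	if (ret)
		return ret;

	/* pin the imported dma-buf and get its GART address */
	ret = isp_user_buffer_alloc(isp_dev, dbuf, &user_bo, &user_gpu_addr);
	if (ret)
		goto free_fw;

	/* ... program fw_gpu_addr/user_gpu_addr into ISP registers ... */

	isp_user_buffer_free(user_bo);
free_fw:
	isp_kernel_buffer_free(&fw_bo, &fw_gpu_addr, &fw_cpu_addr);
	return ret;
}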
+ */ +void isp_kernel_buffer_free(void **buf_obj, u64 *gpu_addr, void **cpu_addr) +{ + struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj; + + amdgpu_bo_free_kernel(bo, gpu_addr, cpu_addr); +} +EXPORT_SYMBOL(isp_kernel_buffer_free); + static const struct amd_ip_funcs isp_ip_funcs = { .name = "isp_ip", .early_init = isp_early_init, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h index 4f3b7b5d9c1f..d6f4ffa4c97c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h @@ -28,16 +28,13 @@ #ifndef __AMDGPU_ISP_H__ #define __AMDGPU_ISP_H__ +#include <drm/amd/isp.h> +#include <linux/pm_domain.h> + #define ISP_REGS_OFFSET_END 0x629A4 struct amdgpu_isp; -struct isp_platform_data { - void *adev; - u32 asic_type; - resource_size_t base_rmmio_size; -}; - struct isp_funcs { int (*hw_init)(struct amdgpu_isp *isp); int (*hw_fini)(struct amdgpu_isp *isp); @@ -54,6 +51,7 @@ struct amdgpu_isp { struct isp_platform_data *isp_pdata; unsigned int harvest_config; const struct firmware *fw; + struct generic_pm_domain ispgpd; }; extern const struct amdgpu_ip_block_version isp_v4_1_0_ip_block; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index acb21fc8b3ce..e6061d45f142 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -89,10 +89,10 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) { struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); struct amdgpu_job *job = to_amdgpu_job(s_job); - struct amdgpu_task_info *ti; + struct drm_wedge_task_info *info = NULL; + struct amdgpu_task_info *ti = NULL; struct amdgpu_device *adev = ring->adev; - int idx; - int r; + int idx, r; if (!drm_dev_enter(adev_to_drm(adev), &idx)) { dev_info(adev->dev, "%s - device unplugged skipping recovery on scheduler:%s", @@ -112,6 +112,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) amdgpu_job_core_dump(adev, job); if (amdgpu_gpu_recovery && + amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_SOFT_RESET) && amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { dev_err(adev->dev, "ring %s timeout, but soft recovered\n", s_job->sched->name); @@ -124,53 +125,30 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid); if (ti) { - dev_err(adev->dev, - "Process information: process %s pid %d thread %s pid %d\n", - ti->process_name, ti->tgid, ti->task_name, ti->pid); - amdgpu_vm_put_task_info(ti); + amdgpu_vm_print_task_info(adev, ti); + info = &ti->task; } /* attempt a per ring reset */ if (unlikely(adev->debug_disable_gpu_ring_reset)) { dev_err(adev->dev, "Ring reset disabled by debug mask\n"); - } else if (amdgpu_gpu_recovery && ring->funcs->reset) { - bool is_guilty; - - dev_err(adev->dev, "Starting %s ring reset\n", s_job->sched->name); - /* stop the scheduler, but don't mess with the - * bad job yet because if ring reset fails - * we'll fall back to full GPU reset. - */ - drm_sched_wqueue_stop(&ring->sched); - - /* for engine resets, we need to reset the engine, - * but individual queues may be unaffected. - * check here to make sure the accounting is correct. 
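The surrounding rework turns amdgpu_job_timedout() into a strict escalation ladder. A condensed paraphrase of the new control flow; the wrapper function is illustrative, while the calls and constants are the ones used in the hunk:

static enum drm_gpu_sched_stat job_timeout_ladder(struct amdgpu_ring *ring,
						  struct amdgpu_job *job,
						  struct drm_sched_job *s_job)
{
	/* 1) soft recovery: spin until the fence signals, nothing is reset */
	if (amdgpu_gpu_recovery &&
	    amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_SOFT_RESET) &&
	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent))
		return DRM_GPU_SCHED_STAT_RESET;

	/* 2) per-queue reset: the hw fence passed in carries the guilty
	 * context and saved wptr, so innocent work can be replayed */
	if (amdgpu_gpu_recovery &&
	    amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_PER_QUEUE) &&
	    ring->funcs->reset &&
	    !amdgpu_ring_reset(ring, job->vmid, &job->hw_fence))
		return DRM_GPU_SCHED_STAT_RESET;

	/* 3) fail the job and fall back to a full adapter reset */
	dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
	/* ... amdgpu_device_should_recover_gpu() / full recovery path ... */
	return DRM_GPU_SCHED_STAT_RESET;
}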
- */ - if (ring->funcs->is_guilty) - is_guilty = ring->funcs->is_guilty(ring); - else - is_guilty = true; - - if (is_guilty) - dma_fence_set_error(&s_job->s_fence->finished, -ETIME); - - r = amdgpu_ring_reset(ring, job->vmid); + } else if (amdgpu_gpu_recovery && + amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_PER_QUEUE) && + ring->funcs->reset) { + dev_err(adev->dev, "Starting %s ring reset\n", + s_job->sched->name); + r = amdgpu_ring_reset(ring, job->vmid, &job->hw_fence); if (!r) { - if (amdgpu_ring_sched_ready(ring)) - drm_sched_stop(&ring->sched, s_job); - if (is_guilty) { - atomic_inc(&ring->adev->gpu_reset_counter); - amdgpu_fence_driver_force_completion(ring); - } - if (amdgpu_ring_sched_ready(ring)) - drm_sched_start(&ring->sched, 0); - dev_err(adev->dev, "Ring %s reset succeeded\n", ring->sched.name); - drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE); + atomic_inc(&ring->adev->gpu_reset_counter); + dev_err(adev->dev, "Ring %s reset succeeded\n", + ring->sched.name); + drm_dev_wedged_event(adev_to_drm(adev), + DRM_WEDGE_RECOVERY_NONE, info); goto exit; } - dev_err(adev->dev, "Ring %s reset failure\n", ring->sched.name); + dev_err(adev->dev, "Ring %s reset failed\n", ring->sched.name); } + dma_fence_set_error(&s_job->s_fence->finished, -ETIME); if (amdgpu_device_should_recover_gpu(ring->adev)) { @@ -198,13 +176,15 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) } exit: + amdgpu_vm_put_task_info(ti); drm_dev_exit(idx); - return DRM_GPU_SCHED_STAT_NOMINAL; + return DRM_GPU_SCHED_STAT_RESET; } int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct drm_sched_entity *entity, void *owner, - unsigned int num_ibs, struct amdgpu_job **job) + unsigned int num_ibs, struct amdgpu_job **job, + u64 drm_client_id) { if (num_ibs == 0) return -EINVAL; @@ -222,7 +202,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (!entity) return 0; - return drm_sched_job_init(&(*job)->base, entity, 1, owner); + return drm_sched_job_init(&(*job)->base, entity, 1, owner, + drm_client_id); } int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, @@ -232,7 +213,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, { int r; - r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job); + r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job, 0); if (r) return r; @@ -272,8 +253,8 @@ void amdgpu_job_free_resources(struct amdgpu_job *job) /* Check if any fences where initialized */ if (job->base.s_fence && job->base.s_fence->finished.ops) f = &job->base.s_fence->finished; - else if (job->hw_fence.ops) - f = &job->hw_fence; + else if (job->hw_fence.base.ops) + f = &job->hw_fence.base; else f = NULL; @@ -290,10 +271,10 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job) amdgpu_sync_free(&job->explicit_sync); /* only put the hw fence if has embedded fence */ - if (!job->hw_fence.ops) + if (!job->hw_fence.base.ops) kfree(job); else - dma_fence_put(&job->hw_fence); + dma_fence_put(&job->hw_fence.base); } void amdgpu_job_set_gang_leader(struct amdgpu_job *job, @@ -322,10 +303,10 @@ void amdgpu_job_free(struct amdgpu_job *job) if (job->gang_submit != &job->base.s_fence->scheduled) dma_fence_put(job->gang_submit); - if (!job->hw_fence.ops) + if (!job->hw_fence.base.ops) kfree(job); else - dma_fence_put(&job->hw_fence); + dma_fence_put(&job->hw_fence.base); } struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index ce6b9ba967ff..2f302266662b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h @@ -48,7 +48,7 @@ struct amdgpu_job { struct drm_sched_job base; struct amdgpu_vm *vm; struct amdgpu_sync explicit_sync; - struct dma_fence hw_fence; + struct amdgpu_fence hw_fence; struct dma_fence *gang_submit; uint32_t preamble_status; uint32_t preemption_status; @@ -78,6 +78,7 @@ struct amdgpu_job { /* enforce isolation */ bool enforce_isolation; + bool run_cleaner_shader; uint32_t num_ibs; struct amdgpu_ib ibs[]; @@ -90,7 +91,8 @@ static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job) int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct drm_sched_entity *entity, void *owner, - unsigned int num_ibs, struct amdgpu_job **job); + unsigned int num_ibs, struct amdgpu_job **job, + u64 drm_client_id); int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, struct drm_sched_entity *entity, void *owner, size_t size, enum amdgpu_ib_pool_type pool_type, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c index dda29132dfb2..82d58ac7afb0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -463,7 +463,8 @@ int amdgpu_jpeg_reg_dump_init(struct amdgpu_device *adev, adev->jpeg.ip_dump = kcalloc(adev->jpeg.num_jpeg_inst * count, sizeof(uint32_t), GFP_KERNEL); if (!adev->jpeg.ip_dump) { - DRM_ERROR("Failed to allocate memory for JPEG IP Dump\n"); + dev_err(adev->dev, + "Failed to allocate memory for JPEG IP Dump\n"); return -ENOMEM; } adev->jpeg.reg_list = reg; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 27bfe9c8af06..8a76960803c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -45,6 +45,7 @@ #include "amdgpu_ras.h" #include "amdgpu_reset.h" #include "amd_pcie.h" +#include "amdgpu_userq.h" void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev) { @@ -90,7 +91,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev) if (adev->rmmio == NULL) return; - if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD)) + if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_UNLOAD)) DRM_WARN("smart shift update failed\n"); amdgpu_acpi_fini(adev); @@ -160,7 +161,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) if (acpi_status) dev_dbg(dev->dev, "Error during ACPI methods call\n"); - if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD)) + if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_LOAD)) DRM_WARN("smart shift update failed\n"); out: @@ -370,6 +371,26 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info, return 0; } +static int amdgpu_userq_metadata_info_gfx(struct amdgpu_device *adev, + struct drm_amdgpu_info *info, + struct drm_amdgpu_info_uq_metadata_gfx *meta) +{ + int ret = -EOPNOTSUPP; + + if (adev->gfx.funcs->get_gfx_shadow_info) { + struct amdgpu_gfx_shadow_info shadow = {}; + + adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow, true); + meta->shadow_size = shadow.shadow_size; + meta->shadow_alignment = shadow.shadow_alignment; + meta->csa_size = shadow.csa_size; + meta->csa_alignment = shadow.csa_alignment; + ret = 0; + } + + return ret; +} + static int amdgpu_hw_ip_info(struct amdgpu_device *adev, struct drm_amdgpu_info *info, struct drm_amdgpu_info_hw_ip *result) @@ -378,6 +399,7 @@ static int 
amdgpu_hw_ip_info(struct amdgpu_device *adev, uint32_t ib_size_alignment = 0; enum amd_ip_block_type type; unsigned int num_rings = 0; + uint32_t num_slots = 0; unsigned int i, j; if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT) @@ -387,24 +409,45 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, case AMDGPU_HW_IP_GFX: type = AMD_IP_BLOCK_TYPE_GFX; for (i = 0; i < adev->gfx.num_gfx_rings; i++) - if (adev->gfx.gfx_ring[i].sched.ready) + if (adev->gfx.gfx_ring[i].sched.ready && + !adev->gfx.gfx_ring[i].no_user_submission) ++num_rings; + + if (!adev->gfx.disable_uq) { + for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++) + num_slots += hweight32(adev->mes.gfx_hqd_mask[i]); + } + ib_start_alignment = 32; ib_size_alignment = 32; break; case AMDGPU_HW_IP_COMPUTE: type = AMD_IP_BLOCK_TYPE_GFX; for (i = 0; i < adev->gfx.num_compute_rings; i++) - if (adev->gfx.compute_ring[i].sched.ready) + if (adev->gfx.compute_ring[i].sched.ready && + !adev->gfx.compute_ring[i].no_user_submission) ++num_rings; + + if (!adev->sdma.disable_uq) { + for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) + num_slots += hweight32(adev->mes.compute_hqd_mask[i]); + } + ib_start_alignment = 32; ib_size_alignment = 32; break; case AMDGPU_HW_IP_DMA: type = AMD_IP_BLOCK_TYPE_SDMA; for (i = 0; i < adev->sdma.num_instances; i++) - if (adev->sdma.instance[i].ring.sched.ready) + if (adev->sdma.instance[i].ring.sched.ready && + !adev->sdma.instance[i].ring.no_user_submission) ++num_rings; + + if (!adev->gfx.disable_uq) { + for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) + num_slots += hweight32(adev->mes.sdma_hqd_mask[i]); + } + ib_start_alignment = 256; ib_size_alignment = 4; break; @@ -414,7 +457,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, if (adev->uvd.harvest_config & (1 << i)) continue; - if (adev->uvd.inst[i].ring.sched.ready) + if (adev->uvd.inst[i].ring.sched.ready && + !adev->uvd.inst[i].ring.no_user_submission) ++num_rings; } ib_start_alignment = 256; @@ -423,7 +467,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, case AMDGPU_HW_IP_VCE: type = AMD_IP_BLOCK_TYPE_VCE; for (i = 0; i < adev->vce.num_rings; i++) - if (adev->vce.ring[i].sched.ready) + if (adev->vce.ring[i].sched.ready && + !adev->vce.ring[i].no_user_submission) ++num_rings; ib_start_alignment = 256; ib_size_alignment = 4; @@ -435,7 +480,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, continue; for (j = 0; j < adev->uvd.num_enc_rings; j++) - if (adev->uvd.inst[i].ring_enc[j].sched.ready) + if (adev->uvd.inst[i].ring_enc[j].sched.ready && + !adev->uvd.inst[i].ring_enc[j].no_user_submission) ++num_rings; } ib_start_alignment = 256; @@ -447,7 +493,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, if (adev->vcn.harvest_config & (1 << i)) continue; - if (adev->vcn.inst[i].ring_dec.sched.ready) + if (adev->vcn.inst[i].ring_dec.sched.ready && + !adev->vcn.inst[i].ring_dec.no_user_submission) ++num_rings; } ib_start_alignment = 256; @@ -460,7 +507,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, continue; for (j = 0; j < adev->vcn.inst[i].num_enc_rings; j++) - if (adev->vcn.inst[i].ring_enc[j].sched.ready) + if (adev->vcn.inst[i].ring_enc[j].sched.ready && + !adev->vcn.inst[i].ring_enc[j].no_user_submission) ++num_rings; } ib_start_alignment = 256; @@ -475,7 +523,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, continue; for (j = 0; j < adev->jpeg.num_jpeg_rings; j++) - if (adev->jpeg.inst[i].ring_dec[j].sched.ready) + if (adev->jpeg.inst[i].ring_dec[j].sched.ready 
&& + !adev->jpeg.inst[i].ring_dec[j].no_user_submission) ++num_rings; } ib_start_alignment = 256; @@ -483,7 +532,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, break; case AMDGPU_HW_IP_VPE: type = AMD_IP_BLOCK_TYPE_VPE; - if (adev->vpe.ring.sched.ready) + if (adev->vpe.ring.sched.ready && + !adev->vpe.ring.no_user_submission) ++num_rings; ib_start_alignment = 256; ib_size_alignment = 4; @@ -539,6 +589,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, } result->capabilities_flags = 0; result->available_rings = (1 << num_rings) - 1; + result->userq_num_slots = num_slots; result->ib_start_alignment = ib_start_alignment; result->ib_size_alignment = ib_size_alignment; return 0; @@ -978,6 +1029,8 @@ out: } } + dev_info->userq_ip_mask = amdgpu_userq_get_supported_ip_mask(adev); + ret = copy_to_user(out, dev_info, min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0; kfree(dev_info); @@ -1293,6 +1346,22 @@ out: return copy_to_user(out, &gpuvm_fault, min((size_t)size, sizeof(gpuvm_fault))) ? -EFAULT : 0; } + case AMDGPU_INFO_UQ_FW_AREAS: { + struct drm_amdgpu_info_uq_metadata meta_info = {}; + + switch (info->query_hw_ip.type) { + case AMDGPU_HW_IP_GFX: + ret = amdgpu_userq_metadata_info_gfx(adev, info, &meta_info.gfx); + if (ret) + return ret; + + ret = copy_to_user(out, &meta_info, + min((size_t)size, sizeof(meta_info))) ? -EFAULT : 0; + return 0; + default: + return -EINVAL; + } + } default: DRM_DEBUG_KMS("Invalid request %d\n", info->query); return -EINVAL; @@ -1346,6 +1415,8 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) if (r) goto error_pasid; + amdgpu_debugfs_vm_init(file_priv); + r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id); if (r) goto error_pasid; @@ -1376,6 +1447,14 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) mutex_init(&fpriv->bo_list_lock); idr_init_base(&fpriv->bo_list_handles, 1); + r = amdgpu_userq_mgr_init(&fpriv->userq_mgr, file_priv, adev); + if (r) + DRM_WARN("Can't setup usermode queues, use legacy workload submission only\n"); + + r = amdgpu_eviction_fence_init(&fpriv->evf_mgr); + if (r) + goto error_vm; + amdgpu_ctx_mgr_init(&fpriv->ctx_mgr, adev); file_priv->driver_priv = fpriv; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 85f774063f9b..135598502c8d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -39,42 +39,6 @@ int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev) PAGE_SIZE); } -static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev, - int ip_type, uint64_t *doorbell_index) -{ - unsigned int offset, found; - struct amdgpu_mes *mes = &adev->mes; - - if (ip_type == AMDGPU_RING_TYPE_SDMA) - offset = adev->doorbell_index.sdma_engine[0]; - else - offset = 0; - - found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset); - if (found >= mes->num_mes_dbs) { - DRM_WARN("No doorbell available\n"); - return -ENOSPC; - } - - set_bit(found, mes->doorbell_bitmap); - - /* Get the absolute doorbell index on BAR */ - *doorbell_index = mes->db_start_dw_offset + found * 2; - return 0; -} - -static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev, - uint32_t doorbell_index) -{ - unsigned int old, rel_index; - struct amdgpu_mes *mes = &adev->mes; - - /* Find the relative index of the doorbell in this object */ - rel_index = (doorbell_index - mes->db_start_dw_offset) / 2; - old = test_and_clear_bit(rel_index, 
mes->doorbell_bitmap); - WARN_ON(!old); -} - static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev) { int i; @@ -83,7 +47,7 @@ static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev) /* Bitmap for dynamic allocation of kernel doorbells */ mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL); if (!mes->doorbell_bitmap) { - DRM_ERROR("Failed to allocate MES doorbell bitmap\n"); + dev_err(adev->dev, "Failed to allocate MES doorbell bitmap\n"); return -ENOMEM; } @@ -126,7 +90,7 @@ static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev) int amdgpu_mes_init(struct amdgpu_device *adev) { - int i, r; + int i, r, num_pipes; adev->mes.adev = adev; @@ -142,19 +106,52 @@ int amdgpu_mes_init(struct amdgpu_device *adev) adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK; adev->mes.vmid_mask_mmhub = 0xffffff00; - adev->mes.vmid_mask_gfxhub = 0xffffff00; + adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 0xfffffffe : 0xffffff00; + + num_pipes = adev->gfx.me.num_pipe_per_me * adev->gfx.me.num_me; + if (num_pipes > AMDGPU_MES_MAX_GFX_PIPES) + dev_warn(adev->dev, "more gfx pipes than supported by MES! (%d vs %d)\n", + num_pipes, AMDGPU_MES_MAX_GFX_PIPES); + + for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++) { + if (i >= num_pipes) + break; + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= + IP_VERSION(12, 0, 0)) + /* + * GFX V12 has only one GFX pipe, but 8 queues in it. + * GFX pipe 0 queue 0 is being used by Kernel queue. + * Set GFX pipe 0 queue 1-7 for MES scheduling + * mask = 1111 1110b + */ + adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0xFF : 0xFE; + else + /* + * GFX pipe 0 queue 0 is being used by Kernel queue. + * Set GFX pipe 0 queue 1 for MES scheduling + * mask = 10b + */ + adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0x3 : 0x2; + } + + num_pipes = adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_mec; + if (num_pipes > AMDGPU_MES_MAX_COMPUTE_PIPES) + dev_warn(adev->dev, "more compute pipes than supported by MES! (%d vs %d)\n", + num_pipes, AMDGPU_MES_MAX_COMPUTE_PIPES); for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) { - if (i >= (adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_mec)) + if (i >= num_pipes) break; - adev->mes.compute_hqd_mask[i] = 0xc; + adev->mes.compute_hqd_mask[i] = adev->gfx.disable_kq ? 0xF : 0xC; } - for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++) - adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe; + num_pipes = adev->sdma.num_instances; + if (num_pipes > AMDGPU_MES_MAX_SDMA_PIPES) + dev_warn(adev->dev, "more SDMA pipes than supported by MES! 
(%d vs %d)\n", + num_pipes, AMDGPU_MES_MAX_SDMA_PIPES); for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) { - if (i >= adev->sdma.num_instances) + if (i >= num_pipes) break; adev->mes.sdma_hqd_mask[i] = 0xfc; } @@ -240,244 +237,6 @@ void amdgpu_mes_fini(struct amdgpu_device *adev) mutex_destroy(&adev->mes.mutex_hidden); } -static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q) -{ - amdgpu_bo_free_kernel(&q->mqd_obj, - &q->mqd_gpu_addr, - &q->mqd_cpu_ptr); -} - -int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid, - struct amdgpu_vm *vm) -{ - struct amdgpu_mes_process *process; - int r; - - /* allocate the mes process buffer */ - process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL); - if (!process) { - DRM_ERROR("no more memory to create mes process\n"); - return -ENOMEM; - } - - /* allocate the process context bo and map it */ - r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_GTT, - &process->proc_ctx_bo, - &process->proc_ctx_gpu_addr, - &process->proc_ctx_cpu_ptr); - if (r) { - DRM_ERROR("failed to allocate process context bo\n"); - goto clean_up_memory; - } - memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE); - - /* - * Avoid taking any other locks under MES lock to avoid circular - * lock dependencies. - */ - amdgpu_mes_lock(&adev->mes); - - /* add the mes process to idr list */ - r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1, - GFP_KERNEL); - if (r < 0) { - DRM_ERROR("failed to lock pasid=%d\n", pasid); - goto clean_up_ctx; - } - - INIT_LIST_HEAD(&process->gang_list); - process->vm = vm; - process->pasid = pasid; - process->process_quantum = adev->mes.default_process_quantum; - process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo); - - amdgpu_mes_unlock(&adev->mes); - return 0; - -clean_up_ctx: - amdgpu_mes_unlock(&adev->mes); - amdgpu_bo_free_kernel(&process->proc_ctx_bo, - &process->proc_ctx_gpu_addr, - &process->proc_ctx_cpu_ptr); -clean_up_memory: - kfree(process); - return r; -} - -void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid) -{ - struct amdgpu_mes_process *process; - struct amdgpu_mes_gang *gang, *tmp1; - struct amdgpu_mes_queue *queue, *tmp2; - struct mes_remove_queue_input queue_input; - unsigned long flags; - int r; - - /* - * Avoid taking any other locks under MES lock to avoid circular - * lock dependencies. 
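
The HQD masks initialized above feed the userq_num_slots count reported through the HW IP info query earlier in this diff: each set bit in a pipe's mask is one hardware queue slot MES may hand to user mode, summed with hweight32(). A small sketch of that accounting, with popcount32() standing in for the kernel's hweight32() and the GFX12 mask value taken from the comment in the hunk:

#include <stdint.h>
#include <stdio.h>

/* popcount32() stands in for the kernel's hweight32() */
static unsigned int popcount32(uint32_t v)
{
    unsigned int n = 0;

    for (; v; v &= v - 1)
        n++;
    return n;
}

int main(void)
{
    /* GFX12: one pipe, queue 0 kept for the kernel queue, 1-7 for MES */
    uint32_t gfx_hqd_mask[] = { 0xFE };
    unsigned int i, num_slots = 0;

    for (i = 0; i < sizeof(gfx_hqd_mask) / sizeof(gfx_hqd_mask[0]); i++)
        num_slots += popcount32(gfx_hqd_mask[i]);
    printf("userq_num_slots = %u\n", num_slots); /* 7 */
    return 0;
}
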
- */ - amdgpu_mes_lock(&adev->mes); - - process = idr_find(&adev->mes.pasid_idr, pasid); - if (!process) { - DRM_WARN("pasid %d doesn't exist\n", pasid); - amdgpu_mes_unlock(&adev->mes); - return; - } - - /* Remove all queues from hardware */ - list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) { - list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) { - spin_lock_irqsave(&adev->mes.queue_id_lock, flags); - idr_remove(&adev->mes.queue_id_idr, queue->queue_id); - spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags); - - queue_input.doorbell_offset = queue->doorbell_off; - queue_input.gang_context_addr = gang->gang_ctx_gpu_addr; - - r = adev->mes.funcs->remove_hw_queue(&adev->mes, - &queue_input); - if (r) - DRM_WARN("failed to remove hardware queue\n"); - } - - idr_remove(&adev->mes.gang_id_idr, gang->gang_id); - } - - idr_remove(&adev->mes.pasid_idr, pasid); - amdgpu_mes_unlock(&adev->mes); - - /* free all memory allocated by the process */ - list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) { - /* free all queues in the gang */ - list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) { - amdgpu_mes_queue_free_mqd(queue); - list_del(&queue->list); - kfree(queue); - } - amdgpu_bo_free_kernel(&gang->gang_ctx_bo, - &gang->gang_ctx_gpu_addr, - &gang->gang_ctx_cpu_ptr); - list_del(&gang->list); - kfree(gang); - - } - amdgpu_bo_free_kernel(&process->proc_ctx_bo, - &process->proc_ctx_gpu_addr, - &process->proc_ctx_cpu_ptr); - kfree(process); -} - -int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid, - struct amdgpu_mes_gang_properties *gprops, - int *gang_id) -{ - struct amdgpu_mes_process *process; - struct amdgpu_mes_gang *gang; - int r; - - /* allocate the mes gang buffer */ - gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL); - if (!gang) { - return -ENOMEM; - } - - /* allocate the gang context bo and map it to cpu space */ - r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_GTT, - &gang->gang_ctx_bo, - &gang->gang_ctx_gpu_addr, - &gang->gang_ctx_cpu_ptr); - if (r) { - DRM_ERROR("failed to allocate process context bo\n"); - goto clean_up_mem; - } - memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE); - - /* - * Avoid taking any other locks under MES lock to avoid circular - * lock dependencies. - */ - amdgpu_mes_lock(&adev->mes); - - process = idr_find(&adev->mes.pasid_idr, pasid); - if (!process) { - DRM_ERROR("pasid %d doesn't exist\n", pasid); - r = -EINVAL; - goto clean_up_ctx; - } - - /* add the mes gang to idr list */ - r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0, - GFP_KERNEL); - if (r < 0) { - DRM_ERROR("failed to allocate idr for gang\n"); - goto clean_up_ctx; - } - - gang->gang_id = r; - *gang_id = r; - - INIT_LIST_HEAD(&gang->queue_list); - gang->process = process; - gang->priority = gprops->priority; - gang->gang_quantum = gprops->gang_quantum ? 
- gprops->gang_quantum : adev->mes.default_gang_quantum; - gang->global_priority_level = gprops->global_priority_level; - gang->inprocess_gang_priority = gprops->inprocess_gang_priority; - list_add_tail(&gang->list, &process->gang_list); - - amdgpu_mes_unlock(&adev->mes); - return 0; - -clean_up_ctx: - amdgpu_mes_unlock(&adev->mes); - amdgpu_bo_free_kernel(&gang->gang_ctx_bo, - &gang->gang_ctx_gpu_addr, - &gang->gang_ctx_cpu_ptr); -clean_up_mem: - kfree(gang); - return r; -} - -int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id) -{ - struct amdgpu_mes_gang *gang; - - /* - * Avoid taking any other locks under MES lock to avoid circular - * lock dependencies. - */ - amdgpu_mes_lock(&adev->mes); - - gang = idr_find(&adev->mes.gang_id_idr, gang_id); - if (!gang) { - DRM_ERROR("gang id %d doesn't exist\n", gang_id); - amdgpu_mes_unlock(&adev->mes); - return -EINVAL; - } - - if (!list_empty(&gang->queue_list)) { - DRM_ERROR("queue list is not empty\n"); - amdgpu_mes_unlock(&adev->mes); - return -EBUSY; - } - - idr_remove(&adev->mes.gang_id_idr, gang->gang_id); - list_del(&gang->list); - amdgpu_mes_unlock(&adev->mes); - - amdgpu_bo_free_kernel(&gang->gang_ctx_bo, - &gang->gang_ctx_gpu_addr, - &gang->gang_ctx_cpu_ptr); - - kfree(gang); - - return 0; -} - int amdgpu_mes_suspend(struct amdgpu_device *adev) { struct mes_suspend_gang_input input; @@ -497,7 +256,7 @@ int amdgpu_mes_suspend(struct amdgpu_device *adev) r = adev->mes.funcs->suspend_gang(&adev->mes, &input); amdgpu_mes_unlock(&adev->mes); if (r) - DRM_ERROR("failed to suspend all gangs"); + dev_err(adev->dev, "failed to suspend all gangs"); return r; } @@ -521,306 +280,8 @@ int amdgpu_mes_resume(struct amdgpu_device *adev) r = adev->mes.funcs->resume_gang(&adev->mes, &input); amdgpu_mes_unlock(&adev->mes); if (r) - DRM_ERROR("failed to resume all gangs"); - - return r; -} - -static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev, - struct amdgpu_mes_queue *q, - struct amdgpu_mes_queue_properties *p) -{ - struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type]; - u32 mqd_size = mqd_mgr->mqd_size; - int r; - - r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_GTT, - &q->mqd_obj, - &q->mqd_gpu_addr, &q->mqd_cpu_ptr); - if (r) { - dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r); - return r; - } - memset(q->mqd_cpu_ptr, 0, mqd_size); - - r = amdgpu_bo_reserve(q->mqd_obj, false); - if (unlikely(r != 0)) - goto clean_up; - - return 0; - -clean_up: - amdgpu_bo_free_kernel(&q->mqd_obj, - &q->mqd_gpu_addr, - &q->mqd_cpu_ptr); - return r; -} - -static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev, - struct amdgpu_mes_queue *q, - struct amdgpu_mes_queue_properties *p) -{ - struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type]; - struct amdgpu_mqd_prop mqd_prop = {0}; - - mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr; - mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr; - mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr; - mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr; - mqd_prop.queue_size = p->queue_size; - mqd_prop.use_doorbell = true; - mqd_prop.doorbell_index = p->doorbell_off; - mqd_prop.eop_gpu_addr = p->eop_gpu_addr; - mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority; - mqd_prop.hqd_queue_priority = p->hqd_queue_priority; - mqd_prop.hqd_active = false; - - if (p->queue_type == AMDGPU_RING_TYPE_GFX || - p->queue_type == AMDGPU_RING_TYPE_COMPUTE) { - mutex_lock(&adev->srbm_mutex); - amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0); - } - - mqd_mgr->init_mqd(adev, 
q->mqd_cpu_ptr, &mqd_prop); - - if (p->queue_type == AMDGPU_RING_TYPE_GFX || - p->queue_type == AMDGPU_RING_TYPE_COMPUTE) { - amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0); - mutex_unlock(&adev->srbm_mutex); - } - - amdgpu_bo_unreserve(q->mqd_obj); -} - -int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id, - struct amdgpu_mes_queue_properties *qprops, - int *queue_id) -{ - struct amdgpu_mes_queue *queue; - struct amdgpu_mes_gang *gang; - struct mes_add_queue_input queue_input; - unsigned long flags; - int r; - - memset(&queue_input, 0, sizeof(struct mes_add_queue_input)); - - /* allocate the mes queue buffer */ - queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL); - if (!queue) { - DRM_ERROR("Failed to allocate memory for queue\n"); - return -ENOMEM; - } - - /* Allocate the queue mqd */ - r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops); - if (r) - goto clean_up_memory; - - /* - * Avoid taking any other locks under MES lock to avoid circular - * lock dependencies. - */ - amdgpu_mes_lock(&adev->mes); - - gang = idr_find(&adev->mes.gang_id_idr, gang_id); - if (!gang) { - DRM_ERROR("gang id %d doesn't exist\n", gang_id); - r = -EINVAL; - goto clean_up_mqd; - } - - /* add the mes gang to idr list */ - spin_lock_irqsave(&adev->mes.queue_id_lock, flags); - r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0, - GFP_ATOMIC); - if (r < 0) { - spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags); - goto clean_up_mqd; - } - spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags); - *queue_id = queue->queue_id = r; - - /* allocate a doorbell index for the queue */ - r = amdgpu_mes_kernel_doorbell_get(adev, - qprops->queue_type, - &qprops->doorbell_off); - if (r) - goto clean_up_queue_id; - - /* initialize the queue mqd */ - amdgpu_mes_queue_init_mqd(adev, queue, qprops); - - /* add hw queue to mes */ - queue_input.process_id = gang->process->pasid; - - queue_input.page_table_base_addr = - adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr - - adev->gmc.vram_start; - - queue_input.process_va_start = 0; - queue_input.process_va_end = - (adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT; - queue_input.process_quantum = gang->process->process_quantum; - queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr; - queue_input.gang_quantum = gang->gang_quantum; - queue_input.gang_context_addr = gang->gang_ctx_gpu_addr; - queue_input.inprocess_gang_priority = gang->inprocess_gang_priority; - queue_input.gang_global_priority_level = gang->global_priority_level; - queue_input.doorbell_offset = qprops->doorbell_off; - queue_input.mqd_addr = queue->mqd_gpu_addr; - queue_input.wptr_addr = qprops->wptr_gpu_addr; - queue_input.wptr_mc_addr = qprops->wptr_mc_addr; - queue_input.queue_type = qprops->queue_type; - queue_input.paging = qprops->paging; - queue_input.is_kfd_process = 0; - - r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input); - if (r) { - DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n", - qprops->doorbell_off); - goto clean_up_doorbell; - } - - DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, " - "queue type=%d, doorbell=0x%llx\n", - gang->process->pasid, gang_id, qprops->queue_type, - qprops->doorbell_off); - - queue->ring = qprops->ring; - queue->doorbell_off = qprops->doorbell_off; - queue->wptr_gpu_addr = qprops->wptr_gpu_addr; - queue->queue_type = qprops->queue_type; - queue->paging = qprops->paging; - queue->gang = gang; - queue->ring->mqd_ptr = queue->mqd_cpu_ptr; - list_add_tail(&queue->list, 
&gang->queue_list); - - amdgpu_mes_unlock(&adev->mes); - return 0; - -clean_up_doorbell: - amdgpu_mes_kernel_doorbell_free(adev, qprops->doorbell_off); -clean_up_queue_id: - spin_lock_irqsave(&adev->mes.queue_id_lock, flags); - idr_remove(&adev->mes.queue_id_idr, queue->queue_id); - spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags); -clean_up_mqd: - amdgpu_mes_unlock(&adev->mes); - amdgpu_mes_queue_free_mqd(queue); -clean_up_memory: - kfree(queue); - return r; -} - -int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id) -{ - unsigned long flags; - struct amdgpu_mes_queue *queue; - struct amdgpu_mes_gang *gang; - struct mes_remove_queue_input queue_input; - int r; - - /* - * Avoid taking any other locks under MES lock to avoid circular - * lock dependencies. - */ - amdgpu_mes_lock(&adev->mes); - - /* remove the mes gang from idr list */ - spin_lock_irqsave(&adev->mes.queue_id_lock, flags); - - queue = idr_find(&adev->mes.queue_id_idr, queue_id); - if (!queue) { - spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags); - amdgpu_mes_unlock(&adev->mes); - DRM_ERROR("queue id %d doesn't exist\n", queue_id); - return -EINVAL; - } - - idr_remove(&adev->mes.queue_id_idr, queue_id); - spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags); - - DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n", - queue->doorbell_off); - - gang = queue->gang; - queue_input.doorbell_offset = queue->doorbell_off; - queue_input.gang_context_addr = gang->gang_ctx_gpu_addr; - - r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input); - if (r) - DRM_ERROR("failed to remove hardware queue, queue id = %d\n", - queue_id); - - list_del(&queue->list); - amdgpu_mes_kernel_doorbell_free(adev, queue->doorbell_off); - amdgpu_mes_unlock(&adev->mes); - - amdgpu_mes_queue_free_mqd(queue); - kfree(queue); - return 0; -} - -int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id) -{ - unsigned long flags; - struct amdgpu_mes_queue *queue; - struct amdgpu_mes_gang *gang; - struct mes_reset_queue_input queue_input; - int r; - - /* - * Avoid taking any other locks under MES lock to avoid circular - * lock dependencies. 
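
The queue add/remove paths above bracket a doorbell slot through the bitmap helpers removed near the top of this file's diff: allocation finds a clear bit and turns the slot into an absolute dword offset as db_start + slot * 2 (two dwords per 64-bit doorbell), and freeing reverses the mapping before clearing the bit. A compact userspace model of that allocator, with an illustrative base offset and a plain bitmask in place of the kernel bitmap API:

#include <stdint.h>
#include <stdio.h>

#define NUM_DBS 32u

static uint32_t doorbell_bitmap;        /* one bit per slot */
static const uint64_t db_start = 0x400; /* hypothetical BAR dword offset */

static int doorbell_get(uint64_t *index)
{
    unsigned int slot;

    for (slot = 0; slot < NUM_DBS; slot++) {
        if (!(doorbell_bitmap & (1u << slot))) {
            doorbell_bitmap |= 1u << slot;
            *index = db_start + slot * 2; /* two dwords per doorbell */
            return 0;
        }
    }
    return -1; /* the kernel helper returns -ENOSPC here */
}

static void doorbell_free(uint64_t index)
{
    unsigned int slot = (unsigned int)((index - db_start) / 2);

    doorbell_bitmap &= ~(1u << slot);
}

int main(void)
{
    uint64_t idx;

    if (!doorbell_get(&idx)) {
        printf("doorbell at dword offset 0x%llx\n", (unsigned long long)idx);
        doorbell_free(idx);
    }
    return 0;
}
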
- */ - amdgpu_mes_lock(&adev->mes); - - /* remove the mes gang from idr list */ - spin_lock_irqsave(&adev->mes.queue_id_lock, flags); - - queue = idr_find(&adev->mes.queue_id_idr, queue_id); - if (!queue) { - spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags); - amdgpu_mes_unlock(&adev->mes); - DRM_ERROR("queue id %d doesn't exist\n", queue_id); - return -EINVAL; - } - spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags); - - DRM_DEBUG("try to reset queue, doorbell off = 0x%llx\n", - queue->doorbell_off); - - gang = queue->gang; - queue_input.doorbell_offset = queue->doorbell_off; - queue_input.gang_context_addr = gang->gang_ctx_gpu_addr; - - r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input); - if (r) - DRM_ERROR("failed to reset hardware queue, queue id = %d\n", - queue_id); + dev_err(adev->dev, "failed to resume all gangs"); - amdgpu_mes_unlock(&adev->mes); - - return 0; -} - -int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type, - int me_id, int pipe_id, int queue_id, int vmid) -{ - struct mes_reset_queue_input queue_input; - int r; - - queue_input.queue_type = queue_type; - queue_input.use_mmio = true; - queue_input.me_id = me_id; - queue_input.pipe_id = pipe_id; - queue_input.queue_id = queue_id; - queue_input.vmid = vmid; - r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input); - if (r) - DRM_ERROR("failed to reset hardware queue by mmio, queue id = %d\n", - queue_id); return r; } @@ -839,9 +300,11 @@ int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev, queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); queue_input.wptr_addr = ring->wptr_gpu_addr; + amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input); + amdgpu_mes_unlock(&adev->mes); if (r) - DRM_ERROR("failed to map legacy queue\n"); + dev_err(adev->dev, "failed to map legacy queue\n"); return r; } @@ -862,9 +325,11 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev, queue_input.trail_fence_addr = gpu_addr; queue_input.trail_fence_data = seq; + amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input); + amdgpu_mes_unlock(&adev->mes); if (r) - DRM_ERROR("failed to unmap legacy queue\n"); + dev_err(adev->dev, "failed to unmap legacy queue\n"); return r; } @@ -874,7 +339,7 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev, unsigned int vmid, bool use_mmio) { - struct mes_reset_legacy_queue_input queue_input; + struct mes_reset_queue_input queue_input; int r; memset(&queue_input, 0, sizeof(queue_input)); @@ -888,10 +353,15 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev, queue_input.wptr_addr = ring->wptr_gpu_addr; queue_input.vmid = vmid; queue_input.use_mmio = use_mmio; + queue_input.is_kq = true; + if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) + queue_input.legacy_gfx = true; - r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input); + amdgpu_mes_lock(&adev->mes); + r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input); + amdgpu_mes_unlock(&adev->mes); if (r) - DRM_ERROR("failed to reset legacy queue\n"); + dev_err(adev->dev, "failed to reset legacy queue\n"); return r; } @@ -905,7 +375,7 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg) uint32_t *read_val_ptr; if (amdgpu_device_wb_get(adev, &addr_offset)) { - DRM_ERROR("critical bug! too many mes readers\n"); + dev_err(adev->dev, "critical bug! 
too many mes readers\n"); goto error; } read_val_gpu_addr = adev->wb.gpu_addr + (addr_offset * 4); @@ -915,13 +385,15 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg) op_input.read_reg.buffer_addr = read_val_gpu_addr; if (!adev->mes.funcs->misc_op) { - DRM_ERROR("mes rreg is not supported!\n"); + dev_err(adev->dev, "mes rreg is not supported!\n"); goto error; } + amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->misc_op(&adev->mes, &op_input); + amdgpu_mes_unlock(&adev->mes); if (r) - DRM_ERROR("failed to read reg (0x%x)\n", reg); + dev_err(adev->dev, "failed to read reg (0x%x)\n", reg); else val = *(read_val_ptr); @@ -942,14 +414,16 @@ int amdgpu_mes_wreg(struct amdgpu_device *adev, op_input.write_reg.reg_value = val; if (!adev->mes.funcs->misc_op) { - DRM_ERROR("mes wreg is not supported!\n"); + dev_err(adev->dev, "mes wreg is not supported!\n"); r = -EINVAL; goto error; } + amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->misc_op(&adev->mes, &op_input); + amdgpu_mes_unlock(&adev->mes); if (r) - DRM_ERROR("failed to write reg (0x%x)\n", reg); + dev_err(adev->dev, "failed to write reg (0x%x)\n", reg); error: return r; @@ -969,39 +443,16 @@ int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev, op_input.wrm_reg.mask = mask; if (!adev->mes.funcs->misc_op) { - DRM_ERROR("mes reg_write_reg_wait is not supported!\n"); - r = -EINVAL; - goto error; - } - - r = adev->mes.funcs->misc_op(&adev->mes, &op_input); - if (r) - DRM_ERROR("failed to reg_write_reg_wait\n"); - -error: - return r; -} - -int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg, - uint32_t val, uint32_t mask) -{ - struct mes_misc_op_input op_input; - int r; - - op_input.op = MES_MISC_OP_WRM_REG_WAIT; - op_input.wrm_reg.reg0 = reg; - op_input.wrm_reg.ref = val; - op_input.wrm_reg.mask = mask; - - if (!adev->mes.funcs->misc_op) { - DRM_ERROR("mes reg wait is not supported!\n"); + dev_err(adev->dev, "mes reg_write_reg_wait is not supported!\n"); r = -EINVAL; goto error; } + amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->misc_op(&adev->mes, &op_input); + amdgpu_mes_unlock(&adev->mes); if (r) - DRM_ERROR("failed to reg_write_reg_wait\n"); + dev_err(adev->dev, "failed to reg_write_reg_wait\n"); error: return r; @@ -1018,7 +469,8 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, int r; if (!adev->mes.funcs->misc_op) { - DRM_ERROR("mes set shader debugger is not supported!\n"); + dev_err(adev->dev, + "mes set shader debugger is not supported!\n"); return -EINVAL; } @@ -1042,7 +494,7 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, r = adev->mes.funcs->misc_op(&adev->mes, &op_input); if (r) - DRM_ERROR("failed to set_shader_debugger\n"); + dev_err(adev->dev, "failed to set_shader_debugger\n"); amdgpu_mes_unlock(&adev->mes); @@ -1056,7 +508,8 @@ int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev, int r; if (!adev->mes.funcs->misc_op) { - DRM_ERROR("mes flush shader debugger is not supported!\n"); + dev_err(adev->dev, + "mes flush shader debugger is not supported!\n"); return -EINVAL; } @@ -1068,515 +521,19 @@ int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev, r = adev->mes.funcs->misc_op(&adev->mes, &op_input); if (r) - DRM_ERROR("failed to set_shader_debugger\n"); - - amdgpu_mes_unlock(&adev->mes); - - return r; -} - -static void -amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - struct amdgpu_mes_queue_properties *props) -{ - props->queue_type = ring->funcs->type; - props->hqd_base_gpu_addr = 
ring->gpu_addr; - props->rptr_gpu_addr = ring->rptr_gpu_addr; - props->wptr_gpu_addr = ring->wptr_gpu_addr; - props->wptr_mc_addr = - ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs; - props->queue_size = ring->ring_size; - props->eop_gpu_addr = ring->eop_gpu_addr; - props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL; - props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM; - props->paging = false; - props->ring = ring; -} - -#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng) \ -do { \ - if (id_offs < AMDGPU_MES_CTX_MAX_OFFS) \ - return offsetof(struct amdgpu_mes_ctx_meta_data, \ - _eng[ring->idx].slots[id_offs]); \ - else if (id_offs == AMDGPU_MES_CTX_RING_OFFS) \ - return offsetof(struct amdgpu_mes_ctx_meta_data, \ - _eng[ring->idx].ring); \ - else if (id_offs == AMDGPU_MES_CTX_IB_OFFS) \ - return offsetof(struct amdgpu_mes_ctx_meta_data, \ - _eng[ring->idx].ib); \ - else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS) \ - return offsetof(struct amdgpu_mes_ctx_meta_data, \ - _eng[ring->idx].padding); \ -} while(0) - -int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs) -{ - switch (ring->funcs->type) { - case AMDGPU_RING_TYPE_GFX: - DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx); - break; - case AMDGPU_RING_TYPE_COMPUTE: - DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute); - break; - case AMDGPU_RING_TYPE_SDMA: - DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma); - break; - default: - break; - } - - WARN_ON(1); - return -EINVAL; -} - -int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id, - int queue_type, int idx, - struct amdgpu_mes_ctx_data *ctx_data, - struct amdgpu_ring **out) -{ - struct amdgpu_ring *ring; - struct amdgpu_mes_gang *gang; - struct amdgpu_mes_queue_properties qprops = {0}; - int r, queue_id, pasid; - - /* - * Avoid taking any other locks under MES lock to avoid circular - * lock dependencies. 
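
The DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG macro removed above resolved a ring's slot in the flat context metadata struct to a byte offset via offsetof(), which callers then added to the metadata BO's GPU address. A sketch of the same technique; the layout below is illustrative, not the driver's real amdgpu_mes_ctx_meta_data:

#include <stddef.h>
#include <stdio.h>

/* illustrative layout; the driver's metadata struct differs */
struct ring_meta {
    unsigned int slots[4];
    unsigned char ring[64];
};

struct ctx_meta {
    struct ring_meta gfx[2];
    struct ring_meta sdma[2];
};

static size_t ctx_get_offs_gfx(unsigned int ring_idx, unsigned int id_offs)
{
    return offsetof(struct ctx_meta, gfx) +
           ring_idx * sizeof(struct ring_meta) +
           offsetof(struct ring_meta, slots) +
           id_offs * sizeof(unsigned int);
}

int main(void)
{
    /* a slot's GPU address = meta_data_mc_addr + this byte offset */
    printf("gfx ring 1, slot 2: byte offset %zu\n", ctx_get_offs_gfx(1, 2));
    return 0;
}
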
- */ - amdgpu_mes_lock(&adev->mes); - gang = idr_find(&adev->mes.gang_id_idr, gang_id); - if (!gang) { - DRM_ERROR("gang id %d doesn't exist\n", gang_id); - amdgpu_mes_unlock(&adev->mes); - return -EINVAL; - } - pasid = gang->process->pasid; - - ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL); - if (!ring) { - amdgpu_mes_unlock(&adev->mes); - return -ENOMEM; - } - - ring->ring_obj = NULL; - ring->use_doorbell = true; - ring->is_mes_queue = true; - ring->mes_ctx = ctx_data; - ring->idx = idx; - ring->no_scheduler = true; - - if (queue_type == AMDGPU_RING_TYPE_COMPUTE) { - int offset = offsetof(struct amdgpu_mes_ctx_meta_data, - compute[ring->idx].mec_hpd); - ring->eop_gpu_addr = - amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); - } + dev_err(adev->dev, "failed to set_shader_debugger\n"); - switch (queue_type) { - case AMDGPU_RING_TYPE_GFX: - ring->funcs = adev->gfx.gfx_ring[0].funcs; - ring->me = adev->gfx.gfx_ring[0].me; - ring->pipe = adev->gfx.gfx_ring[0].pipe; - break; - case AMDGPU_RING_TYPE_COMPUTE: - ring->funcs = adev->gfx.compute_ring[0].funcs; - ring->me = adev->gfx.compute_ring[0].me; - ring->pipe = adev->gfx.compute_ring[0].pipe; - break; - case AMDGPU_RING_TYPE_SDMA: - ring->funcs = adev->sdma.instance[0].ring.funcs; - break; - default: - BUG(); - } - - r = amdgpu_ring_init(adev, ring, 1024, NULL, 0, - AMDGPU_RING_PRIO_DEFAULT, NULL); - if (r) { - amdgpu_mes_unlock(&adev->mes); - goto clean_up_memory; - } - - amdgpu_mes_ring_to_queue_props(adev, ring, &qprops); - - dma_fence_wait(gang->process->vm->last_update, false); - dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false); amdgpu_mes_unlock(&adev->mes); - r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id); - if (r) - goto clean_up_ring; - - ring->hw_queue_id = queue_id; - ring->doorbell_index = qprops.doorbell_off; - - if (queue_type == AMDGPU_RING_TYPE_GFX) - sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id); - else if (queue_type == AMDGPU_RING_TYPE_COMPUTE) - sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id, - queue_id); - else if (queue_type == AMDGPU_RING_TYPE_SDMA) - sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id, - queue_id); - else - BUG(); - - *out = ring; - return 0; - -clean_up_ring: - amdgpu_ring_fini(ring); -clean_up_memory: - kfree(ring); return r; } -void amdgpu_mes_remove_ring(struct amdgpu_device *adev, - struct amdgpu_ring *ring) -{ - if (!ring) - return; - - amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id); - del_timer_sync(&ring->fence_drv.fallback_timer); - amdgpu_ring_fini(ring); - kfree(ring); -} - uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev, enum amdgpu_mes_priority_level prio) { return adev->mes.aggregated_doorbells[prio]; } -int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev, - struct amdgpu_mes_ctx_data *ctx_data) -{ - int r; - - r = amdgpu_bo_create_kernel(adev, - sizeof(struct amdgpu_mes_ctx_meta_data), - PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, - &ctx_data->meta_data_obj, - &ctx_data->meta_data_mc_addr, - &ctx_data->meta_data_ptr); - if (r) { - dev_warn(adev->dev, "(%d) create CTX bo failed\n", r); - return r; - } - - if (!ctx_data->meta_data_obj) - return -ENOMEM; - - memset(ctx_data->meta_data_ptr, 0, - sizeof(struct amdgpu_mes_ctx_meta_data)); - - return 0; -} - -void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data) -{ - if (ctx_data->meta_data_obj) - amdgpu_bo_free_kernel(&ctx_data->meta_data_obj, - &ctx_data->meta_data_mc_addr, - &ctx_data->meta_data_ptr); -} - -int 
amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct amdgpu_mes_ctx_data *ctx_data) -{ - struct amdgpu_bo_va *bo_va; - struct amdgpu_sync sync; - struct drm_exec exec; - int r; - - amdgpu_sync_create(&sync); - - drm_exec_init(&exec, 0, 0); - drm_exec_until_all_locked(&exec) { - r = drm_exec_lock_obj(&exec, - &ctx_data->meta_data_obj->tbo.base); - drm_exec_retry_on_contention(&exec); - if (unlikely(r)) - goto error_fini_exec; - - r = amdgpu_vm_lock_pd(vm, &exec, 0); - drm_exec_retry_on_contention(&exec); - if (unlikely(r)) - goto error_fini_exec; - } - - bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj); - if (!bo_va) { - DRM_ERROR("failed to create bo_va for meta data BO\n"); - r = -ENOMEM; - goto error_fini_exec; - } - - r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0, - sizeof(struct amdgpu_mes_ctx_meta_data), - AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | - AMDGPU_PTE_EXECUTABLE); - - if (r) { - DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r); - goto error_del_bo_va; - } - - r = amdgpu_vm_bo_update(adev, bo_va, false); - if (r) { - DRM_ERROR("failed to do vm_bo_update on meta data\n"); - goto error_del_bo_va; - } - amdgpu_sync_fence(&sync, bo_va->last_pt_update, GFP_KERNEL); - - r = amdgpu_vm_update_pdes(adev, vm, false); - if (r) { - DRM_ERROR("failed to update pdes on meta data\n"); - goto error_del_bo_va; - } - amdgpu_sync_fence(&sync, vm->last_update, GFP_KERNEL); - - amdgpu_sync_wait(&sync, false); - drm_exec_fini(&exec); - - amdgpu_sync_free(&sync); - ctx_data->meta_data_va = bo_va; - return 0; - -error_del_bo_va: - amdgpu_vm_bo_del(adev, bo_va); - -error_fini_exec: - drm_exec_fini(&exec); - amdgpu_sync_free(&sync); - return r; -} - -int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev, - struct amdgpu_mes_ctx_data *ctx_data) -{ - struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va; - struct amdgpu_bo *bo = ctx_data->meta_data_obj; - struct amdgpu_vm *vm = bo_va->base.vm; - struct dma_fence *fence; - struct drm_exec exec; - long r; - - drm_exec_init(&exec, 0, 0); - drm_exec_until_all_locked(&exec) { - r = drm_exec_lock_obj(&exec, - &ctx_data->meta_data_obj->tbo.base); - drm_exec_retry_on_contention(&exec); - if (unlikely(r)) - goto out_unlock; - - r = amdgpu_vm_lock_pd(vm, &exec, 0); - drm_exec_retry_on_contention(&exec); - if (unlikely(r)) - goto out_unlock; - } - - amdgpu_vm_bo_del(adev, bo_va); - if (!amdgpu_vm_ready(vm)) - goto out_unlock; - - r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, - &fence); - if (r) - goto out_unlock; - if (fence) { - amdgpu_bo_fence(bo, fence, true); - fence = NULL; - } - - r = amdgpu_vm_clear_freed(adev, vm, &fence); - if (r || !fence) - goto out_unlock; - - dma_fence_wait(fence, false); - amdgpu_bo_fence(bo, fence, true); - dma_fence_put(fence); - -out_unlock: - if (unlikely(r < 0)) - dev_err(adev->dev, "failed to clear page tables (%ld)\n", r); - drm_exec_fini(&exec); - - return r; -} - -static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev, - int pasid, int *gang_id, - int queue_type, int num_queue, - struct amdgpu_ring **added_rings, - struct amdgpu_mes_ctx_data *ctx_data) -{ - struct amdgpu_ring *ring; - struct amdgpu_mes_gang_properties gprops = {0}; - int r, j; - - /* create a gang for the process */ - gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL; - gprops.gang_quantum = adev->mes.default_gang_quantum; - gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL; - gprops.priority_level = 
AMDGPU_MES_PRIORITY_LEVEL_NORMAL; - gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL; - - r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id); - if (r) { - DRM_ERROR("failed to add gang\n"); - return r; - } - - /* create queues for the gang */ - for (j = 0; j < num_queue; j++) { - r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j, - ctx_data, &ring); - if (r) { - DRM_ERROR("failed to add ring\n"); - break; - } - - DRM_INFO("ring %s was added\n", ring->name); - added_rings[j] = ring; - } - - return 0; -} - -static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings) -{ - struct amdgpu_ring *ring; - int i, r; - - for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) { - ring = added_rings[i]; - if (!ring) - continue; - - r = amdgpu_ring_test_helper(ring); - if (r) - return r; - - r = amdgpu_ring_test_ib(ring, 1000 * 10); - if (r) { - DRM_DEV_ERROR(ring->adev->dev, - "ring %s ib test failed (%d)\n", - ring->name, r); - return r; - } else - DRM_INFO("ring %s ib test pass\n", ring->name); - } - - return 0; -} - -int amdgpu_mes_self_test(struct amdgpu_device *adev) -{ - struct amdgpu_vm *vm = NULL; - struct amdgpu_mes_ctx_data ctx_data = {0}; - struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL }; - int gang_ids[3] = {0}; - int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 }, - { AMDGPU_RING_TYPE_COMPUTE, 1 }, - { AMDGPU_RING_TYPE_SDMA, 1} }; - int i, r, pasid, k = 0; - - pasid = amdgpu_pasid_alloc(16); - if (pasid < 0) { - dev_warn(adev->dev, "No more PASIDs available!"); - pasid = 0; - } - - vm = kzalloc(sizeof(*vm), GFP_KERNEL); - if (!vm) { - r = -ENOMEM; - goto error_pasid; - } - - r = amdgpu_vm_init(adev, vm, -1); - if (r) { - DRM_ERROR("failed to initialize vm\n"); - goto error_pasid; - } - - r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data); - if (r) { - DRM_ERROR("failed to alloc ctx meta data\n"); - goto error_fini; - } - - ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM; - r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data); - if (r) { - DRM_ERROR("failed to map ctx meta data\n"); - goto error_vm; - } - - r = amdgpu_mes_create_process(adev, pasid, vm); - if (r) { - DRM_ERROR("failed to create MES process\n"); - goto error_vm; - } - - for (i = 0; i < ARRAY_SIZE(queue_types); i++) { - /* On GFX v10.3, fw hasn't supported to map sdma queue. 
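
The range check that follows this comment works because IP versions compare as plain integers. Assuming the driver's usual packing of major/minor/revision into one word (an assumption here, not quoted from this diff), the gate looks like:

#include <stdbool.h>
#include <stdio.h>

/* assumed packing: major << 16 | minor << 8 | revision */
#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

static bool sdma_map_unsupported(unsigned int gc_ver)
{
    return gc_ver >= IP_VERSION(10, 3, 0) &&
           gc_ver <  IP_VERSION(11, 0, 0);
}

int main(void)
{
    printf("GC 10.3.2 skips SDMA: %d\n",
           sdma_map_unsupported(IP_VERSION(10, 3, 2))); /* 1 */
    printf("GC 11.0.0 skips SDMA: %d\n",
           sdma_map_unsupported(IP_VERSION(11, 0, 0))); /* 0 */
    return 0;
}
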
*/ - if (amdgpu_ip_version(adev, GC_HWIP, 0) >= - IP_VERSION(10, 3, 0) && - amdgpu_ip_version(adev, GC_HWIP, 0) < - IP_VERSION(11, 0, 0) && - queue_types[i][0] == AMDGPU_RING_TYPE_SDMA) - continue; - - r = amdgpu_mes_test_create_gang_and_queues(adev, pasid, - &gang_ids[i], - queue_types[i][0], - queue_types[i][1], - &added_rings[k], - &ctx_data); - if (r) - goto error_queues; - - k += queue_types[i][1]; - } - - /* start ring test and ib test for MES queues */ - amdgpu_mes_test_queues(added_rings); - -error_queues: - /* remove all queues */ - for (i = 0; i < ARRAY_SIZE(added_rings); i++) { - if (!added_rings[i]) - continue; - amdgpu_mes_remove_ring(adev, added_rings[i]); - } - - for (i = 0; i < ARRAY_SIZE(gang_ids); i++) { - if (!gang_ids[i]) - continue; - amdgpu_mes_remove_gang(adev, gang_ids[i]); - } - - amdgpu_mes_destroy_process(adev, pasid); - -error_vm: - amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data); - -error_fini: - amdgpu_vm_fini(adev, vm); - -error_pasid: - if (pasid) - amdgpu_pasid_free(pasid); - - amdgpu_mes_ctx_free_meta_data(&ctx_data); - kfree(vm); - return 0; -} - int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe) { const struct mes_firmware_header_v1_0 *mes_hdr; @@ -1690,7 +647,9 @@ static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, goto error; } + amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->misc_op(&adev->mes, &op_input); + amdgpu_mes_unlock(&adev->mes); if (r) dev_err(adev->dev, "failed to change_config.\n"); @@ -1705,7 +664,7 @@ int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev) if (adev->enable_mes && adev->gfx.enable_cleaner_shader) { mutex_lock(&adev->enforce_isolation_mutex); for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) { - if (adev->enforce_isolation[i]) + if (adev->enforce_isolation[i] == AMDGPU_ENFORCE_ISOLATION_ENABLE) r |= amdgpu_mes_set_enforce_isolation(adev, i, true); else r |= amdgpu_mes_set_enforce_isolation(adev, i, false); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index da2c9a8cb3e0..c0d2c195fe2e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -111,8 +111,8 @@ struct amdgpu_mes { uint32_t vmid_mask_gfxhub; uint32_t vmid_mask_mmhub; - uint32_t compute_hqd_mask[AMDGPU_MES_MAX_COMPUTE_PIPES]; uint32_t gfx_hqd_mask[AMDGPU_MES_MAX_GFX_PIPES]; + uint32_t compute_hqd_mask[AMDGPU_MES_MAX_COMPUTE_PIPES]; uint32_t sdma_hqd_mask[AMDGPU_MES_MAX_SDMA_PIPES]; uint32_t aggregated_doorbells[AMDGPU_MES_PRIORITY_NUM_LEVELS]; uint32_t sch_ctx_offs[AMDGPU_MAX_MES_PIPES]; @@ -149,19 +149,6 @@ struct amdgpu_mes { }; -struct amdgpu_mes_process { - int pasid; - struct amdgpu_vm *vm; - uint64_t pd_gpu_addr; - struct amdgpu_bo *proc_ctx_bo; - uint64_t proc_ctx_gpu_addr; - void *proc_ctx_cpu_ptr; - uint64_t process_quantum; - struct list_head gang_list; - uint32_t doorbell_index; - struct mutex doorbell_lock; -}; - struct amdgpu_mes_gang { int gang_id; int priority; @@ -248,18 +235,6 @@ struct mes_remove_queue_input { uint64_t gang_context_addr; }; -struct mes_reset_queue_input { - uint32_t doorbell_offset; - uint64_t gang_context_addr; - bool use_mmio; - uint32_t queue_type; - uint32_t me_id; - uint32_t pipe_id; - uint32_t queue_id; - uint32_t xcc_id; - uint32_t vmid; -}; - struct mes_map_legacy_queue_input { uint32_t queue_type; uint32_t doorbell_offset; @@ -291,7 +266,7 @@ struct mes_resume_gang_input { uint64_t gang_context_addr; }; -struct mes_reset_legacy_queue_input { +struct 
mes_reset_queue_input { uint32_t queue_type; uint32_t doorbell_offset; bool use_mmio; @@ -301,6 +276,8 @@ struct mes_reset_legacy_queue_input { uint64_t mqd_addr; uint64_t wptr_addr; uint32_t vmid; + bool legacy_gfx; + bool is_kq; }; enum mes_misc_opcode { @@ -388,9 +365,6 @@ struct amdgpu_mes_funcs { int (*misc_op)(struct amdgpu_mes *mes, struct mes_misc_op_input *input); - int (*reset_legacy_queue)(struct amdgpu_mes *mes, - struct mes_reset_legacy_queue_input *input); - int (*reset_hw_queue)(struct amdgpu_mes *mes, struct mes_reset_queue_input *input); }; @@ -398,32 +372,13 @@ struct amdgpu_mes_funcs { #define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev)) #define amdgpu_mes_kiq_hw_fini(adev) (adev)->mes.kiq_hw_fini((adev)) -int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs); - int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe); int amdgpu_mes_init(struct amdgpu_device *adev); void amdgpu_mes_fini(struct amdgpu_device *adev); -int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid, - struct amdgpu_vm *vm); -void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid); - -int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid, - struct amdgpu_mes_gang_properties *gprops, - int *gang_id); -int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id); - int amdgpu_mes_suspend(struct amdgpu_device *adev); int amdgpu_mes_resume(struct amdgpu_device *adev); -int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id, - struct amdgpu_mes_queue_properties *qprops, - int *queue_id); -int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id); -int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id); -int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type, - int me_id, int pipe_id, int queue_id, int vmid); - int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev, struct amdgpu_ring *ring); int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev, @@ -438,8 +393,6 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev, uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg); int amdgpu_mes_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t val); -int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg, - uint32_t val, uint32_t mask); int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev, uint32_t reg0, uint32_t reg1, uint32_t ref, uint32_t mask); @@ -451,27 +404,10 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, bool trap_en); int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev, uint64_t process_context_addr); -int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id, - int queue_type, int idx, - struct amdgpu_mes_ctx_data *ctx_data, - struct amdgpu_ring **out); -void amdgpu_mes_remove_ring(struct amdgpu_device *adev, - struct amdgpu_ring *ring); uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev, enum amdgpu_mes_priority_level prio); -int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev, - struct amdgpu_mes_ctx_data *ctx_data); -void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data); -int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct amdgpu_mes_ctx_data *ctx_data); -int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev, - struct amdgpu_mes_ctx_data *ctx_data); - -int amdgpu_mes_self_test(struct amdgpu_device *adev); - int 
amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev); /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c index d085687a47ea..e56ba93a8df6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c @@ -53,6 +53,15 @@ u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev) return 0; } +bool amdgpu_nbio_is_replay_cnt_supported(struct amdgpu_device *adev) +{ + if (amdgpu_sriov_vf(adev) || !adev->asic_funcs->get_pcie_replay_count || + (!adev->nbio.funcs || !adev->nbio.funcs->get_pcie_replay_count)) + return false; + + return true; +} + int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) { int r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h index 79c2f807b9fe..b528de6a01f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -119,4 +119,6 @@ int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev); int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev); +bool amdgpu_nbio_is_replay_cnt_supported(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 80cd6f5273db..122a88294883 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -32,6 +32,7 @@ #include <linux/list.h> #include <linux/slab.h> #include <linux/dma-buf.h> +#include <linux/export.h> #include <drm/drm_drv.h> #include <drm/amdgpu_drm.h> @@ -62,7 +63,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo) amdgpu_bo_kunmap(bo); - if (bo->tbo.base.import_attach) + if (drm_gem_is_imported(&bo->tbo.base)) drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg); drm_gem_object_release(&bo->tbo.base); amdgpu_bo_unref(&bo->parent); @@ -163,8 +164,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) * When GTT is just an alternative to VRAM make sure that we * only use it as fallback and still try to fill up VRAM first. 
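
The comment above introduces the placement tweak this hunk makes: GTT is demoted to a true fallback only once the BO already has a backing resource and VRAM is genuinely among its preferred domains, so first-time allocations still try VRAM first. A sketch of that selection logic, with illustrative domain and flag constants standing in for TTM's:

#include <stdbool.h>
#include <stdio.h>

#define DOMAIN_VRAM      0x1
#define DOMAIN_GTT       0x2
#define PL_FLAG_FALLBACK 0x100

struct place { unsigned int domain; unsigned int flags; };

static int build_places(struct place *p, unsigned int domain,
                        unsigned int preferred, bool has_resource)
{
    int c = 0;

    if (domain & DOMAIN_VRAM)
        p[c++] = (struct place){ DOMAIN_VRAM, 0 };
    if (domain & DOMAIN_GTT) {
        unsigned int flags = 0;

        /* only demote GTT once the BO is backed and VRAM is preferred */
        if (has_resource && (domain & preferred & DOMAIN_VRAM))
            flags |= PL_FLAG_FALLBACK;
        p[c++] = (struct place){ DOMAIN_GTT, flags };
    }
    return c;
}

int main(void)
{
    struct place p[2];
    int i, n = build_places(p, DOMAIN_VRAM | DOMAIN_GTT, DOMAIN_VRAM, true);

    for (i = 0; i < n; i++)
        printf("place %d: domain %#x flags %#x\n", i, p[i].domain, p[i].flags);
    return 0;
}
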
*/ - if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM && - !(adev->flags & AMD_IS_APU)) + if (abo->tbo.resource && !(adev->flags & AMD_IS_APU) && + domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) places[c].flags |= TTM_PL_FLAG_FALLBACK; c++; } @@ -351,7 +352,6 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, return 0; } -EXPORT_SYMBOL(amdgpu_bo_create_kernel); /** * amdgpu_bo_create_isp_user - create user BO for isp @@ -420,7 +420,6 @@ error_unreserve: return r; } -EXPORT_SYMBOL(amdgpu_bo_create_isp_user); /** * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location @@ -524,7 +523,6 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, if (cpu_addr) *cpu_addr = NULL; } -EXPORT_SYMBOL(amdgpu_bo_free_kernel); /** * amdgpu_bo_free_isp_user - free BO for isp use @@ -547,7 +545,6 @@ void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo) } amdgpu_bo_unref(&bo); } -EXPORT_SYMBOL(amdgpu_bo_free_isp_user); /* Validate bo size is bit bigger than the request domain */ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev, @@ -939,7 +936,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain) domain = bo->preferred_domains & domain; /* A shared bo cannot be migrated to VRAM */ - if (bo->tbo.base.import_attach) { + if (drm_gem_is_imported(&bo->tbo.base)) { if (domain & AMDGPU_GEM_DOMAIN_GTT) domain = AMDGPU_GEM_DOMAIN_GTT; else @@ -967,7 +964,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain) */ domain = amdgpu_bo_get_preferred_domain(adev, domain); - if (bo->tbo.base.import_attach) + if (drm_gem_is_imported(&bo->tbo.base)) dma_buf_pin(bo->tbo.base.import_attach); /* force to pin into visible video ram */ @@ -1018,7 +1015,7 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo) if (bo->tbo.pin_count) return; - if (bo->tbo.base.import_attach) + if (drm_gem_is_imported(&bo->tbo.base)) dma_buf_unpin(bo->tbo.base.import_attach); if (bo->tbo.resource->mem_type == TTM_PL_VRAM) { @@ -1044,7 +1041,8 @@ static const char * const amdgpu_vram_names[] = { "GDDR6", "DDR5", "LPDDR4", - "LPDDR5" + "LPDDR5", + "HBM3E" }; /** @@ -1262,7 +1260,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, amdgpu_bo_kunmap(abo); - if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach && + if (abo->tbo.base.dma_buf && !drm_gem_is_imported(&abo->tbo.base) && old_mem && old_mem->mem_type != TTM_PL_SYSTEM) dma_buf_move_notify(abo->tbo.base.dma_buf); @@ -1472,6 +1470,26 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) } /** + * amdgpu_bo_fb_aper_addr - return FB aperture GPU offset of the VRAM bo + * @bo: amdgpu VRAM buffer object for which we query the offset + * + * Returns: + * current FB aperture GPU offset of the object. 
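
The math the new helper below implements: rebase the BO's VRAM page offset onto this node's slice of the XGMI framebuffer aperture, i.e. fb_start plus node_id * node_segment_size, plus the page offset shifted to bytes. A sketch with illustrative values (the final sign extension is omitted here):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static uint64_t fb_aper_addr(uint64_t fb_start, uint32_t node_id,
                             uint64_t node_segment_size, uint64_t vram_page)
{
    /* this node's base within the hive-wide FB aperture */
    uint64_t base = fb_start + node_id * node_segment_size;

    return base + (vram_page << PAGE_SHIFT);
}

int main(void)
{
    /* node 1 of an XGMI hive, 8 GiB per node, BO at VRAM page 0x100 */
    printf("fb aper addr: 0x%llx\n",
           (unsigned long long)fb_aper_addr(0x8000000000ull, 1,
                                            8ull << 30, 0x100));
    return 0;
}
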
+ */ +u64 amdgpu_bo_fb_aper_addr(struct amdgpu_bo *bo) +{ + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + uint64_t offset, fb_base; + + WARN_ON_ONCE(bo->tbo.resource->mem_type != TTM_PL_VRAM); + + fb_base = adev->gmc.fb_start; + fb_base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; + offset = (bo->tbo.resource->start << PAGE_SHIFT) + fb_base; + return amdgpu_gmc_sign_extend(offset); +} + +/** * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo * @bo: amdgpu object for which we query the offset * @@ -1644,7 +1662,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m) amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS); amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID); amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC); - + /* Add the gem obj resv fence dump*/ + if (dma_resv_trylock(bo->tbo.base.resv)) { + dma_resv_describe(bo->tbo.base.resv, m); + dma_resv_unlock(bo->tbo.base.resv); + } seq_puts(m, "\n"); return size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 375448627f7b..c316920f3450 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -304,6 +304,7 @@ int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv, bool intr); int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr); u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); +u64 amdgpu_bo_fb_aper_addr(struct amdgpu_bo *bo); u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo); uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo); uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index df5d5dbd7f0f..0bd51a04be79 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -252,6 +252,7 @@ static int psp_early_init(struct amdgpu_ip_block *ip_block) break; case IP_VERSION(14, 0, 2): case IP_VERSION(14, 0, 3): + adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev); psp_v14_0_set_psp_funcs(psp); break; case IP_VERSION(14, 0, 5): @@ -574,9 +575,11 @@ static int psp_sw_fini(struct amdgpu_ip_block *ip_block) return 0; } -int psp_wait_for(struct psp_context *psp, uint32_t reg_index, - uint32_t reg_val, uint32_t mask, bool check_changed) +int psp_wait_for(struct psp_context *psp, uint32_t reg_index, uint32_t reg_val, + uint32_t mask, uint32_t flags) { + bool check_changed = flags & PSP_WAITREG_CHANGED; + bool verbose = !(flags & PSP_WAITREG_NOVERBOSE); uint32_t val; int i; struct amdgpu_device *adev = psp->adev; @@ -596,6 +599,11 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index, udelay(1); } + if (verbose) + dev_err(adev->dev, + "psp reg (0x%x) wait timed out, mask: %x, read: %x exp: %x", + reg_index, mask, val, reg_val); + return -ETIME; } @@ -654,6 +662,10 @@ static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id) return "BOOT_CFG"; case GFX_CMD_ID_CONFIG_SQ_PERFMON: return "CONFIG_SQ_PERFMON"; + case GFX_CMD_ID_FB_FW_RESERV_ADDR: + return "FB_FW_RESERV_ADDR"; + case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR: + return "FB_FW_RESERV_EXT_ADDR"; default: return "UNKNOWN CMD"; } @@ -871,6 +883,8 @@ static int psp_tmr_init(struct psp_context *psp) &psp->tmr_bo, &psp->tmr_mc_addr, pptr); } + if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && psp->tmr_bo) + psp->tmr_mc_addr = amdgpu_bo_fb_aper_addr(psp->tmr_bo); return ret; } @@ -984,6 +998,93 @@ int 
psp_get_fw_attestation_records_addr(struct psp_context *psp, return ret; } +static int psp_get_fw_reservation_info(struct psp_context *psp, + uint32_t cmd_id, + uint64_t *addr, + uint32_t *size) +{ + int ret; + uint32_t status; + struct psp_gfx_cmd_resp *cmd; + + cmd = acquire_psp_cmd_buf(psp); + + cmd->cmd_id = cmd_id; + + ret = psp_cmd_submit_buf(psp, NULL, cmd, + psp->fence_buf_mc_addr); + if (ret) { + release_psp_cmd_buf(psp); + return ret; + } + + status = cmd->resp.status; + if (status == PSP_ERR_UNKNOWN_COMMAND) { + release_psp_cmd_buf(psp); + *addr = 0; + *size = 0; + return 0; + } + + *addr = (uint64_t)cmd->resp.uresp.fw_reserve_info.reserve_base_address_hi << 32 | + cmd->resp.uresp.fw_reserve_info.reserve_base_address_lo; + *size = cmd->resp.uresp.fw_reserve_info.reserve_size; + + release_psp_cmd_buf(psp); + + return 0; +} + +int psp_update_fw_reservation(struct psp_context *psp) +{ + int ret; + uint64_t reserv_addr, reserv_addr_ext; + uint32_t reserv_size, reserv_size_ext; + struct amdgpu_device *adev = psp->adev; + + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 2)) && + (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 3))) + return 0; + + ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size); + if (ret) + return ret; + ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR, &reserv_addr_ext, &reserv_size_ext); + if (ret) + return ret; + + if (reserv_addr != adev->gmc.real_vram_size - reserv_size) { + dev_warn(adev->dev, "reserve fw region is not valid!\n"); + return 0; + } + + amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL); + + reserv_size = roundup(reserv_size, SZ_1M); + + ret = amdgpu_bo_create_kernel_at(adev, reserv_addr, reserv_size, &adev->mman.fw_reserved_memory, NULL); + if (ret) { + dev_err(adev->dev, "reserve fw region failed(%d)!\n", ret); + amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL); + return ret; + } + + reserv_size_ext = roundup(reserv_size_ext, SZ_1M); + + ret = amdgpu_bo_create_kernel_at(adev, reserv_addr_ext, reserv_size_ext, + &adev->mman.fw_reserved_memory_extend, NULL); + if (ret) { + dev_err(adev->dev, "reserve extend fw region failed(%d)!\n", ret); + amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, NULL); + return ret; + } + + return 0; +} + static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg) { struct psp_context *psp = &adev->psp; @@ -1270,6 +1371,11 @@ int psp_ta_load(struct psp_context *psp, struct ta_context *context) psp_copy_fw(psp, context->bin_desc.start_addr, context->bin_desc.size_bytes); + if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && + context->mem_context.shared_bo) + context->mem_context.shared_mc_addr = + amdgpu_bo_fb_aper_addr(context->mem_context.shared_bo); + psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context); ret = psp_cmd_submit_buf(psp, NULL, cmd, @@ -2214,7 +2320,8 @@ static int psp_securedisplay_initialize(struct psp_context *psp) if (!psp->securedisplay_context.context.bin_desc.size_bytes || !psp->securedisplay_context.context.bin_desc.start_addr) { - dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n"); + dev_info(psp->adev->dev, + "SECUREDISPLAY: optional securedisplay ta ucode is not available\n"); return 0; } @@ -2336,11 +2443,27 @@ bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev) return false; } +static void psp_update_gpu_addresses(struct 
amdgpu_device *adev) +{ + struct psp_context *psp = &adev->psp; + + if (psp->cmd_buf_bo && psp->cmd_buf_mem) { + psp->fw_pri_mc_addr = amdgpu_bo_fb_aper_addr(psp->fw_pri_bo); + psp->fence_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->fence_buf_bo); + psp->cmd_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->cmd_buf_bo); + } + if (adev->firmware.rbuf && psp->km_ring.ring_mem) + psp->km_ring.ring_mem_mc_addr = amdgpu_bo_fb_aper_addr(adev->firmware.rbuf); +} + static int psp_hw_start(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; int ret; + if (amdgpu_virt_xgmi_migrate_enabled(adev)) + psp_update_gpu_addresses(adev); + if (!amdgpu_sriov_vf(adev)) { if ((is_psp_fw_valid(psp->kdb)) && (psp->funcs->bootloader_load_kdb != NULL)) { @@ -2439,6 +2562,14 @@ static int psp_hw_start(struct psp_context *psp) return ret; } + if (!amdgpu_in_reset(adev) && !adev->in_suspend) { + ret = psp_update_fw_reservation(psp); + if (ret) { + dev_err(adev->dev, "update fw reservation failed!\n"); + return ret; + } + } + if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) goto skip_pin_bo; @@ -3521,8 +3652,12 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name) uint8_t *ucode_array_start_addr; int err = 0; - err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED, - "amdgpu/%s_sos.bin", chip_name); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_sos_kicker.bin", chip_name); + else + err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_sos.bin", chip_name); if (err) goto out; @@ -3798,8 +3933,12 @@ int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name) struct amdgpu_device *adev = psp->adev; int err; - err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED, - "amdgpu/%s_ta.bin", chip_name); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_ta_kicker.bin", chip_name); + else + err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_ta.bin", chip_name); if (err) return err; @@ -4116,8 +4255,8 @@ rel_buf: static const struct bin_attribute psp_vbflash_bin_attr = { .attr = {.name = "psp_vbflash", .mode = 0660}, .size = 0, - .write_new = amdgpu_psp_vbflash_write, - .read_new = amdgpu_psp_vbflash_read, + .write = amdgpu_psp_vbflash_write, + .read = amdgpu_psp_vbflash_read, }; /** @@ -4180,11 +4319,115 @@ static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj, const struct attribute_group amdgpu_flash_attr_group = { .attrs = flash_attrs, - .bin_attrs_new = bin_flash_attrs, + .bin_attrs = bin_flash_attrs, .is_bin_visible = amdgpu_bin_flash_attr_is_visible, .is_visible = amdgpu_flash_attr_is_visible, }; +#if defined(CONFIG_DEBUG_FS) +static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp) +{ + struct amdgpu_device *adev = filp->f_inode->i_private; + struct spirom_bo *bo_triplet; + int ret; + + /* serialize open() calls on this file */ + if (!mutex_trylock(&adev->psp.mutex)) + return -EBUSY; + + /* + * make sure only one userspace process is alive for dumping so that + * only one memory buffer of AMD_VBIOS_FILE_MAX_SIZE * 2 is consumed. + * Consider the case where one process tries opening the file while + * another one has proceeded to read or release. In this way we + * eliminate the use of the mutex for the read() and release() + * callbacks as well.
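+ * In short: open() owns the buffer allocation, read() only consumes the
+ * snapshot, and release() frees it; spirom_dump_trip is the single-owner
+ * handle that gates concurrent opens.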
+ */ + if (adev->psp.spirom_dump_trip) { + mutex_unlock(&adev->psp.mutex); + return -EBUSY; + } + + bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL); + if (!bo_triplet) { + mutex_unlock(&adev->psp.mutex); + return -ENOMEM; + } + + ret = amdgpu_bo_create_kernel(adev, AMD_VBIOS_FILE_MAX_SIZE_B * 2, + AMDGPU_GPU_PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &bo_triplet->bo, + &bo_triplet->mc_addr, + &bo_triplet->cpu_addr); + if (ret) + goto rel_trip; + + ret = psp_dump_spirom(&adev->psp, bo_triplet->mc_addr); + if (ret) + goto rel_bo; + + adev->psp.spirom_dump_trip = bo_triplet; + mutex_unlock(&adev->psp.mutex); + return 0; +rel_bo: + amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr, + &bo_triplet->cpu_addr); +rel_trip: + kfree(bo_triplet); + mutex_unlock(&adev->psp.mutex); + dev_err(adev->dev, "Trying IFWI dump fails, err = %d\n", ret); + return ret; +} + +static ssize_t psp_read_spirom_debugfs_read(struct file *filp, char __user *buf, size_t size, + loff_t *pos) +{ + struct amdgpu_device *adev = filp->f_inode->i_private; + struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip; + + if (!bo_triplet) + return -EINVAL; + + return simple_read_from_buffer(buf, + size, + pos, bo_triplet->cpu_addr, + AMD_VBIOS_FILE_MAX_SIZE_B * 2); +} + +static int psp_read_spirom_debugfs_release(struct inode *inode, struct file *filp) +{ + struct amdgpu_device *adev = filp->f_inode->i_private; + struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip; + + if (bo_triplet) { + amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr, + &bo_triplet->cpu_addr); + kfree(bo_triplet); + } + + adev->psp.spirom_dump_trip = NULL; + return 0; +} + +static const struct file_operations psp_dump_spirom_debugfs_ops = { + .owner = THIS_MODULE, + .open = psp_read_spirom_debugfs_open, + .read = psp_read_spirom_debugfs_read, + .release = psp_read_spirom_debugfs_release, + .llseek = default_llseek, +}; +#endif + +void amdgpu_psp_debugfs_init(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + struct drm_minor *minor = adev_to_drm(adev)->primary; + + debugfs_create_file_size("psp_spirom_dump", 0444, minor->debugfs_root, + adev, &psp_dump_spirom_debugfs_ops, AMD_VBIOS_FILE_MAX_SIZE_B * 2); +#endif +} + const struct amd_ip_funcs psp_ip_funcs = { .name = "psp", .early_init = psp_early_init, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 8d5acc415d38..237b624aa51c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -39,6 +39,29 @@ #define PSP_TMR_ALIGNMENT 0x100000 #define PSP_FW_NAME_LEN 0x24 +/* VBIOS gfl defines */ +#define MBOX_READY_MASK 0x80000000 +#define MBOX_STATUS_MASK 0x0000FFFF +#define MBOX_COMMAND_MASK 0x00FF0000 +#define MBOX_READY_FLAG 0x80000000 +#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO 0x2 +#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI 0x3 +#define C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE 0x4 +#define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_LO 0xf +#define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_HI 0x10 +#define C2PMSG_CMD_SPI_GET_FLASH_IMAGE 0x11 + +/* Command register bit 31 set to indicate readiness */ +#define MBOX_TOS_READY_FLAG (GFX_FLAG_RESPONSE) +#define MBOX_TOS_READY_MASK (GFX_CMD_RESPONSE_MASK | GFX_CMD_STATUS_MASK) + +/* Values to check for a successful GFX_CMD response wait. 
Check against + * both status bits and response state - helps to detect a command failure + * or other unexpected cases like a device drop reading all 0xFFs + */ +#define MBOX_TOS_RESP_FLAG (GFX_FLAG_RESPONSE) +#define MBOX_TOS_RESP_MASK (GFX_CMD_RESPONSE_MASK | GFX_CMD_STATUS_MASK) + extern const struct attribute_group amdgpu_flash_attr_group; enum psp_shared_mem_size { @@ -107,9 +130,13 @@ enum psp_reg_prog_id { PSP_REG_IH_RB_CNTL = 0, /* register IH_RB_CNTL */ PSP_REG_IH_RB_CNTL_RING1 = 1, /* register IH_RB_CNTL_RING1 */ PSP_REG_IH_RB_CNTL_RING2 = 2, /* register IH_RB_CNTL_RING2 */ + PSP_REG_MMHUB_L1_TLB_CNTL = 25, PSP_REG_LAST }; +#define PSP_WAITREG_CHANGED BIT(0) /* check if the value has changed */ +#define PSP_WAITREG_NOVERBOSE BIT(1) /* No error verbose */ + struct psp_funcs { int (*init_microcode)(struct psp_context *psp); int (*wait_for_bootloader)(struct psp_context *psp); @@ -137,11 +164,14 @@ struct psp_funcs { int (*load_usbc_pd_fw)(struct psp_context *psp, uint64_t fw_pri_mc_addr); int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver); int (*update_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr); + int (*dump_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr); int (*vbflash_stat)(struct psp_context *psp); int (*fatal_error_recovery_quirk)(struct psp_context *psp); bool (*get_ras_capability)(struct psp_context *psp); bool (*is_aux_sos_load_required)(struct psp_context *psp); bool (*is_reload_needed)(struct psp_context *psp); + int (*reg_program_no_ring)(struct psp_context *psp, uint32_t val, + enum psp_reg_prog_id id); }; struct ta_funcs { @@ -319,6 +349,14 @@ struct psp_runtime_scpm_entry { enum psp_runtime_scpm_authentication scpm_status; }; +#if defined(CONFIG_DEBUG_FS) +struct spirom_bo { + struct amdgpu_bo *bo; + uint64_t mc_addr; + void *cpu_addr; +}; +#endif + struct psp_context { struct amdgpu_device *adev; struct psp_ring km_ring; @@ -406,6 +444,9 @@ struct psp_context { char *vbflash_tmp_buf; size_t vbflash_image_size; bool vbflash_done; +#if defined(CONFIG_DEBUG_FS) + struct spirom_bo *spirom_dump_trip; +#endif }; struct amdgpu_psp_funcs { @@ -464,6 +505,10 @@ struct amdgpu_psp_funcs { ((psp)->funcs->update_spirom ? \ (psp)->funcs->update_spirom((psp), fw_pri_mc_addr) : -EINVAL) +#define psp_dump_spirom(psp, fw_pri_mc_addr) \ + ((psp)->funcs->dump_spirom ? \ + (psp)->funcs->dump_spirom((psp), fw_pri_mc_addr) : -EINVAL) + #define psp_vbflash_status(psp) \ ((psp)->funcs->vbflash_stat ? \ (psp)->funcs->vbflash_stat((psp)) : -EINVAL) @@ -475,6 +520,10 @@ struct amdgpu_psp_funcs { #define psp_is_aux_sos_load_required(psp) \ ((psp)->funcs->is_aux_sos_load_required ? (psp)->funcs->is_aux_sos_load_required((psp)) : 0) +#define psp_reg_program_no_ring(psp, val, id) \ + ((psp)->funcs->reg_program_no_ring ? 
\ + (psp)->funcs->reg_program_no_ring((psp), val, id) : -EINVAL) + extern const struct amd_ip_funcs psp_ip_funcs; extern const struct amdgpu_ip_block_version psp_v3_1_ip_block; @@ -486,8 +535,8 @@ extern const struct amdgpu_ip_block_version psp_v13_0_ip_block; extern const struct amdgpu_ip_block_version psp_v13_0_4_ip_block; extern const struct amdgpu_ip_block_version psp_v14_0_ip_block; -extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index, - uint32_t field_val, uint32_t mask, bool check_changed); +int psp_wait_for(struct psp_context *psp, uint32_t reg_index, + uint32_t field_val, uint32_t mask, uint32_t flags); extern int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index, uint32_t field_val, uint32_t mask, uint32_t msec_timeout); @@ -553,7 +602,7 @@ int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name); int psp_get_fw_attestation_records_addr(struct psp_context *psp, uint64_t *output_ptr); - +int psp_update_fw_reservation(struct psp_context *psp); int psp_load_fw_list(struct psp_context *psp, struct amdgpu_firmware_info **ucode_list, int ucode_count); void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size); @@ -569,5 +618,9 @@ bool amdgpu_psp_get_ras_capability(struct psp_context *psp); int psp_config_sq_perfmon(struct psp_context *psp, uint32_t xcp_id, bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable); bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev); +int amdgpu_psp_reg_program_no_ring(struct psp_context *psp, uint32_t val, + enum psp_reg_prog_id id); +void amdgpu_psp_debugfs_init(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 68685aca2835..540817e296da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -77,6 +77,7 @@ const char *ras_block_string[] = { "jpeg", "ih", "mpio", + "mmsch", }; const char *ras_mca_block_string[] = { @@ -1106,6 +1107,9 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev, err_info->de_count, blk_name); } } else { + if (adev->debug_disable_ce_logs) + return; + for_each_ras_error(err_node, err_data) { err_info = &err_node->err_info; mcm_info = &err_info->mcm_info; @@ -1497,6 +1501,9 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev, !amdgpu_ras_get_aca_debug_mode(adev)) return -EOPNOTSUPP; + if (amdgpu_sriov_vf(adev)) + return -EOPNOTSUPP; + /* skip ras error reset in gpu reset */ if ((amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) && ((smu_funcs && smu_funcs->set_debug_mode) || @@ -2120,7 +2127,7 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev) con->badpages_attr = bin_attr_gpu_vram_bad_pages; sysfs_bin_attr_init(&con->badpages_attr); bin_attrs[0] = &con->badpages_attr; - group.bin_attrs_new = bin_attrs; + group.bin_attrs = bin_attrs; } r = sysfs_create_group(&adev->dev->kobj, &group); @@ -2160,7 +2167,7 @@ void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev) /* Fatal error events are handled on host side */ if (amdgpu_sriov_vf(adev)) return; - /** + /* * If the current interrupt is caused by a non-fatal RAS error, skip * check for fatal error. 
For fatal errors, FED status of all devices * in XGMI hive gets set when the first device gets fatal error @@ -2850,11 +2857,27 @@ static int __amdgpu_ras_convert_rec_array_from_rom(struct amdgpu_device *adev, if (amdgpu_umc_pages_in_a_row(adev, err_data, bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT)) return -EINVAL; + for (i = 0; i < adev->umc.retire_unit; i++) { + err_data->err_addr[i].address = bps[0].address; + err_data->err_addr[i].mem_channel = bps[0].mem_channel; + err_data->err_addr[i].bank = bps[0].bank; + err_data->err_addr[i].err_type = bps[0].err_type; + err_data->err_addr[i].mcumc_id = bps[0].mcumc_id; + } } else { if (amdgpu_ras_mca2pa_by_idx(adev, &bps[0], err_data)) return -EINVAL; } } else { + if (bps[0].address == 0) { + /* for specific old eeprom data, mca address is not stored, + * calc it from pa + */ + if (amdgpu_umc_pa2mca(adev, bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT, + &(bps[0].address), AMDGPU_NPS1_PARTITION_MODE)) + return -EINVAL; + } + if (amdgpu_ras_mca2pa(adev, &bps[0], err_data)) { if (nps == AMDGPU_NPS1_PARTITION_MODE) memcpy(err_data->err_addr, bps, @@ -2872,6 +2895,7 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev, struct eeprom_table_record *bps, struct ras_err_data *err_data, enum amdgpu_memory_partition nps) { + int i = 0; enum amdgpu_memory_partition save_nps; save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK; @@ -2881,10 +2905,30 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev, if (amdgpu_umc_pages_in_a_row(adev, err_data, bps->retired_page << AMDGPU_GPU_PAGE_SHIFT)) return -EINVAL; + for (i = 0; i < adev->umc.retire_unit; i++) { + err_data->err_addr[i].address = bps->address; + err_data->err_addr[i].mem_channel = bps->mem_channel; + err_data->err_addr[i].bank = bps->bank; + err_data->err_addr[i].err_type = bps->err_type; + err_data->err_addr[i].mcumc_id = bps->mcumc_id; + } } else { - if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data)) - return -EINVAL; + if (bps->address) { + if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data)) + return -EINVAL; + } else { + /* for specific old eeprom data, mca address is not stored, + * calc it from pa + */ + if (amdgpu_umc_pa2mca(adev, bps->retired_page << AMDGPU_GPU_PAGE_SHIFT, + &(bps->address), AMDGPU_NPS1_PARTITION_MODE)) + return -EINVAL; + + if (amdgpu_ras_mca2pa(adev, bps, err_data)) + return -EOPNOTSUPP; + } } + return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr, adev->umc.retire_unit); } @@ -2899,7 +2943,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, &adev->psp.ras_context.ras->eeprom_control; enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE; int ret = 0; - uint32_t i; + uint32_t i = 0; if (!con || !con->eh_data || !bps || pages <= 0) return 0; @@ -2920,34 +2964,36 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, mutex_lock(&con->recovery_lock); if (from_rom) { - for (i = 0; i < pages; i++) { - if (control->ras_num_recs - i >= adev->umc.retire_unit) { - if ((bps[i].address == bps[i + 1].address) && - (bps[i].mem_channel == bps[i + 1].mem_channel)) { - //deal with retire_unit records a time - ret = __amdgpu_ras_convert_rec_array_from_rom(adev, - &bps[i], &err_data, nps); - if (ret) - goto free; - i += (adev->umc.retire_unit - 1); + /* there are no pa recs in V3, so skip pa recs processing */ + if (control->tbl_hdr.version < RAS_TABLE_VER_V3) { + for (i = 0; i < pages; i++) { + if (control->ras_num_recs - i >= adev->umc.retire_unit) { + if ((bps[i].address == bps[i + 1].address) && + (bps[i].mem_channel == bps[i + 1].mem_channel)) { + /* deal with retire_unit records at a time */ + ret = __amdgpu_ras_convert_rec_array_from_rom(adev, + &bps[i], &err_data, nps); + if (ret) + control->ras_num_bad_pages -= adev->umc.retire_unit; + i += (adev->umc.retire_unit - 1); + } else { + break; + } } else { break; - } - } else { - break; } } for (; i < pages; i++) { ret = __amdgpu_ras_convert_rec_from_rom(adev, &bps[i], &err_data, nps); if (ret) - goto free; + control->ras_num_bad_pages -= adev->umc.retire_unit; } } else { ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages); } -free: if (from_rom) kfree(err_data.err_addr); mutex_unlock(&con->recovery_lock); @@ -2975,6 +3021,15 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev, return 0; } + if (!con->eeprom_control.is_eeprom_valid) { + dev_warn(adev->dev, + "Failed to save EEPROM table data because of EEPROM data corruption!"); + if (new_cnt) + *new_cnt = 0; + + return 0; + } + mutex_lock(&con->recovery_lock); control = &con->eeprom_control; data = con->eh_data; @@ -3036,21 +3091,28 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev) dev_err(adev->dev, "Failed to load EEPROM table records!"); } else { if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) { - for (i = 0; i < control->ras_num_recs; i++) { - if ((control->ras_num_recs - i) >= adev->umc.retire_unit) { - if ((bps[i].address == bps[i + 1].address) && - (bps[i].mem_channel == bps[i + 1].mem_channel)) { - control->ras_num_pa_recs += adev->umc.retire_unit; - i += (adev->umc.retire_unit - 1); + /* In V3 there are no pa recs, and some cases (when address == 0) + may be parsed as pa recs, so add a version check to avoid it. + */ + if (control->tbl_hdr.version < RAS_TABLE_VER_V3) { + for (i = 0; i < control->ras_num_recs; i++) { + if ((control->ras_num_recs - i) >= adev->umc.retire_unit) { + if ((bps[i].address == bps[i + 1].address) && + (bps[i].mem_channel == bps[i + 1].mem_channel)) { + control->ras_num_pa_recs += adev->umc.retire_unit; + i += (adev->umc.retire_unit - 1); + } else { + control->ras_num_mca_recs += + (control->ras_num_recs - i); + break; + } } else { - control->ras_num_mca_recs += - (control->ras_num_recs - i); + control->ras_num_mca_recs += (control->ras_num_recs - i); break; - } else { - control->ras_num_mca_recs += (control->ras_num_recs - i); - break; } + } + } else { + control->ras_num_mca_recs = control->ras_num_recs; } } @@ -3259,7 +3321,6 @@ static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev, uint64_t de_queried_count; uint32_t new_detect_count, total_detect_count; uint32_t need_query_count = poison_creation_count; - bool query_data_timeout = false; enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION; memset(&info, 0, sizeof(info)); @@ -3288,21 +3349,13 @@ static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev, timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC; if (timeout) { - if (!--timeout) { - query_data_timeout = true; + if (!--timeout) break; - } msleep(1); } } } while (total_detect_count < need_query_count); - if (query_data_timeout) { - dev_warn(adev->dev, "Can't find deferred error!
count: %u\n", - (need_query_count - total_detect_count)); - return -ENOENT; - } - if (total_detect_count) schedule_delayed_work(&ras->page_retirement_dwork, 0); @@ -3453,16 +3506,21 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev) control = &con->eeprom_control; ret = amdgpu_ras_eeprom_init(control); - if (ret) - return ret; + control->is_eeprom_valid = !ret; if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr) control->ras_num_pa_recs = control->ras_num_recs; - if (control->ras_num_recs) { + if (adev->umc.ras && + adev->umc.ras->get_retire_flip_bits) + adev->umc.ras->get_retire_flip_bits(adev); + + if (control->ras_num_recs && control->is_eeprom_valid) { ret = amdgpu_ras_load_bad_pages(adev); - if (ret) - return ret; + if (ret) { + control->is_eeprom_valid = false; + return 0; + } amdgpu_dpm_send_hbm_bad_pages_num( adev, control->ras_num_bad_pages); @@ -3481,7 +3539,7 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev) dev_warn(adev->dev, "Failed to format RAS EEPROM data in V3 version!\n"); } - return ret; + return 0; } int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info) @@ -3690,7 +3748,8 @@ static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev */ if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) || amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) || - amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3)) + amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3) || + amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(5, 0, 1)) adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN | 1 << AMDGPU_RAS_BLOCK__JPEG); else @@ -3792,10 +3851,12 @@ init_ras_enabled_flag: adev->ras_hw_enabled & amdgpu_ras_mask; /* aca is disabled by default except for psp v13_0_6/v13_0_12/v13_0_14 */ - adev->aca.is_enabled = - (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || - amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) || - amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)); + if (!amdgpu_sriov_vf(adev)) { + adev->aca.is_enabled = + (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) || + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)); + } /* bad page feature is not applicable to specific app platform */ if (adev->gmc.is_app_apu && @@ -4372,8 +4433,10 @@ void amdgpu_ras_clear_err_state(struct amdgpu_device *adev) struct amdgpu_ras *ras; ras = amdgpu_ras_get_context(adev); - if (ras) + if (ras) { ras->ras_err_state = 0; + ras->gpu_reset_flags = 0; + } } void amdgpu_ras_set_err_poison(struct amdgpu_device *adev, @@ -4478,8 +4541,11 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) enum ras_event_type type = RAS_EVENT_TYPE_FATAL; u64 event_id; - if (amdgpu_ras_mark_ras_event(adev, type)) + if (amdgpu_ras_mark_ras_event(adev, type)) { + dev_err(adev->dev, + "uncorrectable hardware error (ERREVENT_ATHUB_INTERRUPT) detected!\n"); return; + } event_id = amdgpu_ras_acquire_event_id(adev, type); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 764e9fa0a914..927d6bff734a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -98,6 +98,7 @@ enum amdgpu_ras_block { AMDGPU_RAS_BLOCK__JPEG, AMDGPU_RAS_BLOCK__IH, AMDGPU_RAS_BLOCK__MPIO, + AMDGPU_RAS_BLOCK__MMSCH, AMDGPU_RAS_BLOCK__LAST, AMDGPU_RAS_BLOCK__ANY = -1 @@ -795,6 +796,12 @@ amdgpu_ras_block_to_ta(enum 
amdgpu_ras_block block) { return TA_RAS_BLOCK__VCN; case AMDGPU_RAS_BLOCK__JPEG: return TA_RAS_BLOCK__JPEG; + case AMDGPU_RAS_BLOCK__IH: + return TA_RAS_BLOCK__IH; + case AMDGPU_RAS_BLOCK__MPIO: + return TA_RAS_BLOCK__MPIO; + case AMDGPU_RAS_BLOCK__MMSCH: + return TA_RAS_BLOCK__MMSCH; default: WARN_ONCE(1, "RAS ERROR: unexpected block id %d\n", block); return TA_RAS_BLOCK__UMC; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index 0ea7cfaf3587..9bda9ad13f88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -277,10 +277,11 @@ static int __write_table_header(struct amdgpu_ras_eeprom_control *control) up_read(&adev->reset_domain->sem); if (res < 0) { - DRM_ERROR("Failed to write EEPROM table header:%d", res); + dev_err(adev->dev, "Failed to write EEPROM table header:%d", + res); } else if (res < RAS_TABLE_HEADER_SIZE) { - DRM_ERROR("Short write:%d out of %d\n", - res, RAS_TABLE_HEADER_SIZE); + dev_err(adev->dev, "Short write:%d out of %d\n", res, + RAS_TABLE_HEADER_SIZE); res = -EIO; } else { res = 0; @@ -323,7 +324,8 @@ static int __write_table_ras_info(struct amdgpu_ras_eeprom_control *control) buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL); if (!buf) { - DRM_ERROR("Failed to alloc buf to write table ras info\n"); + dev_err(adev->dev, + "Failed to alloc buf to write table ras info\n"); return -ENOMEM; } @@ -338,10 +340,11 @@ static int __write_table_ras_info(struct amdgpu_ras_eeprom_control *control) up_read(&adev->reset_domain->sem); if (res < 0) { - DRM_ERROR("Failed to write EEPROM table ras info:%d", res); + dev_err(adev->dev, "Failed to write EEPROM table ras info:%d", + res); } else if (res < RAS_TABLE_V2_1_INFO_SIZE) { - DRM_ERROR("Short write:%d out of %d\n", - res, RAS_TABLE_V2_1_INFO_SIZE); + dev_err(adev->dev, "Short write:%d out of %d\n", res, + RAS_TABLE_V2_1_INFO_SIZE); res = -EIO; } else { res = 0; @@ -418,6 +421,7 @@ static void amdgpu_ras_set_eeprom_table_version(struct amdgpu_ras_eeprom_control hdr->version = RAS_TABLE_VER_V2_1; return; case IP_VERSION(12, 0, 0): + case IP_VERSION(12, 5, 0): hdr->version = RAS_TABLE_VER_V3; return; default: @@ -475,6 +479,8 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control) control->ras_num_recs = 0; control->ras_num_bad_pages = 0; + control->ras_num_mca_recs = 0; + control->ras_num_pa_recs = 0; control->ras_fri = 0; amdgpu_dpm_send_hbm_bad_pages_num(adev, control->ras_num_bad_pages); @@ -606,13 +612,13 @@ static int __amdgpu_ras_eeprom_write(struct amdgpu_ras_eeprom_control *control, buf, buf_size); up_read(&adev->reset_domain->sem); if (res < 0) { - DRM_ERROR("Writing %d EEPROM table records error:%d", - num, res); + dev_err(adev->dev, "Writing %d EEPROM table records error:%d", + num, res); } else if (res < buf_size) { /* Short write, return error. 
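 * A short count would otherwise look like success to callers, hence the
 * explicit -EIO conversion below.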
*/ - DRM_ERROR("Wrote %d records out of %d", - res / RAS_TABLE_RECORD_SIZE, num); + dev_err(adev->dev, "Wrote %d records out of %d", + res / RAS_TABLE_RECORD_SIZE, num); res = -EIO; } else { res = 0; @@ -760,18 +766,17 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control) dev_warn(adev->dev, "Saved bad pages %d reaches threshold value %d\n", control->ras_num_bad_pages, ras->bad_page_cnt_threshold); - control->tbl_hdr.header = RAS_TABLE_HDR_BAD; - if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) { - control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD; - control->tbl_rai.health_percent = 0; - } - if ((amdgpu_bad_page_threshold != -1) && - (amdgpu_bad_page_threshold != -2)) + (amdgpu_bad_page_threshold != -2)) { + control->tbl_hdr.header = RAS_TABLE_HDR_BAD; + if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) { + control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD; + control->tbl_rai.health_percent = 0; + } ras->is_rma = true; - - /* ignore the -ENOTSUPP return value */ - amdgpu_dpm_send_rma_reason(adev); + /* ignore the -ENOTSUPP return value */ + amdgpu_dpm_send_rma_reason(adev); + } } if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) @@ -786,8 +791,9 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control) buf_size = control->ras_num_recs * RAS_TABLE_RECORD_SIZE; buf = kcalloc(control->ras_num_recs, RAS_TABLE_RECORD_SIZE, GFP_KERNEL); if (!buf) { - DRM_ERROR("allocating memory for table of size %d bytes failed\n", - control->tbl_hdr.tbl_size); + dev_err(adev->dev, + "allocating memory for table of size %d bytes failed\n", + control->tbl_hdr.tbl_size); res = -ENOMEM; goto Out; } @@ -799,12 +805,11 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control) buf, buf_size); up_read(&adev->reset_domain->sem); if (res < 0) { - DRM_ERROR("EEPROM failed reading records:%d\n", - res); + dev_err(adev->dev, "EEPROM failed reading records:%d\n", res); goto Out; } else if (res < buf_size) { - DRM_ERROR("EEPROM read %d out of %d bytes\n", - res, buf_size); + dev_err(adev->dev, "EEPROM read %d out of %d bytes\n", res, + buf_size); res = -EIO; goto Out; } @@ -865,11 +870,12 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control, return 0; if (num == 0) { - DRM_ERROR("will not append 0 records\n"); + dev_err(adev->dev, "will not append 0 records\n"); return -EINVAL; } else if (num > control->ras_max_record_count) { - DRM_ERROR("cannot append %d records than the size of table %d\n", - num, control->ras_max_record_count); + dev_err(adev->dev, + "cannot append %d records than the size of table %d\n", + num, control->ras_max_record_count); return -EINVAL; } @@ -923,13 +929,13 @@ static int __amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control, buf, buf_size); up_read(&adev->reset_domain->sem); if (res < 0) { - DRM_ERROR("Reading %d EEPROM table records error:%d", - num, res); + dev_err(adev->dev, "Reading %d EEPROM table records error:%d", + num, res); } else if (res < buf_size) { /* Short read, return error. 
*/ - DRM_ERROR("Read %d records out of %d", - res / RAS_TABLE_RECORD_SIZE, num); + dev_err(adev->dev, "Read %d records out of %d", + res / RAS_TABLE_RECORD_SIZE, num); res = -EIO; } else { res = 0; @@ -963,11 +969,11 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control, return 0; if (num == 0) { - DRM_ERROR("will not read 0 records\n"); + dev_err(adev->dev, "will not read 0 records\n"); return -EINVAL; } else if (num > control->ras_num_recs) { - DRM_ERROR("too many records to read:%d available:%d\n", - num, control->ras_num_recs); + dev_err(adev->dev, "too many records to read:%d available:%d\n", + num, control->ras_num_recs); return -EINVAL; } @@ -1299,7 +1305,8 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) { - DRM_ERROR("Out of memory checking RAS table checksum.\n"); + dev_err(adev->dev, + "Out of memory checking RAS table checksum.\n"); return -ENOMEM; } @@ -1308,7 +1315,7 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control control->ras_header_offset, buf, buf_size); if (res < buf_size) { - DRM_ERROR("Partial read for checksum, res:%d\n", res); + dev_err(adev->dev, "Partial read for checksum, res:%d\n", res); /* On partial reads, return -EIO. */ if (res >= 0) @@ -1333,7 +1340,8 @@ static int __read_table_ras_info(struct amdgpu_ras_eeprom_control *control) buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL); if (!buf) { - DRM_ERROR("Failed to alloc buf to read EEPROM table ras info\n"); + dev_err(adev->dev, + "Failed to alloc buf to read EEPROM table ras info\n"); return -ENOMEM; } @@ -1345,7 +1353,8 @@ static int __read_table_ras_info(struct amdgpu_ras_eeprom_control *control) control->i2c_address + control->ras_info_offset, buf, RAS_TABLE_V2_1_INFO_SIZE); if (res < RAS_TABLE_V2_1_INFO_SIZE) { - DRM_ERROR("Failed to read EEPROM table ras info, res:%d", res); + dev_err(adev->dev, + "Failed to read EEPROM table ras info, res:%d", res); res = res >= 0 ? -EIO : res; goto Out; } @@ -1386,23 +1395,46 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) control->i2c_address + control->ras_header_offset, buf, RAS_TABLE_HEADER_SIZE); if (res < RAS_TABLE_HEADER_SIZE) { - DRM_ERROR("Failed to read EEPROM table header, res:%d", res); + dev_err(adev->dev, "Failed to read EEPROM table header, res:%d", + res); return res >= 0 ? 
-EIO : res; } __decode_table_header_from_buf(hdr, buf); - if (hdr->version >= RAS_TABLE_VER_V2_1) { + if (hdr->header != RAS_TABLE_HDR_VAL && + hdr->header != RAS_TABLE_HDR_BAD) { + dev_info(adev->dev, "Creating a new EEPROM table"); + return amdgpu_ras_eeprom_reset_table(control); + } + + switch (hdr->version) { + case RAS_TABLE_VER_V2_1: + case RAS_TABLE_VER_V3: control->ras_num_recs = RAS_NUM_RECS_V2_1(hdr); control->ras_record_offset = RAS_RECORD_START_V2_1; control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1; - } else { + break; + case RAS_TABLE_VER_V1: control->ras_num_recs = RAS_NUM_RECS(hdr); control->ras_record_offset = RAS_RECORD_START; control->ras_max_record_count = RAS_MAX_RECORD_COUNT; + break; + default: + dev_err(adev->dev, + "RAS header invalid, unsupported version: %u", + hdr->version); + return -EINVAL; + } + + if (control->ras_num_recs > control->ras_max_record_count) { + dev_err(adev->dev, + "RAS header invalid, records in header: %u max allowed: %u", + control->ras_num_recs, control->ras_max_record_count); + return -EINVAL; } - control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset); + control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset); control->ras_num_mca_recs = 0; control->ras_num_pa_recs = 0; return 0; @@ -1413,7 +1445,7 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control) { struct amdgpu_device *adev = to_amdgpu_device(control); struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr; struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); - int res; + int res = 0; if (!__is_ras_eeprom_supported(adev)) return 0; @@ -1429,8 +1461,9 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control) control->ras_num_mca_recs * adev->umc.retire_unit; if (hdr->header == RAS_TABLE_HDR_VAL) { - DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records", - control->ras_num_bad_pages); + dev_dbg(adev->dev, + "Found existing EEPROM table with %d records", + control->ras_num_bad_pages); if (hdr->version >= RAS_TABLE_VER_V2_1) { res = __read_table_ras_info(control); @@ -1494,11 +1527,35 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control) "User defined threshold is set, runtime service will be halted when threshold is reached\n"); } } - } else { - DRM_INFO("Creating a new EEPROM table"); - - res = amdgpu_ras_eeprom_reset_table(control); } return res < 0 ? res : 0; } + +void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev) +{ + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + struct amdgpu_ras_eeprom_control *control; + int res; + + if (!__is_ras_eeprom_supported(adev) || !ras) + return; + control = &ras->eeprom_control; + if (!control->is_eeprom_valid) + return; + res = __verify_ras_table_checksum(control); + if (res) { + dev_warn(adev->dev, + "RAS table incorrect checksum or error:%d, trying to recover\n", + res); + if (!amdgpu_ras_eeprom_reset_table(control)) + if (!amdgpu_ras_save_bad_pages(adev, NULL)) + if (!__verify_ras_table_checksum(control)) { + dev_info(adev->dev, "RAS table recovery succeeded\n"); + return; + } + dev_err(adev->dev, "RAS table recovery failed\n"); + control->is_eeprom_valid = false; + } + return; +}
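The recovery path above leans on __verify_ras_table_checksum(). The table checksum is a plain additive byte sum: the stored checksum byte is chosen so that all table bytes, checksum included, sum to zero modulo 256. A minimal sketch of that scheme (the helper name is illustrative, not a driver symbol):

	/* Sketch only: additive byte checksum, mirroring the RAS table scheme. */
	static u8 ras_table_csum_byte(const u8 *buf, size_t len)
	{
		u8 sum = 0;
		size_t i;

		for (i = 0; i < len; i++)
			sum += buf[i];
		/* value that makes the total, including this byte, wrap to zero */
		return -sum;
	}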
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h index ec6d7ea37ad0..ebfca4cb5688 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h @@ -114,6 +114,8 @@ struct amdgpu_ras_eeprom_control { /* Record channel info which occurred bad pages */ u32 bad_channel_bitmap; + + bool is_eeprom_valid; }; /* @@ -159,6 +161,8 @@ void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control); int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control); +void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev); + extern const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops; extern const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index d55c8b7fdb59..6379bb25bf5c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -99,6 +99,29 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw) return 0; } +/** + * amdgpu_ring_alloc_reemit - allocate space on the ring buffer for reemit + * + * @ring: amdgpu_ring structure holding ring information + * @ndw: number of dwords to allocate in the ring buffer + * + * Allocate @ndw dwords in the ring buffer (all asics). + * doesn't check the max_dw limit as we may be reemitting + * several submissions. + */ +static void amdgpu_ring_alloc_reemit(struct amdgpu_ring *ring, unsigned int ndw) +{ + /* Align requested size with padding so unlock_commit can + * pad safely */ + ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask; + + ring->count_dw = ndw; + ring->wptr_old = ring->wptr; + + if (ring->funcs->begin_use) + ring->funcs->begin_use(ring); +} + /** amdgpu_ring_insert_nop - insert NOP packets * * @ring: amdgpu_ring structure holding ring information @@ -187,14 +210,10 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring) } #define amdgpu_ring_get_gpu_addr(ring, offset) \ - (ring->is_mes_queue ? \ - (ring->mes_ctx->meta_data_gpu_addr + offset) : \ - (ring->adev->wb.gpu_addr + offset * 4)) + (ring->adev->wb.gpu_addr + offset * 4) #define amdgpu_ring_get_cpu_addr(ring, offset) \ - (ring->is_mes_queue ? \ - (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \ - (&ring->adev->wb.wb[offset])) + (&ring->adev->wb.wb[offset]) /** * amdgpu_ring_init - init driver ring struct. 
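Worth pulling out of amdgpu_ring_alloc_reemit() above: the alignment round-up is the standard power-of-two mask trick. A standalone illustration (the function name is ours, not a driver symbol):

	/*
	 * align_mask is (alignment - 1) for a power-of-two alignment, so adding
	 * the mask and then clearing its bits rounds ndw up to the next
	 * boundary, e.g. ndw = 13 with align_mask = 0x7 yields 16.
	 */
	static u32 ring_align_up(u32 ndw, u32 align_mask)
	{
		return (ndw + align_mask) & ~align_mask;
	}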
@@ -243,57 +262,42 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, ring->sched_score = sched_score; ring->vmid_wait = dma_fence_get_stub(); - if (!ring->is_mes_queue) { - ring->idx = adev->num_rings++; - adev->rings[ring->idx] = ring; - } + ring->idx = adev->num_rings++; + adev->rings[ring->idx] = ring; r = amdgpu_fence_driver_init_ring(ring); if (r) return r; } - if (ring->is_mes_queue) { - ring->rptr_offs = amdgpu_mes_ctx_get_offs(ring, - AMDGPU_MES_CTX_RPTR_OFFS); - ring->wptr_offs = amdgpu_mes_ctx_get_offs(ring, - AMDGPU_MES_CTX_WPTR_OFFS); - ring->fence_offs = amdgpu_mes_ctx_get_offs(ring, - AMDGPU_MES_CTX_FENCE_OFFS); - ring->trail_fence_offs = amdgpu_mes_ctx_get_offs(ring, - AMDGPU_MES_CTX_TRAIL_FENCE_OFFS); - ring->cond_exe_offs = amdgpu_mes_ctx_get_offs(ring, - AMDGPU_MES_CTX_COND_EXE_OFFS); - } else { - r = amdgpu_device_wb_get(adev, &ring->rptr_offs); - if (r) { - dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); - return r; - } + r = amdgpu_device_wb_get(adev, &ring->rptr_offs); + if (r) { + dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); + return r; + } - r = amdgpu_device_wb_get(adev, &ring->wptr_offs); - if (r) { - dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r); - return r; - } + r = amdgpu_device_wb_get(adev, &ring->wptr_offs); + if (r) { + dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r); + return r; + } - r = amdgpu_device_wb_get(adev, &ring->fence_offs); - if (r) { - dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r); - return r; - } + r = amdgpu_device_wb_get(adev, &ring->fence_offs); + if (r) { + dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r); + return r; + } - r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs); - if (r) { - dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r); - return r; - } + r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs); + if (r) { + dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r); + return r; + } - r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs); - if (r) { - dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r); - return r; - } + r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs); + if (r) { + dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r); + return r; } ring->fence_gpu_addr = @@ -352,19 +356,14 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, /* Initialize cached_rptr to 0 */ ring->cached_rptr = 0; - /* Allocate ring buffer */ - if (ring->is_mes_queue) { - int offset = 0; - - BUG_ON(ring->ring_size > PAGE_SIZE*4); - - offset = amdgpu_mes_ctx_get_offs(ring, - AMDGPU_MES_CTX_RING_OFFS); - ring->gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); - ring->ring = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); - amdgpu_ring_clear_ring(ring); + if (!ring->ring_backup) { + ring->ring_backup = kvzalloc(ring->ring_size, GFP_KERNEL); + if (!ring->ring_backup) + return -ENOMEM; + } - } else if (ring->ring_obj == NULL) { + /* Allocate ring buffer */ + if (ring->ring_obj == NULL) { r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &ring->ring_obj, @@ -372,6 +371,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, (void **)&ring->ring); if (r) { dev_err(adev->dev, "(%d) ring create failed\n", r); + kvfree(ring->ring_backup); return r; } amdgpu_ring_clear_ring(ring); @@ -401,32 +401,28 @@ void amdgpu_ring_fini(struct 
amdgpu_ring *ring) { /* Not to finish a ring which is not initialized */ - if (!(ring->adev) || - (!ring->is_mes_queue && !(ring->adev->rings[ring->idx]))) + if (!(ring->adev) || !(ring->adev->rings[ring->idx])) return; ring->sched.ready = false; - if (!ring->is_mes_queue) { - amdgpu_device_wb_free(ring->adev, ring->rptr_offs); - amdgpu_device_wb_free(ring->adev, ring->wptr_offs); + amdgpu_device_wb_free(ring->adev, ring->rptr_offs); + amdgpu_device_wb_free(ring->adev, ring->wptr_offs); - amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs); - amdgpu_device_wb_free(ring->adev, ring->fence_offs); + amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs); + amdgpu_device_wb_free(ring->adev, ring->fence_offs); - amdgpu_bo_free_kernel(&ring->ring_obj, - &ring->gpu_addr, - (void **)&ring->ring); - } else { - kfree(ring->fence_drv.fences); - } + amdgpu_bo_free_kernel(&ring->ring_obj, + &ring->gpu_addr, + (void **)&ring->ring); + kvfree(ring->ring_backup); + ring->ring_backup = NULL; dma_fence_put(ring->vmid_wait); ring->vmid_wait = NULL; ring->me = 0; - if (!ring->is_mes_queue) - ring->adev->rings[ring->idx] = NULL; + ring->adev->rings[ring->idx] = NULL; } /** @@ -463,6 +459,7 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid, { unsigned long flags; ktime_t deadline; + bool ret; if (unlikely(ring->adev->debug_disable_soft_recovery)) return false; @@ -477,12 +474,16 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid, dma_fence_set_error(fence, -ENODATA); spin_unlock_irqrestore(fence->lock, flags); - atomic_inc(&ring->adev->gpu_reset_counter); while (!dma_fence_is_signaled(fence) && ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0) ring->funcs->soft_recovery(ring, vmid); - return dma_fence_is_signaled(fence); + ret = dma_fence_is_signaled(fence); + /* increment the counter only if soft reset worked */ + if (ret) + atomic_inc(&ring->adev->gpu_reset_counter); + + return ret; } /* @@ -608,59 +609,17 @@ static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { struct amdgpu_ring *ring = file_inode(f)->i_private; - volatile u32 *mqd; - u32 *kbuf; - int r, i; - uint32_t value, result; - - if (*pos & 3 || size & 3) - return -EINVAL; - - kbuf = kmalloc(ring->mqd_size, GFP_KERNEL); - if (!kbuf) - return -ENOMEM; + ssize_t bytes = min_t(ssize_t, ring->mqd_size - *pos, size); + void *from = ((u8 *)ring->mqd_ptr) + *pos; - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)) - goto err_free; + if (*pos > ring->mqd_size) + return 0; - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd); - if (r) - goto err_unreserve; - - /* - * Copy to local buffer to avoid put_user(), which might fault - * and acquire mmap_sem, under reservation_ww_class_mutex. 
- */ - for (i = 0; i < ring->mqd_size/sizeof(u32); i++) - kbuf[i] = mqd[i]; - - amdgpu_bo_kunmap(ring->mqd_obj); - amdgpu_bo_unreserve(ring->mqd_obj); - - result = 0; - while (size) { - if (*pos >= ring->mqd_size) - break; - - value = kbuf[*pos/4]; - r = put_user(value, (uint32_t *)buf); - if (r) - goto err_free; - buf += 4; - result += 4; - size -= 4; - *pos += 4; - } - - kfree(kbuf); - return result; + if (copy_to_user(buf, from, bytes)) + return -EFAULT; -err_unreserve: - amdgpu_bo_unreserve(ring->mqd_obj); -err_free: - kfree(kbuf); - return r; + *pos += bytes; + return bytes; } static const struct file_operations amdgpu_debugfs_mqd_fops = { @@ -760,6 +719,7 @@ static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring, prop->eop_gpu_addr = ring->eop_gpu_addr; prop->use_doorbell = ring->use_doorbell; prop->doorbell_index = ring->doorbell_index; + prop->kernel_queue = true; /* map_queues packet doesn't need activate the queue, * so only kiq need set this field. @@ -831,3 +791,69 @@ bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring) return true; } + +void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring, + struct amdgpu_fence *guilty_fence) +{ + /* Stop the scheduler to prevent anybody else from touching the ring buffer. */ + drm_sched_wqueue_stop(&ring->sched); + /* back up the non-guilty commands */ + amdgpu_ring_backup_unprocessed_commands(ring, guilty_fence); +} + +int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring, + struct amdgpu_fence *guilty_fence) +{ + unsigned int i; + int r; + + /* verify that the ring is functional */ + r = amdgpu_ring_test_ring(ring); + if (r) + return r; + + /* signal the fence of the bad job */ + if (guilty_fence) + amdgpu_fence_driver_guilty_force_completion(guilty_fence); + /* Re-emit the non-guilty commands */ + if (ring->ring_backup_entries_to_copy) { + amdgpu_ring_alloc_reemit(ring, ring->ring_backup_entries_to_copy); + for (i = 0; i < ring->ring_backup_entries_to_copy; i++) + amdgpu_ring_write(ring, ring->ring_backup[i]); + amdgpu_ring_commit(ring); + } + /* Start the scheduler again */ + drm_sched_wqueue_start(&ring->sched); + return 0; +} + +bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring, + u32 reset_type) +{ + switch (ring->funcs->type) { + case AMDGPU_RING_TYPE_GFX: + if (ring->adev->gfx.gfx_supported_reset & reset_type) + return true; + break; + case AMDGPU_RING_TYPE_COMPUTE: + if (ring->adev->gfx.compute_supported_reset & reset_type) + return true; + break; + case AMDGPU_RING_TYPE_SDMA: + if (ring->adev->sdma.supported_reset & reset_type) + return true; + break; + case AMDGPU_RING_TYPE_VCN_DEC: + case AMDGPU_RING_TYPE_VCN_ENC: + if (ring->adev->vcn.supported_reset & reset_type) + return true; + break; + case AMDGPU_RING_TYPE_VCN_JPEG: + if (ring->adev->jpeg.supported_reset & reset_type) + return true; + break; + default: + break; + } + return false; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index bb2b66385223..7670f5d82b9e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -118,6 +118,7 @@ struct amdgpu_fence_driver { /* sync_seq is protected by ring emission lock */ uint32_t sync_seq; atomic_t last_seq; + u64 signalled_wptr; bool initialized; struct amdgpu_irq_src *irq_src; unsigned irq_type; @@ -127,11 +128,35 @@ struct amdgpu_fence_driver { struct dma_fence **fences; }; +/* + * Fences mark an event in the GPUs pipeline and are used + * for GPU/CPU synchronization. 
When the fence is written, + * it is expected that all buffers associated with that fence + * are no longer in use by the associated ring on the GPU and + * that the relevant GPU caches have been flushed. + */ + +struct amdgpu_fence { + struct dma_fence base; + + /* RB, DMA, etc. */ + struct amdgpu_ring *ring; + ktime_t start_timestamp; + + /* wptr for the fence for resets */ + u64 wptr; + /* fence context for resets */ + u64 context; + uint32_t seq; +}; + extern const struct drm_sched_backend_ops amdgpu_sched_ops; void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring); void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error); void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring); +void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence); +void amdgpu_fence_save_wptr(struct dma_fence *fence); int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring); int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, @@ -141,8 +166,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev); void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev); int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev); void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev); -int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, struct amdgpu_job *job, - unsigned flags); +int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, + struct amdgpu_fence *af, unsigned int flags); int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s, uint32_t timeout); bool amdgpu_fence_process(struct amdgpu_ring *ring); @@ -164,8 +189,24 @@ void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq, /* provided by hw blocks that expose a ring buffer for commands */ struct amdgpu_ring_funcs { + /** + * @type: + * + * GFX, Compute, SDMA, UVD, VCE, VCN, VPE, KIQ, MES, UMSCH, and CPER + * use ring buffers. The type field just identifies which component the + * ring buffer is associated with. + */ enum amdgpu_ring_type type; uint32_t align_mask; + + /** + * @nop: + * + * Every block in the amdgpu has no-op instructions (e.g., GFX 10 + * uses PACKET3(PACKET3_NOP, 0x3FFF), VCN 5 uses VCN_ENC_CMD_NO_OP, + * etc). This field receives the specific no-op for the component + * that initializes the ring. + */ u32 nop; bool support_64bit_ptrs; bool no_user_fence; @@ -236,11 +277,14 @@ struct amdgpu_ring_funcs { void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset); void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset); void (*patch_de)(struct amdgpu_ring *ring, unsigned offset); - int (*reset)(struct amdgpu_ring *ring, unsigned int vmid); + int (*reset)(struct amdgpu_ring *ring, unsigned int vmid, + struct amdgpu_fence *timedout_fence); void (*emit_cleaner_shader)(struct amdgpu_ring *ring); - bool (*is_guilty)(struct amdgpu_ring *ring); }; +/** + * amdgpu_ring - Holds ring information + */ struct amdgpu_ring { struct amdgpu_device *adev; const struct amdgpu_ring_funcs *funcs; @@ -249,16 +293,67 @@ struct amdgpu_ring { struct amdgpu_bo *ring_obj; uint32_t *ring; + /* backups for resets */ + uint32_t *ring_backup; + unsigned int ring_backup_entries_to_copy; unsigned rptr_offs; u64 rptr_gpu_addr; volatile u32 *rptr_cpu_addr; + + /** + * @wptr: + * + * This is part of the Ring buffer implementation and represents the + * write pointer. The wptr determines where the host has written. 
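+ * The value counts in dwords and is kept within a valid range by
+ * ptr_mask; see the field documentation further below.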
+ */ u64 wptr; + + /** + * @wptr_old: + * + * Before update wptr with the new value, usually the old value is + * stored in the wptr_old. + */ u64 wptr_old; unsigned ring_size; + + /** + * @max_dw: + * + * Maximum number of DWords for ring allocation. This information is + * provided at the ring initialization time, and each IP block can + * specify a specific value. Check places that invoke + * amdgpu_ring_init() to see the maximum size per block. + */ unsigned max_dw; + + /** + * @count_dw: + * + * This value starts with the maximum amount of DWords supported by the + * ring. This value is updated based on the ring manipulation. + */ int count_dw; uint64_t gpu_addr; + + /** + * @ptr_mask: + * + * Some IPs provide support for 64-bit pointers and others for 32-bit + * only; this behavior is component-specific and defined by the field + * support_64bit_ptr. If the IP block supports 64-bits, the mask + * 0xffffffffffffffff is set; otherwise, this value assumes buf_mask. + * Notice that this field is used to keep wptr under a valid range. + */ uint64_t ptr_mask; + + /** + * @buf_mask: + * + * Buffer mask is a value used to keep wptr count under its + * thresholding. Buffer mask initialized during the ring buffer + * initialization time, and it is defined as (ring_size / 4) -1. + */ uint32_t buf_mask; u32 idx; u32 xcc_id; @@ -276,6 +371,13 @@ struct amdgpu_ring { bool use_pollmem; unsigned wptr_offs; u64 wptr_gpu_addr; + + /** + * @wptr_cpu_addr: + * + * This is the CPU address pointer in the writeback slot. This is used + * to commit changes to the GPU. + */ volatile u32 *wptr_cpu_addr; unsigned fence_offs; u64 fence_gpu_addr; @@ -297,20 +399,15 @@ struct amdgpu_ring { struct dma_fence *vmid_wait; bool has_compute_vm_bug; bool no_scheduler; + bool no_user_submission; int hw_prio; unsigned num_hw_submission; atomic_t *sched_score; - /* used for mes */ - bool is_mes_queue; - uint32_t hw_queue_id; - struct amdgpu_mes_ctx_data *mes_ctx; - bool is_sw_ring; unsigned int entry_index; /* store the cached rptr to restore after reset */ uint64_t cached_rptr; - }; #define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib))) @@ -340,7 +437,7 @@ struct amdgpu_ring { #define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o))) #define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o))) #define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o))) -#define amdgpu_ring_reset(r, v) (r)->funcs->reset((r), (v)) +#define amdgpu_ring_reset(r, v, f) (r)->funcs->reset((r), (v), (f)) unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type); int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw); @@ -435,15 +532,6 @@ static inline void amdgpu_ring_patch_cond_exec(struct amdgpu_ring *ring, ring->ring[offset] = cur - offset; } -#define amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset) \ - (ring->is_mes_queue && ring->mes_ctx ? \ - (ring->mes_ctx->meta_data_gpu_addr + offset) : 0) - -#define amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset) \ - (ring->is_mes_queue && ring->mes_ctx ? 
\ - (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \ - NULL) - int amdgpu_ring_test_helper(struct amdgpu_ring *ring); void amdgpu_debugfs_ring_init(struct amdgpu_device *adev, @@ -474,4 +562,12 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev); void amdgpu_ib_pool_fini(struct amdgpu_device *adev); int amdgpu_ib_ring_tests(struct amdgpu_device *adev); bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring); +void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring, + struct amdgpu_fence *guilty_fence); +void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring, + struct amdgpu_fence *guilty_fence); +int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring, + struct amdgpu_fence *guilty_fence); +bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring, + u32 reset_type); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c index 1c66da1c3fb4..7e7d6c3865bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c @@ -124,7 +124,7 @@ static void amdgpu_mux_resubmit_chunks(struct amdgpu_ring_mux *mux) } } - del_timer(&mux->resubmit_timer); + timer_delete(&mux->resubmit_timer); mux->s_resubmit = false; } @@ -135,7 +135,8 @@ static void amdgpu_ring_mux_schedule_resubmit(struct amdgpu_ring_mux *mux) static void amdgpu_mux_resubmit_fallback(struct timer_list *t) { - struct amdgpu_ring_mux *mux = from_timer(mux, t, resubmit_timer); + struct amdgpu_ring_mux *mux = timer_container_of(mux, t, + resubmit_timer); if (!spin_trylock(&mux->lock)) { amdgpu_ring_mux_schedule_resubmit(mux); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h index fce22d3f816b..c210625be220 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h @@ -237,6 +237,20 @@ struct amdgpu_rlc_funcs { void (*unset_safe_mode)(struct amdgpu_device *adev, int xcc_id); int (*init)(struct amdgpu_device *adev); u32 (*get_csb_size)(struct amdgpu_device *adev); + + /** + * @get_csb_buffer: Get the clear state to be put into the hardware. + * + * The parameter adev is used to get the CS data and other gfx info, + * and buffer is the RLC CS pointer + * + * Sometimes, the user space puts a request to clear the state in the + * command buffer; this function provides the clear state that gets put + * into the hardware. Note that the driver programs Clear State + * Indirect Buffer (CSB) explicitly when it sets up the kernel rings, + * and it also provides a pointer to it which is used by the firmware + * to load the clear state in some cases. 
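
As a hedged illustration of the shape a @get_csb_buffer implementation takes (the actual packet stream is GFX-generation specific; the preamble packet macros below are the ones the GFX code uses, but example_get_csb_buffer() itself is only a sketch):

static void example_get_csb_buffer(struct amdgpu_device *adev,
				   volatile u32 *buffer)
{
	u32 count = 0;

	if (adev->gfx.rlc.cs_data == NULL || buffer == NULL)
		return;

	/* open the clear state preamble */
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	/* ... SET_CONTEXT_REG packets derived from adev->gfx.rlc.cs_data ... */

	/* close the preamble */
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
}
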
+ */ void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer); int (*get_cp_table_num)(struct amdgpu_device *adev); int (*resume)(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index 529c9696c2f3..8b8a04138711 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -26,6 +26,8 @@ #include "amdgpu_sdma.h" #include "amdgpu_ras.h" #include "amdgpu_reset.h" +#include "gc/gc_10_1_0_offset.h" +#include "gc/gc_10_3_0_sh_mask.h" #define AMDGPU_CSA_SDMA_SIZE 64 /* SDMA CSA reside in the 3rd page of CSA */ @@ -76,22 +78,14 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, if (amdgpu_sriov_vf(adev) || vmid == 0 || !adev->gfx.mcbp) return 0; - if (ring->is_mes_queue) { - uint32_t offset = 0; + r = amdgpu_sdma_get_index_from_ring(ring, &index); - offset = offsetof(struct amdgpu_mes_ctx_meta_data, - sdma[ring->idx].sdma_meta_data); - csa_mc_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); - } else { - r = amdgpu_sdma_get_index_from_ring(ring, &index); - - if (r || index > 31) - csa_mc_addr = 0; - else - csa_mc_addr = amdgpu_csa_vaddr(adev) + - AMDGPU_CSA_SDMA_OFFSET + - index * AMDGPU_CSA_SDMA_SIZE; - } + if (r || index > 31) + csa_mc_addr = 0; + else + csa_mc_addr = amdgpu_csa_vaddr(adev) + + AMDGPU_CSA_SDMA_OFFSET + + index * AMDGPU_CSA_SDMA_SIZE; return csa_mc_addr; } @@ -537,110 +531,78 @@ bool amdgpu_sdma_is_shared_inv_eng(struct amdgpu_device *adev, struct amdgpu_rin return false; } -/** - * amdgpu_sdma_register_on_reset_callbacks - Register SDMA reset callbacks - * @funcs: Pointer to the callback structure containing pre_reset and post_reset functions - * - * This function allows KFD and AMDGPU to register their own callbacks for handling - * pre-reset and post-reset operations for engine reset. These are needed because engine - * reset will stop all queues on that engine. - */ -void amdgpu_sdma_register_on_reset_callbacks(struct amdgpu_device *adev, struct sdma_on_reset_funcs *funcs) +static int amdgpu_sdma_soft_reset(struct amdgpu_device *adev, u32 instance_id) { - if (!funcs) - return; + struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id]; - /* Ensure the reset_callback_list is initialized */ - if (!adev->sdma.reset_callback_list.next) { - INIT_LIST_HEAD(&adev->sdma.reset_callback_list); - } - /* Initialize the list node in the callback structure */ - INIT_LIST_HEAD(&funcs->list); + if (sdma_instance->funcs->soft_reset_kernel_queue) + return sdma_instance->funcs->soft_reset_kernel_queue(adev, instance_id); - /* Add the callback structure to the global list */ - list_add_tail(&funcs->list, &adev->sdma.reset_callback_list); + return -EOPNOTSUPP; } /** * amdgpu_sdma_reset_engine - Reset a specific SDMA engine * @adev: Pointer to the AMDGPU device - * @instance_id: ID of the SDMA engine instance to reset - * - * This function performs the following steps: - * 1. Calls all registered pre_reset callbacks to allow KFD and AMDGPU to save their state. - * 2. Resets the specified SDMA engine instance. - * 3. Calls all registered post_reset callbacks to allow KFD and AMDGPU to restore their state. + * @instance_id: Logical ID of the SDMA engine instance to reset + * @caller_handles_kernel_queues: Skip kernel queue processing. Caller + * will handle it. * * Returns: 0 on success, or a negative error code on failure. 
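
A hedged sketch of the two call modes the new @caller_handles_kernel_queues flag distinguishes; example_reset_sdma() is illustrative only, and instance_id is the logical SDMA instance in both cases:

static int example_reset_sdma(struct amdgpu_device *adev, u32 instance_id,
			      bool kfd_owns_queues)
{
	/*
	 * false: generic timeout path; the helper stops/restarts the
	 *        kernel GFX and page rings around the soft reset.
	 * true:  the caller (e.g. KFD) has already quiesced the queues
	 *        and will restore them itself.
	 */
	return amdgpu_sdma_reset_engine(adev, instance_id, kfd_owns_queues);
}
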
*/ -int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id) +int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id, + bool caller_handles_kernel_queues) { - struct sdma_on_reset_funcs *funcs; int ret = 0; struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id]; struct amdgpu_ring *gfx_ring = &sdma_instance->ring; struct amdgpu_ring *page_ring = &sdma_instance->page; - bool gfx_sched_stopped = false, page_sched_stopped = false; mutex_lock(&sdma_instance->engine_reset_mutex); - /* Stop the scheduler's work queue for the GFX and page rings if they are running. - * This ensures that no new tasks are submitted to the queues while - * the reset is in progress. - */ - if (!amdgpu_ring_sched_ready(gfx_ring)) { + + if (!caller_handles_kernel_queues) { + /* Stop the scheduler's work queue for the GFX and page rings if they are running. + * This ensures that no new tasks are submitted to the queues while + * the reset is in progress. + */ drm_sched_wqueue_stop(&gfx_ring->sched); - gfx_sched_stopped = true; - } - if (adev->sdma.has_page_queue && !amdgpu_ring_sched_ready(page_ring)) { - drm_sched_wqueue_stop(&page_ring->sched); - page_sched_stopped = true; + if (adev->sdma.has_page_queue) + drm_sched_wqueue_stop(&page_ring->sched); } - /* Invoke all registered pre_reset callbacks */ - list_for_each_entry(funcs, &adev->sdma.reset_callback_list, list) { - if (funcs->pre_reset) { - ret = funcs->pre_reset(adev, instance_id); - if (ret) { - dev_err(adev->dev, - "beforeReset callback failed for instance %u: %d\n", - instance_id, ret); - goto exit; - } - } + if (sdma_instance->funcs->stop_kernel_queue) { + sdma_instance->funcs->stop_kernel_queue(gfx_ring); + if (adev->sdma.has_page_queue) + sdma_instance->funcs->stop_kernel_queue(page_ring); } /* Perform the SDMA reset for the specified instance */ - ret = amdgpu_dpm_reset_sdma(adev, 1 << instance_id); + ret = amdgpu_sdma_soft_reset(adev, instance_id); if (ret) { - dev_err(adev->dev, "Failed to reset SDMA instance %u\n", instance_id); + dev_err(adev->dev, "Failed to reset SDMA logical instance %u\n", instance_id); goto exit; } - /* Invoke all registered post_reset callbacks */ - list_for_each_entry(funcs, &adev->sdma.reset_callback_list, list) { - if (funcs->post_reset) { - ret = funcs->post_reset(adev, instance_id); - if (ret) { - dev_err(adev->dev, - "afterReset callback failed for instance %u: %d\n", - instance_id, ret); - goto exit; - } - } + if (sdma_instance->funcs->start_kernel_queue) { + sdma_instance->funcs->start_kernel_queue(gfx_ring); + if (adev->sdma.has_page_queue) + sdma_instance->funcs->start_kernel_queue(page_ring); } exit: - /* Restart the scheduler's work queue for the GFX and page rings - * if they were stopped by this function. This allows new tasks - * to be submitted to the queues after the reset is complete. - */ - if (!ret) { - if (gfx_sched_stopped && amdgpu_ring_sched_ready(gfx_ring)) { + if (!caller_handles_kernel_queues) { + /* Restart the scheduler's work queue for the GFX and page rings + * if they were stopped by this function. This allows new tasks + * to be submitted to the queues after the reset is complete. 
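
For context, how an SDMA IP version plugs into this refactor: a hedged sketch of registering the per-instance hooks (struct amdgpu_sdma_funcs is introduced in the header change below); the example_* callbacks are hypothetical stand-ins for the version-specific implementations.

static int example_stop_kernel_queue(struct amdgpu_ring *ring);
static int example_start_kernel_queue(struct amdgpu_ring *ring);
static int example_soft_reset_kernel_queue(struct amdgpu_device *adev,
					   u32 instance_id);

static const struct amdgpu_sdma_funcs example_sdma_funcs = {
	.stop_kernel_queue = &example_stop_kernel_queue,
	.start_kernel_queue = &example_start_kernel_queue,
	.soft_reset_kernel_queue = &example_soft_reset_kernel_queue,
};

/* typically done once per instance during sw_init */
static void example_register_sdma_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].funcs = &example_sdma_funcs;
}
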
+ */ + if (!ret) { + amdgpu_fence_driver_force_completion(gfx_ring); drm_sched_wqueue_start(&gfx_ring->sched); - } - if (page_sched_stopped && amdgpu_ring_sched_ready(page_ring)) { - drm_sched_wqueue_start(&page_ring->sched); + if (adev->sdma.has_page_queue) { + amdgpu_fence_driver_force_completion(page_ring); + drm_sched_wqueue_start(&page_ring->sched); + } } } mutex_unlock(&sdma_instance->engine_reset_mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index 47d56fd0589f..34311f32be4c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -50,6 +50,12 @@ enum amdgpu_sdma_irq { #define NUM_SDMA(x) hweight32(x) +struct amdgpu_sdma_funcs { + int (*stop_kernel_queue)(struct amdgpu_ring *ring); + int (*start_kernel_queue)(struct amdgpu_ring *ring); + int (*soft_reset_kernel_queue)(struct amdgpu_device *adev, u32 instance_id); +}; + struct amdgpu_sdma_instance { /* SDMA firmware */ const struct firmware *fw; @@ -68,7 +74,7 @@ struct amdgpu_sdma_instance { /* track guilty state of GFX and PAGE queues */ bool gfx_guilty; bool page_guilty; - + const struct amdgpu_sdma_funcs *funcs; }; enum amdgpu_sdma_ras_memory_id { @@ -103,17 +109,11 @@ struct amdgpu_sdma_ras { struct amdgpu_ras_block_object ras_block; }; -struct sdma_on_reset_funcs { - int (*pre_reset)(struct amdgpu_device *adev, uint32_t instance_id); - int (*post_reset)(struct amdgpu_device *adev, uint32_t instance_id); - /* Linked list node to store this structure in a list; */ - struct list_head list; -}; - struct amdgpu_sdma { struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES]; struct amdgpu_irq_src trap_irq; struct amdgpu_irq_src illegal_inst_irq; + struct amdgpu_irq_src fence_irq; struct amdgpu_irq_src ecc_irq; struct amdgpu_irq_src vm_hole_irq; struct amdgpu_irq_src doorbell_invalid_irq; @@ -131,6 +131,8 @@ struct amdgpu_sdma { uint32_t *ip_dump; uint32_t supported_reset; struct list_head reset_callback_list; + bool no_user_submission; + bool disable_uq; }; /* @@ -170,8 +172,8 @@ struct amdgpu_buffer_funcs { uint32_t byte_count); }; -void amdgpu_sdma_register_on_reset_callbacks(struct amdgpu_device *adev, struct sdma_on_reset_funcs *funcs); -int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id); +int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id, + bool caller_handles_kernel_queues); #define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b), (t)) #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c index e22cb2b5cd92..d45ebfb642ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c @@ -45,7 +45,11 @@ */ static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev) { - return AMDGPU_VA_RESERVED_SEQ64_START(adev); + u64 addr = AMDGPU_VA_RESERVED_SEQ64_START(adev); + + addr = amdgpu_gmc_sign_extend(addr); + + return addr; } /** @@ -63,9 +67,9 @@ static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev) int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo_va **bo_va) { + u64 seq64_addr, va_flags; struct amdgpu_bo *bo; struct drm_exec exec; - u64 seq64_addr; int r; bo = adev->seq64.sbo; @@ -88,9 +92,11 @@ int amdgpu_seq64_map(struct amdgpu_device 
*adev, struct amdgpu_vm *vm, goto error; } - seq64_addr = amdgpu_seq64_get_va_base(adev); + seq64_addr = amdgpu_seq64_get_va_base(adev) & AMDGPU_GMC_HOLE_MASK; + + va_flags = amdgpu_gem_va_map_flags(adev, AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_MTYPE_UC); r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE, - AMDGPU_PTE_READABLE); + va_flags); if (r) { DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r); amdgpu_vm_bo_del(adev, *bo_va); @@ -133,7 +139,7 @@ void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv) vm = &fpriv->vm; - drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); + drm_exec_init(&exec, 0, 0); drm_exec_until_all_locked(&exec) { r = amdgpu_vm_lock_pd(vm, &exec, 0); if (likely(!r)) @@ -156,6 +162,7 @@ error: * * @adev: amdgpu_device pointer * @va: VA to access the seq in process address space + * @gpu_addr: GPU address to access the seq * @cpu_addr: CPU address to access the seq * * Alloc a 64 bit memory from seq64 pool. @@ -163,7 +170,8 @@ error: * Returns: * 0 on success or a negative error code on failure */ -int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 **cpu_addr) +int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, + u64 *gpu_addr, u64 **cpu_addr) { unsigned long bit_pos; @@ -172,7 +180,12 @@ int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 **cpu_addr) return -ENOSPC; __set_bit(bit_pos, adev->seq64.used); + *va = bit_pos * sizeof(u64) + amdgpu_seq64_get_va_base(adev); + + if (gpu_addr) + *gpu_addr = bit_pos * sizeof(u64) + adev->seq64.gpu_addr; + *cpu_addr = bit_pos + adev->seq64.cpu_base_addr; return 0; @@ -233,7 +246,7 @@ int amdgpu_seq64_init(struct amdgpu_device *adev) */ r = amdgpu_bo_create_kernel(adev, AMDGPU_VA_RESERVED_SEQ64_SIZE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, - &adev->seq64.sbo, NULL, + &adev->seq64.sbo, &adev->seq64.gpu_addr, (void **)&adev->seq64.cpu_base_addr); if (r) { dev_warn(adev->dev, "(%d) create seq64 failed\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h index 4203b2ab318d..26a249aaaee1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h @@ -32,13 +32,14 @@ struct amdgpu_seq64 { struct amdgpu_bo *sbo; u32 num_sem; + u64 gpu_addr; u64 *cpu_base_addr; DECLARE_BITMAP(used, AMDGPU_MAX_SEQ64_SLOTS); }; void amdgpu_seq64_fini(struct amdgpu_device *adev); int amdgpu_seq64_init(struct amdgpu_device *adev); -int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr, u64 **cpu_addr); +int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 *gpu_addr, u64 **cpu_addr); void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr); int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo_va **bo_va); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 5576ed0b508f..d6ae9974c952 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -249,9 +249,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, if (resv == NULL) return -EINVAL; - - /* TODO: Use DMA_RESV_USAGE_READ here */ - dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, f) { + /* Implicitly sync only to KERNEL, WRITE and READ */ + dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, f) { dma_fence_chain_for_each(f, f) { struct dma_fence *tmp = dma_fence_chain_contained(f); diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 11dd2e0f7979..d13e64a69e25 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -167,25 +167,23 @@ TRACE_EVENT(amdgpu_cs_ioctl, TP_PROTO(struct amdgpu_job *job), TP_ARGS(job), TP_STRUCT__entry( - __field(uint64_t, sched_job_id) __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) - __field(unsigned int, context) - __field(unsigned int, seqno) + __field(u64, context) + __field(u64, seqno) __field(struct dma_fence *, fence) __string(ring, to_amdgpu_ring(job->base.sched)->name) __field(u32, num_ibs) ), TP_fast_assign( - __entry->sched_job_id = job->base.id; __assign_str(timeline); __entry->context = job->base.s_fence->finished.context; __entry->seqno = job->base.s_fence->finished.seqno; __assign_str(ring); __entry->num_ibs = job->num_ibs; ), - TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", - __entry->sched_job_id, __get_str(timeline), __entry->context, + TP_printk("timeline=%s, fence=%llu:%llu, ring_name=%s, num_ibs=%u", + __get_str(timeline), __entry->context, __entry->seqno, __get_str(ring), __entry->num_ibs) ); @@ -193,24 +191,22 @@ TRACE_EVENT(amdgpu_sched_run_job, TP_PROTO(struct amdgpu_job *job), TP_ARGS(job), TP_STRUCT__entry( - __field(uint64_t, sched_job_id) __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) - __field(unsigned int, context) - __field(unsigned int, seqno) + __field(u64, context) + __field(u64, seqno) __string(ring, to_amdgpu_ring(job->base.sched)->name) __field(u32, num_ibs) ), TP_fast_assign( - __entry->sched_job_id = job->base.id; __assign_str(timeline); __entry->context = job->base.s_fence->finished.context; __entry->seqno = job->base.s_fence->finished.seqno; __assign_str(ring); __entry->num_ibs = job->num_ibs; ), - TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", - __entry->sched_job_id, __get_str(timeline), __entry->context, + TP_printk("timeline=%s, fence=%llu:%llu, ring_name=%s, num_ibs=%u", + __get_str(timeline), __entry->context, __entry->seqno, __get_str(ring), __entry->num_ibs) ); @@ -551,23 +547,19 @@ TRACE_EVENT(amdgpu_ib_pipe_sync, TP_ARGS(sched_job, fence), TP_STRUCT__entry( __string(ring, sched_job->base.sched->name) - __field(uint64_t, id) __field(struct dma_fence *, fence) - __field(uint64_t, ctx) - __field(unsigned, seqno) + __field(u64, ctx) + __field(u64, seqno) ), TP_fast_assign( __assign_str(ring); - __entry->id = sched_job->base.id; __entry->fence = fence; __entry->ctx = fence->context; __entry->seqno = fence->seqno; ), - TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u", - __get_str(ring), __entry->id, - __entry->fence, __entry->ctx, - __entry->seqno) + TP_printk("job ring=%s need pipe sync to fence=%llu:%llu", + __get_str(ring), __entry->ctx, __entry->seqno) ); TRACE_EVENT(amdgpu_reset_reg_dumps, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 53b71e9d8076..27ab4e754b2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -299,7 +299,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, struct amdgpu_bo *abo_src, *abo_dst; if (!adev->mman.buffer_funcs_enabled) { - DRM_ERROR("Trying to move memory with ring turned off.\n"); + dev_err(adev->dev, + "Trying to move memory with ring turned off.\n"); return -EINVAL; } @@ -934,7 +935,7 @@ static int 
amdgpu_ttm_backend_bind(struct ttm_device *bdev, if (gtt->userptr) { r = amdgpu_ttm_tt_pin_userptr(bdev, ttm); if (r) { - DRM_ERROR("failed to pin userptr\n"); + dev_err(adev->dev, "failed to pin userptr\n"); return r; } } else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) { @@ -1060,7 +1061,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev, /* if the pages have userptr pinning then clear that first */ if (gtt->userptr) { amdgpu_ttm_tt_unpin_userptr(bdev, ttm); - } else if (ttm->sg && gtt->gobj->import_attach) { + } else if (ttm->sg && drm_gem_is_imported(gtt->gobj)) { struct dma_buf_attachment *attach; attach = gtt->gobj->import_attach; @@ -1781,7 +1782,7 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) &ctx->c2p_bo, NULL); if (ret) { - DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret); + dev_err(adev->dev, "alloc c2p_bo failed(%d)!\n", ret); amdgpu_ttm_training_reserve_vram_fini(adev); return ret; } @@ -1793,7 +1794,7 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) adev, adev->gmc.real_vram_size - reserve_size, reserve_size, &adev->mman.fw_reserved_memory, NULL); if (ret) { - DRM_ERROR("alloc tmr failed(%d)!\n", ret); + dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret); amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL); return ret; @@ -1864,13 +1865,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) adev->need_swiotlb, dma_addressing_limited(adev->dev)); if (r) { - DRM_ERROR("failed initializing buffer object driver(%d).\n", r); + dev_err(adev->dev, + "failed initializing buffer object driver(%d).\n", r); return r; } r = amdgpu_ttm_pools_init(adev); if (r) { - DRM_ERROR("failed to init ttm pools(%d).\n", r); + dev_err(adev->dev, "failed to init ttm pools(%d).\n", r); return r; } adev->mman.initialized = true; @@ -1878,7 +1880,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) /* Initialize VRAM pool with all of VRAM divided into pages */ r = amdgpu_vram_mgr_init(adev); if (r) { - DRM_ERROR("Failed initializing VRAM heap.\n"); + dev_err(adev->dev, "Failed initializing VRAM heap.\n"); return r; } @@ -1958,7 +1960,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_DEBUG_DRIVER("Skipped stolen memory reservation\n"); } - DRM_INFO("amdgpu: %uM of VRAM memory ready\n", + dev_info(adev->dev, "amdgpu: %uM of VRAM memory ready\n", (unsigned int)(adev->gmc.real_vram_size / (1024 * 1024))); /* Compute GTT size, either based on TTM limit @@ -1981,10 +1983,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) /* Initialize GTT memory pool */ r = amdgpu_gtt_mgr_init(adev, gtt_size); if (r) { - DRM_ERROR("Failed initializing GTT heap.\n"); + dev_err(adev->dev, "Failed initializing GTT heap.\n"); return r; } - DRM_INFO("amdgpu: %uM of GTT memory ready.\n", + dev_info(adev->dev, "amdgpu: %uM of GTT memory ready.\n", (unsigned int)(gtt_size / (1024 * 1024))); if (adev->flags & AMD_IS_APU) { @@ -1995,40 +1997,40 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) /* Initialize doorbell pool on PCI BAR */ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE); if (r) { - DRM_ERROR("Failed initializing doorbell heap.\n"); + dev_err(adev->dev, "Failed initializing doorbell heap.\n"); return r; } /* Create a boorbell page for kernel usages */ r = amdgpu_doorbell_create_kernel_doorbells(adev); if (r) { - DRM_ERROR("Failed to initialize kernel doorbells.\n"); + dev_err(adev->dev, "Failed to initialize kernel doorbells.\n"); return r; } /* Initialize preemptible memory pool */ r = amdgpu_preempt_mgr_init(adev); 
if (r) { - DRM_ERROR("Failed initializing PREEMPT heap.\n"); + dev_err(adev->dev, "Failed initializing PREEMPT heap.\n"); return r; } /* Initialize various on-chip memory pools */ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size); if (r) { - DRM_ERROR("Failed initializing GDS heap.\n"); + dev_err(adev->dev, "Failed initializing GDS heap.\n"); return r; } r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size); if (r) { - DRM_ERROR("Failed initializing gws heap.\n"); + dev_err(adev->dev, "Failed initializing gws heap.\n"); return r; } r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size); if (r) { - DRM_ERROR("Failed initializing oa heap.\n"); + dev_err(adev->dev, "Failed initializing oa heap.\n"); return r; } if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE, @@ -2060,6 +2062,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) /* return the FW reserved memory back to VRAM */ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL); + amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, + NULL); if (adev->mman.stolen_reserved_size) amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory, NULL, NULL); @@ -2081,13 +2085,15 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) amdgpu_vram_mgr_fini(adev); amdgpu_gtt_mgr_fini(adev); amdgpu_preempt_mgr_fini(adev); + amdgpu_doorbell_fini(adev); + ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS); ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS); ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA); ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL); ttm_device_fini(&adev->mman.bdev); adev->mman.initialized = false; - DRM_INFO("amdgpu: ttm finalized\n"); + dev_info(adev->dev, "amdgpu: ttm finalized\n"); } /** @@ -2119,8 +2125,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) DRM_SCHED_PRIORITY_KERNEL, &sched, 1, NULL); if (r) { - DRM_ERROR("Failed setting up TTM BO move entity (%d)\n", - r); + dev_err(adev->dev, + "Failed setting up TTM BO move entity (%d)\n", + r); return; } @@ -2128,8 +2135,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) DRM_SCHED_PRIORITY_NORMAL, &sched, 1, NULL); if (r) { - DRM_ERROR("Failed setting up TTM BO move entity (%d)\n", - r); + dev_err(adev->dev, + "Failed setting up TTM BO move entity (%d)\n", + r); goto error_free_entity; } } else { @@ -2200,7 +2208,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, int r; if (!direct_submit && !ring->sched.ready) { - DRM_ERROR("Trying to move memory with ring turned off.\n"); + dev_err(adev->dev, + "Trying to move memory with ring turned off.\n"); return -EINVAL; } @@ -2235,7 +2244,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, error_free: amdgpu_job_free(job); - DRM_ERROR("Error scheduling IBs (%d)\n", r); + dev_err(adev->dev, "Error scheduling IBs (%d)\n", r); return r; } @@ -2354,7 +2363,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, int r; if (!adev->mman.buffer_funcs_enabled) { - DRM_ERROR("Trying to clear memory with ring turned off.\n"); + dev_err(adev->dev, + "Trying to clear memory with ring turned off.\n"); return -EINVAL; } @@ -2414,7 +2424,7 @@ int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type) man = ttm_manager_type(&adev->mman.bdev, mem_type); break; default: - DRM_ERROR("Trying to evict invalid memory type\n"); + dev_err(adev->dev, "Trying to evict invalid memory type\n"); return -EINVAL; } diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 208b7d1d8a27..2309df3f68a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -86,6 +86,7 @@ struct amdgpu_mman { uint32_t discovery_tmr_size; /* fw reserved memory */ struct amdgpu_bo *fw_reserved_memory; + struct amdgpu_bo *fw_reserved_memory_extend; /* firmware VRAM reservation */ u64 fw_vram_usage_start_offset; @@ -154,6 +155,7 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr, uint64_t start, uint64_t size); int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr, uint64_t start); +void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev); bool amdgpu_res_cpu_visible(struct amdgpu_device *adev, struct ttm_resource *res); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 3d9e9fdc10b4..e96f24e9ad57 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -30,6 +30,11 @@ #define AMDGPU_UCODE_NAME_MAX (128) +static const struct kicker_device kicker_device_list[] = { + {0x744B, 0x00}, + {0x7551, 0xC8} +}; + static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr) { DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes)); @@ -765,8 +770,10 @@ FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version); FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version); FW_VERSION_ATTR(vcn_fw_version, 0444, vcn.fw_version); FW_VERSION_ATTR(dmcu_fw_version, 0444, dm.dmcu_fw_version); +FW_VERSION_ATTR(dmcub_fw_version, 0444, dm.dmcub_fw_version); FW_VERSION_ATTR(mes_fw_version, 0444, mes.sched_version & AMDGPU_MES_VERSION_MASK); FW_VERSION_ATTR(mes_kiq_fw_version, 0444, mes.kiq_version & AMDGPU_MES_VERSION_MASK); +FW_VERSION_ATTR(pldm_fw_version, 0444, firmware.pldm_version); static struct attribute *fw_attrs[] = { &dev_attr_vce_fw_version.attr, &dev_attr_uvd_fw_version.attr, @@ -779,8 +786,9 @@ static struct attribute *fw_attrs[] = { &dev_attr_ta_ras_fw_version.attr, &dev_attr_ta_xgmi_fw_version.attr, &dev_attr_smc_fw_version.attr, &dev_attr_sdma_fw_version.attr, &dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr, - &dev_attr_dmcu_fw_version.attr, &dev_attr_imu_fw_version.attr, - &dev_attr_mes_fw_version.attr, &dev_attr_mes_kiq_fw_version.attr, + &dev_attr_dmcu_fw_version.attr, &dev_attr_dmcub_fw_version.attr, + &dev_attr_imu_fw_version.attr, &dev_attr_mes_fw_version.attr, + &dev_attr_mes_kiq_fw_version.attr, &dev_attr_pldm_fw_version.attr, NULL }; @@ -1152,6 +1160,9 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM; } + if (amdgpu_virt_xgmi_migrate_enabled(adev) && adev->firmware.fw_buf) + adev->firmware.fw_buf_mc = amdgpu_bo_fb_aper_addr(adev->firmware.fw_buf); + for (i = 0; i < adev->firmware.max_ucodes; i++) { ucode = &adev->firmware.ucode[i]; if (ucode->fw) { @@ -1384,6 +1395,19 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl return NULL; } +bool amdgpu_is_kicker_fw(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(kicker_device_list); i++) { + if (adev->pdev->device == kicker_device_list[i].device && + adev->pdev->revision == kicker_device_list[i].revision) + return true; + } + + return false; +} + void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len) { int maj, min, rev; diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 4eedd92f000b..6349aad6da35 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -25,6 +25,8 @@ #include "amdgpu_socbb.h" +#define RS64_FW_UC_START_ADDR_LO 0x3000 + struct common_firmware_header { uint32_t size_bytes; /* size of the entire header+image(s) in bytes */ uint32_t header_size_bytes; /* size of just the header in bytes */ @@ -600,6 +602,12 @@ struct amdgpu_firmware { void *fw_buf_ptr; uint64_t fw_buf_mc; + uint32_t pldm_version; +}; + +struct kicker_device{ + unsigned short device; + u8 revision; }; void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr); @@ -629,5 +637,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type); const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id); void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len); +bool amdgpu_is_kicker_fw(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index 0a1ef95b2866..c92b8794aa73 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -529,6 +529,7 @@ int amdgpu_umc_lookup_bad_pages_in_a_row(struct amdgpu_device *adev, pfns[i] = err_data.err_addr[i].retired_page; } ret = i; + adev->umc.err_addr_cnt = err_data.err_addr_cnt; out: kfree(err_data.err_addr); @@ -561,3 +562,26 @@ int amdgpu_umc_mca_to_addr(struct amdgpu_device *adev, return 0; } + +int amdgpu_umc_pa2mca(struct amdgpu_device *adev, + uint64_t pa, uint64_t *mca, enum amdgpu_memory_partition nps) +{ + struct ta_ras_query_address_input addr_in; + struct ta_ras_query_address_output addr_out; + int ret; + + /* nps: the pa belongs to */ + addr_in.pa.pa = pa | ((uint64_t)nps << 58); + addr_in.addr_type = TA_RAS_PA_TO_MCA; + ret = psp_ras_query_address(&adev->psp, &addr_in, &addr_out); + if (ret) { + dev_warn(adev->dev, "Failed to query RAS MCA address for 0x%llx", + pa); + + return ret; + } + + *mca = addr_out.ma.err_addr; + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index 857693bcd8d4..ec203f9e5ffa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -78,6 +78,18 @@ #define UMC_NPS_SHIFT 40 #define UMC_NPS_MASK 0xffULL +/* three column bits and one row bit in MCA address flip + * in bad page retirement + */ +#define RETIRE_FLIP_BITS_NUM 4 + +struct amdgpu_umc_flip_bits { + uint32_t flip_bits_in_pa[RETIRE_FLIP_BITS_NUM]; + uint32_t flip_row_bit; + uint32_t r13_in_pa; + uint32_t bit_num; +}; + typedef int (*umc_func)(struct amdgpu_device *adev, uint32_t node_inst, uint32_t umc_inst, uint32_t ch_inst, void *data); @@ -100,6 +112,7 @@ struct amdgpu_umc_ras { bool dump_addr); uint32_t (*get_die_id_from_pa)(struct amdgpu_device *adev, uint64_t mca_addr, uint64_t retired_page); + void (*get_retire_flip_bits)(struct amdgpu_device *adev); }; struct amdgpu_umc_funcs { @@ -130,6 +143,10 @@ struct amdgpu_umc { /* active mask for umc node instance */ unsigned long active_mask; + + struct amdgpu_umc_flip_bits flip_bits; + + unsigned long err_addr_cnt; }; int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev); @@ -172,4 +189,6 @@ int amdgpu_umc_mca_to_addr(struct amdgpu_device *adev, uint64_t err_addr, uint32_t ch, uint32_t umc, uint32_t node, uint32_t socket, struct ta_ras_query_address_output 
*addr_out, bool dump_addr); +int amdgpu_umc_pa2mca(struct amdgpu_device *adev, + uint64_t pa, uint64_t *mca, enum amdgpu_memory_partition nps); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c new file mode 100644 index 000000000000..c3ace8030530 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c @@ -0,0 +1,981 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <drm/drm_auth.h> +#include <drm/drm_exec.h> +#include <linux/pm_runtime.h> + +#include "amdgpu.h" +#include "amdgpu_vm.h" +#include "amdgpu_userq.h" +#include "amdgpu_userq_fence.h" + +u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev) +{ + int i; + u32 userq_ip_mask = 0; + + for (i = 0; i < AMDGPU_HW_IP_NUM; i++) { + if (adev->userq_funcs[i]) + userq_ip_mask |= (1 << i); + } + + return userq_ip_mask; +} + +static int +amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_usermode_queue *queue) +{ + struct amdgpu_device *adev = uq_mgr->adev; + const struct amdgpu_userq_funcs *userq_funcs = + adev->userq_funcs[queue->queue_type]; + int r = 0; + + if (queue->state == AMDGPU_USERQ_STATE_MAPPED) { + r = userq_funcs->unmap(uq_mgr, queue); + if (r) + queue->state = AMDGPU_USERQ_STATE_HUNG; + else + queue->state = AMDGPU_USERQ_STATE_UNMAPPED; + } + return r; +} + +static int +amdgpu_userq_map_helper(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_usermode_queue *queue) +{ + struct amdgpu_device *adev = uq_mgr->adev; + const struct amdgpu_userq_funcs *userq_funcs = + adev->userq_funcs[queue->queue_type]; + int r = 0; + + if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) { + r = userq_funcs->map(uq_mgr, queue); + if (r) { + queue->state = AMDGPU_USERQ_STATE_HUNG; + } else { + queue->state = AMDGPU_USERQ_STATE_MAPPED; + } + } + return r; +} + +static void +amdgpu_userq_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_usermode_queue *queue) +{ + struct dma_fence *f = queue->last_fence; + int ret; + + if (f && !dma_fence_is_signaled(f)) { + ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100)); + if (ret <= 0) + drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n", + f->context, f->seqno); + } +} + +static void +amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_usermode_queue *queue, + int queue_id) +{ + struct amdgpu_device *adev = uq_mgr->adev; 
+ const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type]; + + uq_funcs->mqd_destroy(uq_mgr, queue); + amdgpu_userq_fence_driver_free(queue); + idr_remove(&uq_mgr->userq_idr, queue_id); + kfree(queue); +} + +int +amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr) +{ + struct amdgpu_usermode_queue *queue; + int queue_id; + int ret = 0; + + mutex_lock(&uq_mgr->userq_mutex); + /* Resume all the queues for this process */ + idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) + ret += queue->state == AMDGPU_USERQ_STATE_MAPPED; + + mutex_unlock(&uq_mgr->userq_mutex); + return ret; +} + +static struct amdgpu_usermode_queue * +amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid) +{ + return idr_find(&uq_mgr->userq_idr, qid); +} + +void +amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_eviction_fence_mgr *evf_mgr) +{ + struct amdgpu_eviction_fence *ev_fence; + +retry: + /* Flush any pending resume work to create ev_fence */ + flush_delayed_work(&uq_mgr->resume_work); + + mutex_lock(&uq_mgr->userq_mutex); + spin_lock(&evf_mgr->ev_fence_lock); + ev_fence = evf_mgr->ev_fence; + spin_unlock(&evf_mgr->ev_fence_lock); + if (!ev_fence || dma_fence_is_signaled(&ev_fence->base)) { + mutex_unlock(&uq_mgr->userq_mutex); + /* + * Looks like there was no pending resume work, + * add one now to create a valid eviction fence + */ + schedule_delayed_work(&uq_mgr->resume_work, 0); + goto retry; + } +} + +int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_userq_obj *userq_obj, + int size) +{ + struct amdgpu_device *adev = uq_mgr->adev; + struct amdgpu_bo_param bp; + int r; + + memset(&bp, 0, sizeof(bp)); + bp.byte_align = PAGE_SIZE; + bp.domain = AMDGPU_GEM_DOMAIN_GTT; + bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + bp.type = ttm_bo_type_kernel; + bp.size = size; + bp.resv = NULL; + bp.bo_ptr_size = sizeof(struct amdgpu_bo); + + r = amdgpu_bo_create(adev, &bp, &userq_obj->obj); + if (r) { + drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r); + return r; + } + + r = amdgpu_bo_reserve(userq_obj->obj, true); + if (r) { + drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r); + goto free_obj; + } + + r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo); + if (r) { + drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r); + goto unresv; + } + + r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr); + if (r) { + drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r); + goto unresv; + } + + userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj); + amdgpu_bo_unreserve(userq_obj->obj); + memset(userq_obj->cpu_ptr, 0, size); + return 0; + +unresv: + amdgpu_bo_unreserve(userq_obj->obj); + +free_obj: + amdgpu_bo_unref(&userq_obj->obj); + return r; +} + +void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_userq_obj *userq_obj) +{ + amdgpu_bo_kunmap(userq_obj->obj); + amdgpu_bo_unref(&userq_obj->obj); +} + +uint64_t +amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_db_info *db_info, + struct drm_file *filp) +{ + uint64_t index; + struct drm_gem_object *gobj; + struct amdgpu_userq_obj *db_obj = db_info->db_obj; + int r, db_size; + + gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle); + if (gobj == NULL) { + drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n"); + return -EINVAL; + } + + db_obj->obj = 
amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); + drm_gem_object_put(gobj); + + r = amdgpu_bo_reserve(db_obj->obj, true); + if (r) { + drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n"); + goto unref_bo; + } + + /* Pin the BO before generating the index, unpin in queue destroy */ + r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL); + if (r) { + drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n"); + goto unresv_bo; + } + + switch (db_info->queue_type) { + case AMDGPU_HW_IP_GFX: + case AMDGPU_HW_IP_COMPUTE: + case AMDGPU_HW_IP_DMA: + db_size = sizeof(u64); + break; + + case AMDGPU_HW_IP_VCN_ENC: + db_size = sizeof(u32); + db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1; + break; + + case AMDGPU_HW_IP_VPE: + db_size = sizeof(u32); + db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VPE << 1; + break; + + default: + drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not support\n", + db_info->queue_type); + r = -EINVAL; + goto unpin_bo; + } + + index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj, + db_info->doorbell_offset, db_size); + drm_dbg_driver(adev_to_drm(uq_mgr->adev), + "[Usermode queues] doorbell index=%lld\n", index); + amdgpu_bo_unreserve(db_obj->obj); + return index; + +unpin_bo: + amdgpu_bo_unpin(db_obj->obj); +unresv_bo: + amdgpu_bo_unreserve(db_obj->obj); +unref_bo: + amdgpu_bo_unref(&db_obj->obj); + return r; +} + +static int +amdgpu_userq_destroy(struct drm_file *filp, int queue_id) +{ + struct amdgpu_fpriv *fpriv = filp->driver_priv; + struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr; + struct amdgpu_device *adev = uq_mgr->adev; + struct amdgpu_usermode_queue *queue; + int r = 0; + + cancel_delayed_work_sync(&uq_mgr->resume_work); + mutex_lock(&uq_mgr->userq_mutex); + + queue = amdgpu_userq_find(uq_mgr, queue_id); + if (!queue) { + drm_dbg_driver(adev_to_drm(uq_mgr->adev), "Invalid queue id to destroy\n"); + mutex_unlock(&uq_mgr->userq_mutex); + return -EINVAL; + } + amdgpu_userq_wait_for_last_fence(uq_mgr, queue); + r = amdgpu_bo_reserve(queue->db_obj.obj, true); + if (!r) { + amdgpu_bo_unpin(queue->db_obj.obj); + amdgpu_bo_unreserve(queue->db_obj.obj); + } + amdgpu_bo_unref(&queue->db_obj.obj); + +#if defined(CONFIG_DEBUG_FS) + debugfs_remove_recursive(queue->debugfs_queue); +#endif + r = amdgpu_userq_unmap_helper(uq_mgr, queue); + amdgpu_userq_cleanup(uq_mgr, queue, queue_id); + mutex_unlock(&uq_mgr->userq_mutex); + + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + return r; +} + +static int amdgpu_userq_priority_permit(struct drm_file *filp, + int priority) +{ + if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH) + return 0; + + if (capable(CAP_SYS_NICE)) + return 0; + + if (drm_is_current_master(filp)) + return 0; + + return -EACCES; +} + +#if defined(CONFIG_DEBUG_FS) +static int amdgpu_mqd_info_read(struct seq_file *m, void *unused) +{ + struct amdgpu_usermode_queue *queue = m->private; + struct amdgpu_bo *bo; + int r; + + if (!queue || !queue->mqd.obj) + return -EINVAL; + + bo = amdgpu_bo_ref(queue->mqd.obj); + r = amdgpu_bo_reserve(bo, true); + if (r) { + amdgpu_bo_unref(&bo); + return -EINVAL; + } + + seq_printf(m, "queue_type %d\n", queue->queue_type); + seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj)); + + amdgpu_bo_unreserve(bo); + amdgpu_bo_unref(&bo); + + return 0; +} + +static int amdgpu_mqd_info_open(struct inode *inode, struct file *file) +{ + return 
single_open(file, amdgpu_mqd_info_read, inode->i_private); +} + +static const struct file_operations amdgpu_mqd_info_fops = { + .owner = THIS_MODULE, + .open = amdgpu_mqd_info_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + +static int +amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) +{ + struct amdgpu_fpriv *fpriv = filp->driver_priv; + struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr; + struct amdgpu_device *adev = uq_mgr->adev; + const struct amdgpu_userq_funcs *uq_funcs; + struct amdgpu_usermode_queue *queue; + struct amdgpu_db_info db_info; + char *queue_name; + bool skip_map_queue; + uint64_t index; + int qid, r = 0; + int priority = + (args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >> + AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT; + + /* Usermode queues are only supported for GFX IP as of now */ + if (args->in.ip_type != AMDGPU_HW_IP_GFX && + args->in.ip_type != AMDGPU_HW_IP_DMA && + args->in.ip_type != AMDGPU_HW_IP_COMPUTE) { + drm_file_err(uq_mgr->file, "Usermode queue doesn't support IP type %u\n", + args->in.ip_type); + return -EINVAL; + } + + r = amdgpu_userq_priority_permit(filp, priority); + if (r) + return r; + + if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) && + (args->in.ip_type != AMDGPU_HW_IP_GFX) && + (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) && + !amdgpu_is_tmz(adev)) { + drm_file_err(uq_mgr->file, "Secure only supported on GFX/Compute queues\n"); + return -EINVAL; + } + + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) { + drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n"); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + /* + * There could be a situation that we are creating a new queue while + * the other queues under this UQ_mgr are suspended. So if there is any + * resume work pending, wait for it to get done. + * + * This will also make sure we have a valid eviction fence ready to be used. 
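
For context on the path being created here, a hedged userspace-side sketch of driving it through the new ioctl; the field names follow the kernel's union drm_amdgpu_userq as used in this file, while the fd, VA and handle parameters are placeholders the application is assumed to have set up.

#include <stdint.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

static int example_create_gfx_userq(int fd, uint32_t doorbell_handle,
				    uint64_t queue_va, uint64_t queue_size,
				    uint64_t rptr_va, uint64_t wptr_va,
				    void *mqd, uint32_t mqd_size,
				    uint32_t *queue_id)
{
	union drm_amdgpu_userq args = {};
	int r;

	args.in.op = AMDGPU_USERQ_OP_CREATE;
	args.in.ip_type = AMDGPU_HW_IP_GFX;
	args.in.doorbell_handle = doorbell_handle; /* GEM handle of doorbell BO */
	args.in.doorbell_offset = 0;
	args.in.queue_va = queue_va;               /* ring buffer GPU VA */
	args.in.queue_size = queue_size;
	args.in.rptr_va = rptr_va;
	args.in.wptr_va = wptr_va;
	args.in.mqd = (uintptr_t)mqd;              /* IP-specific MQD metadata */
	args.in.mqd_size = mqd_size;

	r = drmCommandWriteRead(fd, DRM_AMDGPU_USERQ, &args, sizeof(args));
	if (!r)
		*queue_id = args.out.queue_id;
	return r;
}
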
+ */ + mutex_lock(&adev->userq_mutex); + amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr); + + uq_funcs = adev->userq_funcs[args->in.ip_type]; + if (!uq_funcs) { + drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n", + args->in.ip_type); + r = -EINVAL; + goto unlock; + } + + queue = kzalloc(sizeof(struct amdgpu_usermode_queue), GFP_KERNEL); + if (!queue) { + drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n"); + r = -ENOMEM; + goto unlock; + } + queue->doorbell_handle = args->in.doorbell_handle; + queue->queue_type = args->in.ip_type; + queue->vm = &fpriv->vm; + queue->priority = priority; + + db_info.queue_type = queue->queue_type; + db_info.doorbell_handle = queue->doorbell_handle; + db_info.db_obj = &queue->db_obj; + db_info.doorbell_offset = args->in.doorbell_offset; + + /* Convert relative doorbell offset into absolute doorbell index */ + index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp); + if (index == (uint64_t)-EINVAL) { + drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n"); + kfree(queue); + goto unlock; + } + + queue->doorbell_index = index; + xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC); + r = amdgpu_userq_fence_driver_alloc(adev, queue); + if (r) { + drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n"); + goto unlock; + } + + r = uq_funcs->mqd_create(uq_mgr, &args->in, queue); + if (r) { + drm_file_err(uq_mgr->file, "Failed to create Queue\n"); + amdgpu_userq_fence_driver_free(queue); + kfree(queue); + goto unlock; + } + + + qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL); + if (qid < 0) { + drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n"); + amdgpu_userq_fence_driver_free(queue); + uq_funcs->mqd_destroy(uq_mgr, queue); + kfree(queue); + r = -ENOMEM; + goto unlock; + } + + /* don't map the queue if scheduling is halted */ + if (adev->userq_halt_for_enforce_isolation && + ((queue->queue_type == AMDGPU_HW_IP_GFX) || + (queue->queue_type == AMDGPU_HW_IP_COMPUTE))) + skip_map_queue = true; + else + skip_map_queue = false; + if (!skip_map_queue) { + r = amdgpu_userq_map_helper(uq_mgr, queue); + if (r) { + drm_file_err(uq_mgr->file, "Failed to map Queue\n"); + idr_remove(&uq_mgr->userq_idr, qid); + amdgpu_userq_fence_driver_free(queue); + uq_funcs->mqd_destroy(uq_mgr, queue); + kfree(queue); + goto unlock; + } + } + + queue_name = kasprintf(GFP_KERNEL, "queue-%d", qid); + if (!queue_name) { + r = -ENOMEM; + goto unlock; + } + +#if defined(CONFIG_DEBUG_FS) + /* Queue dentry per client to hold MQD information */ + queue->debugfs_queue = debugfs_create_dir(queue_name, filp->debugfs_client); + debugfs_create_file("mqd_info", 0444, queue->debugfs_queue, queue, &amdgpu_mqd_info_fops); +#endif + kfree(queue_name); + + args->out.queue_id = qid; + +unlock: + mutex_unlock(&uq_mgr->userq_mutex); + mutex_unlock(&adev->userq_mutex); + + return r; +} + +int amdgpu_userq_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp) +{ + union drm_amdgpu_userq *args = data; + int r; + + switch (args->in.op) { + case AMDGPU_USERQ_OP_CREATE: + if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK | + AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE)) + return -EINVAL; + r = amdgpu_userq_create(filp, args); + if (r) + drm_file_err(filp, "Failed to create usermode queue\n"); + break; + + case AMDGPU_USERQ_OP_FREE: + if (args->in.ip_type || + args->in.doorbell_handle || + args->in.doorbell_offset || + args->in.flags || + args->in.queue_va || + 
args->in.queue_size || + args->in.rptr_va || + args->in.wptr_va || + args->in.wptr_va || + args->in.mqd || + args->in.mqd_size) + return -EINVAL; + r = amdgpu_userq_destroy(filp, args->in.queue_id); + if (r) + drm_file_err(filp, "Failed to destroy usermode queue\n"); + break; + + default: + drm_dbg_driver(dev, "Invalid user queue op specified: %d\n", args->in.op); + return -EINVAL; + } + + return r; +} + +static int +amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr) +{ + struct amdgpu_usermode_queue *queue; + int queue_id; + int ret = 0, r; + + /* Resume all the queues for this process */ + idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) { + r = amdgpu_userq_map_helper(uq_mgr, queue); + if (r) + ret = r; + } + + if (ret) + drm_file_err(uq_mgr->file, "Failed to map all the queues\n"); + return ret; +} + +static int +amdgpu_userq_validate_vm_bo(void *_unused, struct amdgpu_bo *bo) +{ + struct ttm_operation_ctx ctx = { false, false }; + int ret; + + amdgpu_bo_placement_from_domain(bo, bo->allowed_domains); + + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (ret) + DRM_ERROR("Fail to validate\n"); + + return ret; +} + +static int +amdgpu_userq_validate_bos(struct amdgpu_userq_mgr *uq_mgr) +{ + struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr); + struct amdgpu_vm *vm = &fpriv->vm; + struct amdgpu_device *adev = uq_mgr->adev; + struct amdgpu_bo_va *bo_va; + struct ww_acquire_ctx *ticket; + struct drm_exec exec; + struct amdgpu_bo *bo; + struct dma_resv *resv; + bool clear, unlock; + int ret = 0; + + drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0); + drm_exec_until_all_locked(&exec) { + ret = amdgpu_vm_lock_pd(vm, &exec, 2); + drm_exec_retry_on_contention(&exec); + if (unlikely(ret)) { + drm_file_err(uq_mgr->file, "Failed to lock PD\n"); + goto unlock_all; + } + + /* Lock the done list */ + list_for_each_entry(bo_va, &vm->done, base.vm_status) { + bo = bo_va->base.bo; + if (!bo) + continue; + + ret = drm_exec_lock_obj(&exec, &bo->tbo.base); + drm_exec_retry_on_contention(&exec); + if (unlikely(ret)) + goto unlock_all; + } + } + + spin_lock(&vm->status_lock); + while (!list_empty(&vm->moved)) { + bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va, + base.vm_status); + spin_unlock(&vm->status_lock); + + /* Per VM BOs never need to bo cleared in the page tables */ + ret = amdgpu_vm_bo_update(adev, bo_va, false); + if (ret) + goto unlock_all; + spin_lock(&vm->status_lock); + } + + ticket = &exec.ticket; + while (!list_empty(&vm->invalidated)) { + bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, + base.vm_status); + resv = bo_va->base.bo->tbo.base.resv; + spin_unlock(&vm->status_lock); + + bo = bo_va->base.bo; + ret = amdgpu_userq_validate_vm_bo(NULL, bo); + if (ret) { + drm_file_err(uq_mgr->file, "Failed to validate BO\n"); + goto unlock_all; + } + + /* Try to reserve the BO to avoid clearing its ptes */ + if (!adev->debug_vm && dma_resv_trylock(resv)) { + clear = false; + unlock = true; + /* The caller is already holding the reservation lock */ + } else if (dma_resv_locking_ctx(resv) == ticket) { + clear = false; + unlock = false; + /* Somebody else is using the BO right now */ + } else { + clear = true; + unlock = false; + } + + ret = amdgpu_vm_bo_update(adev, bo_va, clear); + + if (unlock) + dma_resv_unlock(resv); + if (ret) + goto unlock_all; + + spin_lock(&vm->status_lock); + } + spin_unlock(&vm->status_lock); + + ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec); + if (ret) + drm_file_err(uq_mgr->file, "Failed to replace 
eviction fence\n"); + +unlock_all: + drm_exec_fini(&exec); + return ret; +} + +static void amdgpu_userq_restore_worker(struct work_struct *work) +{ + struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work); + struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr); + int ret; + + flush_delayed_work(&fpriv->evf_mgr.suspend_work); + + mutex_lock(&uq_mgr->userq_mutex); + + ret = amdgpu_userq_validate_bos(uq_mgr); + if (ret) { + drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n"); + goto unlock; + } + + ret = amdgpu_userq_restore_all(uq_mgr); + if (ret) { + drm_file_err(uq_mgr->file, "Failed to restore all queues\n"); + goto unlock; + } + +unlock: + mutex_unlock(&uq_mgr->userq_mutex); +} + +static int +amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr) +{ + struct amdgpu_usermode_queue *queue; + int queue_id; + int ret = 0, r; + + /* Try to unmap all the queues in this process ctx */ + idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) { + r = amdgpu_userq_unmap_helper(uq_mgr, queue); + if (r) + ret = r; + } + + if (ret) + drm_file_err(uq_mgr->file, "Couldn't unmap all the queues\n"); + return ret; +} + +static int +amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr) +{ + struct amdgpu_usermode_queue *queue; + int queue_id, ret; + + idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) { + struct dma_fence *f = queue->last_fence; + + if (!f || dma_fence_is_signaled(f)) + continue; + ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100)); + if (ret <= 0) { + drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n", + f->context, f->seqno); + return -ETIMEDOUT; + } + } + + return 0; +} + +void +amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_eviction_fence *ev_fence) +{ + int ret; + struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr); + struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr; + + /* Wait for any pending userqueue fence work to finish */ + ret = amdgpu_userq_wait_for_signal(uq_mgr); + if (ret) { + drm_file_err(uq_mgr->file, "Not evicting userqueue, timeout waiting for work\n"); + return; + } + + ret = amdgpu_userq_evict_all(uq_mgr); + if (ret) { + drm_file_err(uq_mgr->file, "Failed to evict userqueue\n"); + return; + } + + /* Signal current eviction fence */ + amdgpu_eviction_fence_signal(evf_mgr, ev_fence); + + if (evf_mgr->fd_closing) { + cancel_delayed_work_sync(&uq_mgr->resume_work); + return; + } + + /* Schedule a resume work */ + schedule_delayed_work(&uq_mgr->resume_work, 0); +} + +int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv, + struct amdgpu_device *adev) +{ + mutex_init(&userq_mgr->userq_mutex); + idr_init_base(&userq_mgr->userq_idr, 1); + userq_mgr->adev = adev; + userq_mgr->file = file_priv; + + mutex_lock(&adev->userq_mutex); + list_add(&userq_mgr->list, &adev->userq_mgr_list); + mutex_unlock(&adev->userq_mutex); + + INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker); + return 0; +} + +void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr) +{ + struct amdgpu_device *adev = userq_mgr->adev; + struct amdgpu_usermode_queue *queue; + struct amdgpu_userq_mgr *uqm, *tmp; + uint32_t queue_id; + + cancel_delayed_work_sync(&userq_mgr->resume_work); + + mutex_lock(&adev->userq_mutex); + mutex_lock(&userq_mgr->userq_mutex); + idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) { + amdgpu_userq_wait_for_last_fence(userq_mgr, queue); + amdgpu_userq_unmap_helper(userq_mgr, queue); + 
amdgpu_userq_cleanup(userq_mgr, queue, queue_id); + } + + list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { + if (uqm == userq_mgr) { + list_del(&uqm->list); + break; + } + } + idr_destroy(&userq_mgr->userq_idr); + mutex_unlock(&userq_mgr->userq_mutex); + mutex_unlock(&adev->userq_mutex); + mutex_destroy(&userq_mgr->userq_mutex); +} + +int amdgpu_userq_suspend(struct amdgpu_device *adev) +{ + u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev); + struct amdgpu_usermode_queue *queue; + struct amdgpu_userq_mgr *uqm, *tmp; + int queue_id; + int ret = 0, r; + + if (!ip_mask) + return 0; + + mutex_lock(&adev->userq_mutex); + list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { + cancel_delayed_work_sync(&uqm->resume_work); + mutex_lock(&uqm->userq_mutex); + idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { + r = amdgpu_userq_unmap_helper(uqm, queue); + if (r) + ret = r; + } + mutex_unlock(&uqm->userq_mutex); + } + mutex_unlock(&adev->userq_mutex); + return ret; +} + +int amdgpu_userq_resume(struct amdgpu_device *adev) +{ + u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev); + struct amdgpu_usermode_queue *queue; + struct amdgpu_userq_mgr *uqm, *tmp; + int queue_id; + int ret = 0, r; + + if (!ip_mask) + return 0; + + mutex_lock(&adev->userq_mutex); + list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { + mutex_lock(&uqm->userq_mutex); + idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { + r = amdgpu_userq_map_helper(uqm, queue); + if (r) + ret = r; + } + mutex_unlock(&uqm->userq_mutex); + } + mutex_unlock(&adev->userq_mutex); + return ret; +} + +int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev, + u32 idx) +{ + u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev); + struct amdgpu_usermode_queue *queue; + struct amdgpu_userq_mgr *uqm, *tmp; + int queue_id; + int ret = 0, r; + + /* only need to stop gfx/compute */ + if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE)))) + return 0; + + mutex_lock(&adev->userq_mutex); + if (adev->userq_halt_for_enforce_isolation) + dev_warn(adev->dev, "userq scheduling already stopped!\n"); + adev->userq_halt_for_enforce_isolation = true; + list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { + cancel_delayed_work_sync(&uqm->resume_work); + mutex_lock(&uqm->userq_mutex); + idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { + if (((queue->queue_type == AMDGPU_HW_IP_GFX) || + (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && + (queue->xcp_id == idx)) { + r = amdgpu_userq_unmap_helper(uqm, queue); + if (r) + ret = r; + } + } + mutex_unlock(&uqm->userq_mutex); + } + mutex_unlock(&adev->userq_mutex); + return ret; +} + +int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev, + u32 idx) +{ + u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev); + struct amdgpu_usermode_queue *queue; + struct amdgpu_userq_mgr *uqm, *tmp; + int queue_id; + int ret = 0, r; + + /* only need to start gfx/compute */ + if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE)))) + return 0; + + mutex_lock(&adev->userq_mutex); + if (!adev->userq_halt_for_enforce_isolation) + dev_warn(adev->dev, "userq scheduling already started!\n"); + adev->userq_halt_for_enforce_isolation = false; + list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { + mutex_lock(&uqm->userq_mutex); + idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { + if (((queue->queue_type == AMDGPU_HW_IP_GFX) || + (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && + 
(queue->xcp_id == idx)) { + r = amdgpu_userq_map_helper(uqm, queue); + if (r) + ret = r; + } + } + mutex_unlock(&uqm->userq_mutex); + } + mutex_unlock(&adev->userq_mutex); + return ret; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h new file mode 100644 index 000000000000..b1ca91b7cda4 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef AMDGPU_USERQ_H_ +#define AMDGPU_USERQ_H_ +#include "amdgpu_eviction_fence.h" + +#define AMDGPU_MAX_USERQ_COUNT 512 + +#define to_ev_fence(f) container_of(f, struct amdgpu_eviction_fence, base) +#define uq_mgr_to_fpriv(u) container_of(u, struct amdgpu_fpriv, userq_mgr) +#define work_to_uq_mgr(w, name) container_of(w, struct amdgpu_userq_mgr, name) + +enum amdgpu_userq_state { + AMDGPU_USERQ_STATE_UNMAPPED = 0, + AMDGPU_USERQ_STATE_MAPPED, + AMDGPU_USERQ_STATE_PREEMPTED, + AMDGPU_USERQ_STATE_HUNG, +}; + +struct amdgpu_mqd_prop; + +struct amdgpu_userq_obj { + void *cpu_ptr; + uint64_t gpu_addr; + struct amdgpu_bo *obj; +}; + +struct amdgpu_usermode_queue { + int queue_type; + enum amdgpu_userq_state state; + uint64_t doorbell_handle; + uint64_t doorbell_index; + uint64_t flags; + struct amdgpu_mqd_prop *userq_prop; + struct amdgpu_userq_mgr *userq_mgr; + struct amdgpu_vm *vm; + struct amdgpu_userq_obj mqd; + struct amdgpu_userq_obj db_obj; + struct amdgpu_userq_obj fw_obj; + struct amdgpu_userq_obj wptr_obj; + struct xarray fence_drv_xa; + struct amdgpu_userq_fence_driver *fence_drv; + struct dma_fence *last_fence; + u32 xcp_id; + int priority; + struct dentry *debugfs_queue; +}; + +struct amdgpu_userq_funcs { + int (*mqd_create)(struct amdgpu_userq_mgr *uq_mgr, + struct drm_amdgpu_userq_in *args, + struct amdgpu_usermode_queue *queue); + void (*mqd_destroy)(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_usermode_queue *uq); + int (*unmap)(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_usermode_queue *queue); + int (*map)(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_usermode_queue *queue); +}; + +/* Usermode queues for gfx */ +struct amdgpu_userq_mgr { + struct idr userq_idr; + struct mutex userq_mutex; + struct amdgpu_device *adev; + struct delayed_work resume_work; + struct list_head list; + struct drm_file *file; +}; + +struct amdgpu_db_info { + uint64_t doorbell_handle; + uint32_t 
queue_type; + uint32_t doorbell_offset; + struct amdgpu_userq_obj *db_obj; +}; + +int amdgpu_userq_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); + +int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv, + struct amdgpu_device *adev); + +void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr); + +int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_userq_obj *userq_obj, + int size); + +void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_userq_obj *userq_obj); + +void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_eviction_fence *ev_fence); + +int amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr); + +void amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *userq_mgr, + struct amdgpu_eviction_fence_mgr *evf_mgr); + +uint64_t amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_db_info *db_info, + struct drm_file *filp); + +u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev); + +int amdgpu_userq_suspend(struct amdgpu_device *adev); +int amdgpu_userq_resume(struct amdgpu_device *adev); + +int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev, + u32 idx); +int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev, + u32 idx); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c new file mode 100644 index 000000000000..c2a983ff23c9 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c @@ -0,0 +1,967 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include <linux/kref.h> +#include <linux/slab.h> +#include <linux/dma-fence-unwrap.h> + +#include <drm/drm_exec.h> +#include <drm/drm_syncobj.h> + +#include "amdgpu.h" +#include "amdgpu_userq_fence.h" + +static const struct dma_fence_ops amdgpu_userq_fence_ops; +static struct kmem_cache *amdgpu_userq_fence_slab; + +int amdgpu_userq_fence_slab_init(void) +{ + amdgpu_userq_fence_slab = kmem_cache_create("amdgpu_userq_fence", + sizeof(struct amdgpu_userq_fence), + 0, + SLAB_HWCACHE_ALIGN, + NULL); + if (!amdgpu_userq_fence_slab) + return -ENOMEM; + + return 0; +} + +void amdgpu_userq_fence_slab_fini(void) +{ + rcu_barrier(); + kmem_cache_destroy(amdgpu_userq_fence_slab); +} + +static inline struct amdgpu_userq_fence *to_amdgpu_userq_fence(struct dma_fence *f) +{ + if (!f || f->ops != &amdgpu_userq_fence_ops) + return NULL; + + return container_of(f, struct amdgpu_userq_fence, base); +} + +static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv) +{ + return le64_to_cpu(*fence_drv->cpu_addr); +} + +int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev, + struct amdgpu_usermode_queue *userq) +{ + struct amdgpu_userq_fence_driver *fence_drv; + unsigned long flags; + int r; + + fence_drv = kzalloc(sizeof(*fence_drv), GFP_KERNEL); + if (!fence_drv) + return -ENOMEM; + + /* Acquire seq64 memory */ + r = amdgpu_seq64_alloc(adev, &fence_drv->va, &fence_drv->gpu_addr, + &fence_drv->cpu_addr); + if (r) + goto free_fence_drv; + + memset(fence_drv->cpu_addr, 0, sizeof(u64)); + + kref_init(&fence_drv->refcount); + INIT_LIST_HEAD(&fence_drv->fences); + spin_lock_init(&fence_drv->fence_list_lock); + + fence_drv->adev = adev; + fence_drv->context = dma_fence_context_alloc(1); + get_task_comm(fence_drv->timeline_name, current); + + xa_lock_irqsave(&adev->userq_xa, flags); + r = xa_err(__xa_store(&adev->userq_xa, userq->doorbell_index, + fence_drv, GFP_KERNEL)); + xa_unlock_irqrestore(&adev->userq_xa, flags); + if (r) + goto free_seq64; + + userq->fence_drv = fence_drv; + + return 0; + +free_seq64: + amdgpu_seq64_free(adev, fence_drv->va); +free_fence_drv: + kfree(fence_drv); + + return r; +} + +static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa) +{ + struct amdgpu_userq_fence_driver *fence_drv; + unsigned long index; + + if (xa_empty(xa)) + return; + + xa_lock(xa); + xa_for_each(xa, index, fence_drv) { + __xa_erase(xa, index); + amdgpu_userq_fence_driver_put(fence_drv); + } + + xa_unlock(xa); +} + +void +amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq) +{ + amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa); + xa_destroy(&userq->fence_drv_xa); + /* Drop the fence_drv reference held by user queue */ + amdgpu_userq_fence_driver_put(userq->fence_drv); +} + +void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv) +{ + struct amdgpu_userq_fence *userq_fence, *tmp; + struct dma_fence *fence; + u64 rptr; + int i; + + if (!fence_drv) + return; + + rptr = amdgpu_userq_fence_read(fence_drv); + + spin_lock(&fence_drv->fence_list_lock); + list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) { + fence = &userq_fence->base; + + if (rptr < fence->seqno) + break; + + dma_fence_signal(fence); + + for (i = 0; i < userq_fence->fence_drv_array_count; i++) + amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]); + + list_del(&userq_fence->link); + dma_fence_put(fence); + } + spin_unlock(&fence_drv->fence_list_lock); +} + +void amdgpu_userq_fence_driver_destroy(struct kref *ref) +{ + struct 
amdgpu_userq_fence_driver *fence_drv = container_of(ref, + struct amdgpu_userq_fence_driver, + refcount); + struct amdgpu_userq_fence_driver *xa_fence_drv; + struct amdgpu_device *adev = fence_drv->adev; + struct amdgpu_userq_fence *fence, *tmp; + struct xarray *xa = &adev->userq_xa; + unsigned long index, flags; + struct dma_fence *f; + + spin_lock_irqsave(&fence_drv->fence_list_lock, flags); + list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) { + f = &fence->base; + + if (!dma_fence_is_signaled(f)) { + dma_fence_set_error(f, -ECANCELED); + dma_fence_signal(f); + } + + list_del(&fence->link); + dma_fence_put(f); + } + spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags); + + xa_lock_irqsave(xa, flags); + xa_for_each(xa, index, xa_fence_drv) + if (xa_fence_drv == fence_drv) + __xa_erase(xa, index); + xa_unlock_irqrestore(xa, flags); + + /* Free seq64 memory */ + amdgpu_seq64_free(adev, fence_drv->va); + kfree(fence_drv); +} + +void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv) +{ + kref_get(&fence_drv->refcount); +} + +void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv) +{ + kref_put(&fence_drv->refcount, amdgpu_userq_fence_driver_destroy); +} + +static int amdgpu_userq_fence_alloc(struct amdgpu_userq_fence **userq_fence) +{ + *userq_fence = kmem_cache_alloc(amdgpu_userq_fence_slab, GFP_ATOMIC); + return *userq_fence ? 0 : -ENOMEM; +} + +static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq, + struct amdgpu_userq_fence *userq_fence, + u64 seq, struct dma_fence **f) +{ + struct amdgpu_userq_fence_driver *fence_drv; + struct dma_fence *fence; + unsigned long flags; + + fence_drv = userq->fence_drv; + if (!fence_drv) + return -EINVAL; + + spin_lock_init(&userq_fence->lock); + INIT_LIST_HEAD(&userq_fence->link); + fence = &userq_fence->base; + userq_fence->fence_drv = fence_drv; + + dma_fence_init64(fence, &amdgpu_userq_fence_ops, &userq_fence->lock, + fence_drv->context, seq); + + amdgpu_userq_fence_driver_get(fence_drv); + dma_fence_get(fence); + + if (!xa_empty(&userq->fence_drv_xa)) { + struct amdgpu_userq_fence_driver *stored_fence_drv; + unsigned long index, count = 0; + int i = 0; + + xa_lock(&userq->fence_drv_xa); + xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv) + count++; + + userq_fence->fence_drv_array = + kvmalloc_array(count, + sizeof(struct amdgpu_userq_fence_driver *), + GFP_ATOMIC); + + if (userq_fence->fence_drv_array) { + xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv) { + userq_fence->fence_drv_array[i] = stored_fence_drv; + __xa_erase(&userq->fence_drv_xa, index); + i++; + } + } + + userq_fence->fence_drv_array_count = i; + xa_unlock(&userq->fence_drv_xa); + } else { + userq_fence->fence_drv_array = NULL; + userq_fence->fence_drv_array_count = 0; + } + + /* Check if hardware has already processed the job */ + spin_lock_irqsave(&fence_drv->fence_list_lock, flags); + if (!dma_fence_is_signaled_locked(fence)) + list_add_tail(&userq_fence->link, &fence_drv->fences); + else + dma_fence_put(fence); + + spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags); + + *f = fence; + + return 0; +} + +static const char *amdgpu_userq_fence_get_driver_name(struct dma_fence *f) +{ + return "amdgpu_userq_fence"; +} + +static const char *amdgpu_userq_fence_get_timeline_name(struct dma_fence *f) +{ + struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f); + + return fence->fence_drv->timeline_name; +} + +static bool amdgpu_userq_fence_signaled(struct dma_fence *f) +{ + 
struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f); + struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv; + u64 rptr, wptr; + + rptr = amdgpu_userq_fence_read(fence_drv); + wptr = fence->base.seqno; + + if (rptr >= wptr) + return true; + + return false; +} + +static void amdgpu_userq_fence_free(struct rcu_head *rcu) +{ + struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu); + struct amdgpu_userq_fence *userq_fence = to_amdgpu_userq_fence(fence); + struct amdgpu_userq_fence_driver *fence_drv = userq_fence->fence_drv; + + /* Release the fence driver reference */ + amdgpu_userq_fence_driver_put(fence_drv); + + kvfree(userq_fence->fence_drv_array); + kmem_cache_free(amdgpu_userq_fence_slab, userq_fence); +} + +static void amdgpu_userq_fence_release(struct dma_fence *f) +{ + call_rcu(&f->rcu, amdgpu_userq_fence_free); +} + +static const struct dma_fence_ops amdgpu_userq_fence_ops = { + .get_driver_name = amdgpu_userq_fence_get_driver_name, + .get_timeline_name = amdgpu_userq_fence_get_timeline_name, + .signaled = amdgpu_userq_fence_signaled, + .release = amdgpu_userq_fence_release, +}; + +/** + * amdgpu_userq_fence_read_wptr - Read the userq wptr value + * + * @queue: user mode queue structure pointer + * @wptr: write pointer value + * + * Read the wptr value from userq's MQD. The userq signal IOCTL + * creates a dma_fence for the shared buffers that expects the + * RPTR value written to seq64 memory >= WPTR. + * + * Returns 0 on success with the wptr value stored in @wptr, or a + * negative error code on failure. + */ +static int amdgpu_userq_fence_read_wptr(struct amdgpu_usermode_queue *queue, + u64 *wptr) +{ + struct amdgpu_bo_va_mapping *mapping; + struct amdgpu_bo *bo; + u64 addr, *ptr; + int r; + + r = amdgpu_bo_reserve(queue->vm->root.bo, false); + if (r) + return r; + + addr = queue->userq_prop->wptr_gpu_addr; + addr &= AMDGPU_GMC_HOLE_MASK; + + mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, addr >> PAGE_SHIFT); + if (!mapping) { + amdgpu_bo_unreserve(queue->vm->root.bo); + DRM_ERROR("Failed to lookup amdgpu_bo_va_mapping\n"); + return -EINVAL; + } + + bo = amdgpu_bo_ref(mapping->bo_va->base.bo); + amdgpu_bo_unreserve(queue->vm->root.bo); + r = amdgpu_bo_reserve(bo, true); + if (r) { + DRM_ERROR("Failed to reserve userqueue wptr bo\n"); + amdgpu_bo_unref(&bo); + return r; + } + + r = amdgpu_bo_kmap(bo, (void **)&ptr); + if (r) { + DRM_ERROR("Failed to map the userqueue wptr bo\n"); + goto map_error; + } + + *wptr = le64_to_cpu(*ptr); + + amdgpu_bo_kunmap(bo); + amdgpu_bo_unreserve(bo); + amdgpu_bo_unref(&bo); + + return 0; + +map_error: + amdgpu_bo_unreserve(bo); + amdgpu_bo_unref(&bo); + + return r; +} + +static void amdgpu_userq_fence_cleanup(struct dma_fence *fence) +{ + dma_fence_put(fence); +} + +int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp) +{ + struct amdgpu_fpriv *fpriv = filp->driver_priv; + struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr; + struct drm_amdgpu_userq_signal *args = data; + struct drm_gem_object **gobj_write = NULL; + struct drm_gem_object **gobj_read = NULL; + struct amdgpu_usermode_queue *queue; + struct amdgpu_userq_fence *userq_fence; + struct drm_syncobj **syncobj = NULL; + u32 *bo_handles_write, num_write_bo_handles; + u32 *syncobj_handles, num_syncobj_handles; + u32 *bo_handles_read, num_read_bo_handles; + int r, i, entry, rentry, wentry; + struct dma_fence *fence; + struct drm_exec exec; + u64 wptr; + + num_syncobj_handles = args->num_syncobj_handles; + syncobj_handles = memdup_user(u64_to_user_ptr(args->syncobj_handles), + 
size_mul(sizeof(u32), num_syncobj_handles)); + if (IS_ERR(syncobj_handles)) + return PTR_ERR(syncobj_handles); + + /* Array of pointers to the looked up syncobjs */ + syncobj = kmalloc_array(num_syncobj_handles, sizeof(*syncobj), GFP_KERNEL); + if (!syncobj) { + r = -ENOMEM; + goto free_syncobj_handles; + } + + for (entry = 0; entry < num_syncobj_handles; entry++) { + syncobj[entry] = drm_syncobj_find(filp, syncobj_handles[entry]); + if (!syncobj[entry]) { + r = -ENOENT; + goto free_syncobj; + } + } + + num_read_bo_handles = args->num_bo_read_handles; + bo_handles_read = memdup_user(u64_to_user_ptr(args->bo_read_handles), + size_mul(sizeof(u32), num_read_bo_handles)); + if (IS_ERR(bo_handles_read)) { + r = PTR_ERR(bo_handles_read); + goto free_syncobj; + } + + /* Array of pointers to the GEM read objects */ + gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL); + if (!gobj_read) { + r = -ENOMEM; + goto free_bo_handles_read; + } + + for (rentry = 0; rentry < num_read_bo_handles; rentry++) { + gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]); + if (!gobj_read[rentry]) { + r = -ENOENT; + goto put_gobj_read; + } + } + + num_write_bo_handles = args->num_bo_write_handles; + bo_handles_write = memdup_user(u64_to_user_ptr(args->bo_write_handles), + size_mul(sizeof(u32), num_write_bo_handles)); + if (IS_ERR(bo_handles_write)) { + r = PTR_ERR(bo_handles_write); + goto put_gobj_read; + } + + /* Array of pointers to the GEM write objects */ + gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL); + if (!gobj_write) { + r = -ENOMEM; + goto free_bo_handles_write; + } + + for (wentry = 0; wentry < num_write_bo_handles; wentry++) { + gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]); + if (!gobj_write[wentry]) { + r = -ENOENT; + goto put_gobj_write; + } + } + + /* Retrieve the user queue */ + queue = idr_find(&userq_mgr->userq_idr, args->queue_id); + if (!queue) { + r = -ENOENT; + goto put_gobj_write; + } + + r = amdgpu_userq_fence_read_wptr(queue, &wptr); + if (r) + goto put_gobj_write; + + r = amdgpu_userq_fence_alloc(&userq_fence); + if (r) + goto put_gobj_write; + + /* Reaching this point means the user queue is active; make sure the eviction fence is valid */ + amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr); + + /* Create a new fence */ + r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence); + if (r) { + mutex_unlock(&userq_mgr->userq_mutex); + kmem_cache_free(amdgpu_userq_fence_slab, userq_fence); + goto put_gobj_write; + } + + dma_fence_put(queue->last_fence); + queue->last_fence = dma_fence_get(fence); + mutex_unlock(&userq_mgr->userq_mutex); + + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, + (num_read_bo_handles + num_write_bo_handles)); + + /* Lock all BOs with retry handling */ + drm_exec_until_all_locked(&exec) { + r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1); + drm_exec_retry_on_contention(&exec); + if (r) { + amdgpu_userq_fence_cleanup(fence); + goto exec_fini; + } + + r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1); + drm_exec_retry_on_contention(&exec); + if (r) { + amdgpu_userq_fence_cleanup(fence); + goto exec_fini; + } + } + + for (i = 0; i < num_read_bo_handles; i++) { + if (!gobj_read || !gobj_read[i]->resv) + continue; + + dma_resv_add_fence(gobj_read[i]->resv, fence, + DMA_RESV_USAGE_READ); + } + + for (i = 0; i < num_write_bo_handles; i++) { + if (!gobj_write || !gobj_write[i]->resv) + continue; + + 
dma_resv_add_fence(gobj_write[i]->resv, fence, + DMA_RESV_USAGE_WRITE); + } + + /* Add the created fence to syncobj/BO's */ + for (i = 0; i < num_syncobj_handles; i++) + drm_syncobj_replace_fence(syncobj[i], fence); + + /* drop the reference acquired in fence creation function */ + dma_fence_put(fence); + +exec_fini: + drm_exec_fini(&exec); +put_gobj_write: + while (wentry-- > 0) + drm_gem_object_put(gobj_write[wentry]); + kfree(gobj_write); +free_bo_handles_write: + kfree(bo_handles_write); +put_gobj_read: + while (rentry-- > 0) + drm_gem_object_put(gobj_read[rentry]); + kfree(gobj_read); +free_bo_handles_read: + kfree(bo_handles_read); +free_syncobj: + while (entry-- > 0) + if (syncobj[entry]) + drm_syncobj_put(syncobj[entry]); + kfree(syncobj); +free_syncobj_handles: + kfree(syncobj_handles); + + return r; +} + +int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp) +{ + u32 *syncobj_handles, *timeline_points, *timeline_handles, *bo_handles_read, *bo_handles_write; + u32 num_syncobj, num_read_bo_handles, num_write_bo_handles; + struct drm_amdgpu_userq_fence_info *fence_info = NULL; + struct drm_amdgpu_userq_wait *wait_info = data; + struct amdgpu_fpriv *fpriv = filp->driver_priv; + struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr; + struct amdgpu_usermode_queue *waitq; + struct drm_gem_object **gobj_write; + struct drm_gem_object **gobj_read; + struct dma_fence **fences = NULL; + u16 num_points, num_fences = 0; + int r, i, rentry, wentry, cnt; + struct drm_exec exec; + + num_read_bo_handles = wait_info->num_bo_read_handles; + bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles), + size_mul(sizeof(u32), num_read_bo_handles)); + if (IS_ERR(bo_handles_read)) + return PTR_ERR(bo_handles_read); + + num_write_bo_handles = wait_info->num_bo_write_handles; + bo_handles_write = memdup_user(u64_to_user_ptr(wait_info->bo_write_handles), + size_mul(sizeof(u32), num_write_bo_handles)); + if (IS_ERR(bo_handles_write)) { + r = PTR_ERR(bo_handles_write); + goto free_bo_handles_read; + } + + num_syncobj = wait_info->num_syncobj_handles; + syncobj_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_handles), + size_mul(sizeof(u32), num_syncobj)); + if (IS_ERR(syncobj_handles)) { + r = PTR_ERR(syncobj_handles); + goto free_bo_handles_write; + } + + num_points = wait_info->num_syncobj_timeline_handles; + timeline_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_handles), + sizeof(u32) * num_points); + if (IS_ERR(timeline_handles)) { + r = PTR_ERR(timeline_handles); + goto free_syncobj_handles; + } + + timeline_points = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_points), + sizeof(u32) * num_points); + if (IS_ERR(timeline_points)) { + r = PTR_ERR(timeline_points); + goto free_timeline_handles; + } + + gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL); + if (!gobj_read) { + r = -ENOMEM; + goto free_timeline_points; + } + + for (rentry = 0; rentry < num_read_bo_handles; rentry++) { + gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]); + if (!gobj_read[rentry]) { + r = -ENOENT; + goto put_gobj_read; + } + } + + gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL); + if (!gobj_write) { + r = -ENOMEM; + goto put_gobj_read; + } + + for (wentry = 0; wentry < num_write_bo_handles; wentry++) { + gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]); + if (!gobj_write[wentry]) { + r = -ENOENT; + goto put_gobj_write; + } + } 
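+	/*
+	 * The wait ioctl below is a two-pass interface: a first call with
+	 * num_fences == 0 only reports how many fence slots userspace must
+	 * allocate, and a second call with that count set receives the fence
+	 * info array. Illustrative userspace sketch only; the ioctl wrapper
+	 * name is assumed here and should be checked against the uapi header:
+	 *
+	 *   struct drm_amdgpu_userq_wait wait = { ... };
+	 *   wait.num_fences = 0;
+	 *   drmIoctl(fd, DRM_IOCTL_AMDGPU_USERQ_WAIT, &wait);   // query count
+	 *   info = calloc(wait.num_fences, sizeof(struct drm_amdgpu_userq_fence_info));
+	 *   wait.out_fences = (__u64)(uintptr_t)info;
+	 *   drmIoctl(fd, DRM_IOCTL_AMDGPU_USERQ_WAIT, &wait);   // fill info
+	 */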
+ + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, + (num_read_bo_handles + num_write_bo_handles)); + + /* Lock all BOs with retry handling */ + drm_exec_until_all_locked(&exec) { + r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1); + drm_exec_retry_on_contention(&exec); + if (r) { + drm_exec_fini(&exec); + goto put_gobj_write; + } + + r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1); + drm_exec_retry_on_contention(&exec); + if (r) { + drm_exec_fini(&exec); + goto put_gobj_write; + } + } + + if (!wait_info->num_fences) { + if (num_points) { + struct dma_fence_unwrap iter; + struct dma_fence *fence; + struct dma_fence *f; + + for (i = 0; i < num_points; i++) { + r = drm_syncobj_find_fence(filp, timeline_handles[i], + timeline_points[i], + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT, + &fence); + if (r) + goto exec_fini; + + dma_fence_unwrap_for_each(f, &iter, fence) + num_fences++; + + dma_fence_put(fence); + } + } + + /* Count syncobj's fence */ + for (i = 0; i < num_syncobj; i++) { + struct dma_fence *fence; + + r = drm_syncobj_find_fence(filp, syncobj_handles[i], + 0, + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT, + &fence); + if (r) + goto exec_fini; + + num_fences++; + dma_fence_put(fence); + } + + /* Count GEM objects fence */ + for (i = 0; i < num_read_bo_handles; i++) { + struct dma_resv_iter resv_cursor; + struct dma_fence *fence; + + dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv, + DMA_RESV_USAGE_READ, fence) + num_fences++; + } + + for (i = 0; i < num_write_bo_handles; i++) { + struct dma_resv_iter resv_cursor; + struct dma_fence *fence; + + dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv, + DMA_RESV_USAGE_WRITE, fence) + num_fences++; + } + + /* + * Passing num_fences = 0 means that userspace doesn't want to + * retrieve userq_fence_info. If num_fences = 0 we skip filling + * userq_fence_info and return the actual number of fences on + * args->num_fences. 
+ */ + wait_info->num_fences = num_fences; + } else { + /* Array of fence info */ + fence_info = kmalloc_array(wait_info->num_fences, sizeof(*fence_info), GFP_KERNEL); + if (!fence_info) { + r = -ENOMEM; + goto exec_fini; + } + + /* Array of fences */ + fences = kmalloc_array(wait_info->num_fences, sizeof(*fences), GFP_KERNEL); + if (!fences) { + r = -ENOMEM; + goto free_fence_info; + } + + /* Retrieve GEM read objects fence */ + for (i = 0; i < num_read_bo_handles; i++) { + struct dma_resv_iter resv_cursor; + struct dma_fence *fence; + + dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv, + DMA_RESV_USAGE_READ, fence) { + if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) { + r = -EINVAL; + goto free_fences; + } + + fences[num_fences++] = fence; + dma_fence_get(fence); + } + } + + /* Retrieve GEM write objects fence */ + for (i = 0; i < num_write_bo_handles; i++) { + struct dma_resv_iter resv_cursor; + struct dma_fence *fence; + + dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv, + DMA_RESV_USAGE_WRITE, fence) { + if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) { + r = -EINVAL; + goto free_fences; + } + + fences[num_fences++] = fence; + dma_fence_get(fence); + } + } + + if (num_points) { + struct dma_fence_unwrap iter; + struct dma_fence *fence; + struct dma_fence *f; + + for (i = 0; i < num_points; i++) { + r = drm_syncobj_find_fence(filp, timeline_handles[i], + timeline_points[i], + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT, + &fence); + if (r) + goto free_fences; + + dma_fence_unwrap_for_each(f, &iter, fence) { + if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) { + r = -EINVAL; + goto free_fences; + } + + dma_fence_get(f); + fences[num_fences++] = f; + } + + dma_fence_put(fence); + } + } + + /* Retrieve syncobj's fence */ + for (i = 0; i < num_syncobj; i++) { + struct dma_fence *fence; + + r = drm_syncobj_find_fence(filp, syncobj_handles[i], + 0, + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT, + &fence); + if (r) + goto free_fences; + + if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) { + r = -EINVAL; + goto free_fences; + } + + fences[num_fences++] = fence; + } + + /* + * Keep only the latest fences to reduce the number of values + * given back to userspace. + */ + num_fences = dma_fence_dedup_array(fences, num_fences); + + waitq = idr_find(&userq_mgr->userq_idr, wait_info->waitq_id); + if (!waitq) { + r = -EINVAL; + goto free_fences; + } + + for (i = 0, cnt = 0; i < num_fences; i++) { + struct amdgpu_userq_fence_driver *fence_drv; + struct amdgpu_userq_fence *userq_fence; + u32 index; + + userq_fence = to_amdgpu_userq_fence(fences[i]); + if (!userq_fence) { + /* + * Just waiting on other driver fences should + * be good for now + */ + r = dma_fence_wait(fences[i], true); + if (r) { + dma_fence_put(fences[i]); + goto free_fences; + } + + dma_fence_put(fences[i]); + continue; + } + + fence_drv = userq_fence->fence_drv; + /* + * We need to make sure the user queue releases its references + * to the fence drivers at some point before queue destruction. + * Otherwise, we would gather those references until we don't + * have any more space left and crash. 
+ */ + r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv, + xa_limit_32b, GFP_KERNEL); + if (r) + goto free_fences; + + amdgpu_userq_fence_driver_get(fence_drv); + + /* Store drm syncobj's gpu va address and value */ + fence_info[cnt].va = fence_drv->va; + fence_info[cnt].value = fences[i]->seqno; + + dma_fence_put(fences[i]); + /* Increment the actual userq fence count */ + cnt++; + } + + wait_info->num_fences = cnt; + /* Copy userq fence info to user space */ + if (copy_to_user(u64_to_user_ptr(wait_info->out_fences), + fence_info, wait_info->num_fences * sizeof(*fence_info))) { + r = -EFAULT; + goto free_fences; + } + + kfree(fences); + kfree(fence_info); + } + + drm_exec_fini(&exec); + for (i = 0; i < num_read_bo_handles; i++) + drm_gem_object_put(gobj_read[i]); + kfree(gobj_read); + + for (i = 0; i < num_write_bo_handles; i++) + drm_gem_object_put(gobj_write[i]); + kfree(gobj_write); + + kfree(timeline_points); + kfree(timeline_handles); + kfree(syncobj_handles); + kfree(bo_handles_write); + kfree(bo_handles_read); + + return 0; + +free_fences: + while (num_fences-- > 0) + dma_fence_put(fences[num_fences]); + kfree(fences); +free_fence_info: + kfree(fence_info); +exec_fini: + drm_exec_fini(&exec); +put_gobj_write: + while (wentry-- > 0) + drm_gem_object_put(gobj_write[wentry]); + kfree(gobj_write); +put_gobj_read: + while (rentry-- > 0) + drm_gem_object_put(gobj_read[rentry]); + kfree(gobj_read); +free_timeline_points: + kfree(timeline_points); +free_timeline_handles: + kfree(timeline_handles); +free_syncobj_handles: + kfree(syncobj_handles); +free_bo_handles_write: + kfree(bo_handles_write); +free_bo_handles_read: + kfree(bo_handles_read); + + return r; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h new file mode 100644 index 000000000000..97a125ab8a78 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __AMDGPU_USERQ_FENCE_H__ +#define __AMDGPU_USERQ_FENCE_H__ + +#include <linux/types.h> + +#include "amdgpu_userq.h" + +struct amdgpu_userq_fence { + struct dma_fence base; + /* + * This lock is necessary to synchronize the + * userqueue dma fence operations. 
+ */ + spinlock_t lock; + struct list_head link; + unsigned long fence_drv_array_count; + struct amdgpu_userq_fence_driver *fence_drv; + struct amdgpu_userq_fence_driver **fence_drv_array; +}; + +struct amdgpu_userq_fence_driver { + struct kref refcount; + u64 va; + u64 gpu_addr; + u64 *cpu_addr; + u64 context; + /* + * This lock is necessary to synchronize the access + * to the fences list by the fence driver. + */ + spinlock_t fence_list_lock; + struct list_head fences; + struct amdgpu_device *adev; + char timeline_name[TASK_COMM_LEN]; +}; + +int amdgpu_userq_fence_slab_init(void); +void amdgpu_userq_fence_slab_fini(void); + +void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv); +void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv); +int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev, + struct amdgpu_usermode_queue *userq); +void amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq); +void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv); +void amdgpu_userq_fence_driver_destroy(struct kref *ref); +int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); +int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 1991dd3d1056..f1f67521c29c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -134,6 +134,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i) mutex_init(&adev->vcn.inst[i].vcn1_jpeg1_workaround); mutex_init(&adev->vcn.inst[i].vcn_pg_lock); + mutex_init(&adev->vcn.inst[i].engine_reset_mutex); atomic_set(&adev->vcn.inst[i].total_submission_cnt, 0); INIT_DELAYED_WORK(&adev->vcn.inst[i].idle_work, amdgpu_vcn_idle_work_handler); atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0); @@ -353,9 +354,9 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i) cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work); - /* err_event_athub will corrupt VCPU buffer, so we need to + /* err_event_athub and dpc recovery will corrupt VCPU buffer, so we need to * restore fw data and clear buffer in amdgpu_vcn_resume() */ - if (in_ras_intr) + if (in_ras_intr || adev->pcie_reset_ctx.in_link_reset) return 0; return amdgpu_vcn_save_vcpu_bo_inst(adev, i); @@ -1451,3 +1452,78 @@ int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block, return ret; } + +/** + * amdgpu_vcn_reset_engine - Reset a specific VCN engine + * @adev: Pointer to the AMDGPU device + * @instance_id: VCN engine instance to reset + * + * Returns: 0 on success, or a negative error code on failure. + */ +static int amdgpu_vcn_reset_engine(struct amdgpu_device *adev, + uint32_t instance_id) +{ + struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[instance_id]; + int r, i; + + mutex_lock(&vinst->engine_reset_mutex); + /* Stop the scheduler's work queue for the dec and enc rings if they are running. + * This ensures that no new tasks are submitted to the queues while + * the reset is in progress. 
+ */ + drm_sched_wqueue_stop(&vinst->ring_dec.sched); + for (i = 0; i < vinst->num_enc_rings; i++) + drm_sched_wqueue_stop(&vinst->ring_enc[i].sched); + + /* Perform the VCN reset for the specified instance */ + r = vinst->reset(vinst); + if (r) + goto unlock; + r = amdgpu_ring_test_ring(&vinst->ring_dec); + if (r) + goto unlock; + for (i = 0; i < vinst->num_enc_rings; i++) { + r = amdgpu_ring_test_ring(&vinst->ring_enc[i]); + if (r) + goto unlock; + } + amdgpu_fence_driver_force_completion(&vinst->ring_dec); + for (i = 0; i < vinst->num_enc_rings; i++) + amdgpu_fence_driver_force_completion(&vinst->ring_enc[i]); + + /* Restart the scheduler's work queue for the dec and enc rings + * if they were stopped by this function. This allows new tasks + * to be submitted to the queues after the reset is complete. + */ + drm_sched_wqueue_start(&vinst->ring_dec.sched); + for (i = 0; i < vinst->num_enc_rings; i++) + drm_sched_wqueue_start(&vinst->ring_enc[i].sched); + +unlock: + mutex_unlock(&vinst->engine_reset_mutex); + + return r; +} + +/** + * amdgpu_vcn_ring_reset - Reset a VCN ring + * @ring: ring to reset + * @vmid: vmid of guilty job + * @timedout_fence: fence of timed out job + * + * This helper is for VCN blocks without unified queues because + * resetting the engine resets all queues in that case. With + * unified queues we have one queue per engine. + * Returns: 0 on success, or a negative error code on failure. + */ +int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) +{ + struct amdgpu_device *adev = ring->adev; + + if (adev->vcn.inst[ring->me].using_unified_queue) + return -EINVAL; + + return amdgpu_vcn_reset_engine(adev, ring->me); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index cdcdae7f71ce..0bc0a94d7cf0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -66,7 +66,6 @@ #define VCN_ENC_CMD_REG_WAIT 0x0000000c #define VCN_AON_SOC_ADDRESS_2_0 0x1f800 -#define VCN1_AON_SOC_ADDRESS_3_0 0x48000 #define VCN_VID_IP_ADDRESS_2_0 0x0 #define VCN_AON_IP_ADDRESS_2_0 0x30000 @@ -331,7 +330,9 @@ struct amdgpu_vcn_inst { struct dpg_pause_state *new_state); int (*set_pg_state)(struct amdgpu_vcn_inst *vinst, enum amd_powergating_state state); + int (*reset)(struct amdgpu_vcn_inst *vinst); bool using_unified_queue; + struct mutex engine_reset_mutex; }; struct amdgpu_vcn_ras { @@ -553,5 +554,7 @@ void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev); int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block, enum amd_powergating_state state); - +int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 0bb8cbe0dcc0..13f0cdeb59c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -1323,6 +1323,9 @@ static int amdgpu_virt_req_ras_err_count_internal(struct amdgpu_device *adev, bo { struct amdgpu_virt *virt = &adev->virt; + if (!virt->ops || !virt->ops->req_ras_err_count) + return -EOPNOTSUPP; + /* Host allows 15 ras telemetry requests per 60 seconds. After which, the Host * will ignore incoming guest messages. Ratelimit the guest messages to * prevent guest self DOS. 
@@ -1378,14 +1381,16 @@ amdgpu_virt_write_cpers_to_ring(struct amdgpu_device *adev, used_size = host_telemetry->header.used_size; if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10)) - return 0; + return -EINVAL; cper_dump = kmemdup(&host_telemetry->body.cper_dump, used_size, GFP_KERNEL); if (!cper_dump) return -ENOMEM; - if (checksum != amd_sriov_msg_checksum(cper_dump, used_size, 0, 0)) + if (checksum != amd_sriov_msg_checksum(cper_dump, used_size, 0, 0)) { + ret = -EINVAL; goto out; + } *more = cper_dump->more; @@ -1425,7 +1430,7 @@ static int amdgpu_virt_req_ras_cper_dump_internal(struct amdgpu_device *adev) int ret = 0; uint32_t more = 0; - if (!amdgpu_sriov_ras_cper_en(adev)) + if (!virt->ops || !virt->ops->req_ras_cper_dump) return -EOPNOTSUPP; do { @@ -1434,7 +1439,7 @@ static int amdgpu_virt_req_ras_cper_dump_internal(struct amdgpu_device *adev) adev, virt->fw_reserve.ras_telemetry, &more); else ret = 0; - } while (more); + } while (more && !ret); return ret; } @@ -1444,6 +1449,9 @@ int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update) struct amdgpu_virt *virt = &adev->virt; int ret = 0; + if (!amdgpu_sriov_ras_cper_en(adev)) + return -EOPNOTSUPP; + if ((__ratelimit(&virt->ras.ras_cper_dump_rs) || force_update) && down_read_trylock(&adev->reset_domain->sem)) { mutex_lock(&virt->ras.ras_telemetry_mutex); @@ -1480,3 +1488,16 @@ bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev, return true; } + +/* + * amdgpu_virt_request_bad_pages() - request bad pages + * @adev: amdgpu device. + * Send command to GPU hypervisor to write new bad pages into the shared PF2VF region + */ +void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev) +{ + struct amdgpu_virt *virt = &adev->virt; + + if (virt->ops && virt->ops->req_bad_pages) + virt->ops->req_bad_pages(adev); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index df03dba67ab8..3da3ebb1d9a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -97,6 +97,7 @@ struct amdgpu_virt_ops { bool (*rcvd_ras_intr)(struct amdgpu_device *adev); int (*req_ras_err_count)(struct amdgpu_device *adev); int (*req_ras_cper_dump)(struct amdgpu_device *adev, u64 vf_rptr); + int (*req_bad_pages)(struct amdgpu_device *adev); }; /* @@ -146,11 +147,15 @@ enum AMDGIM_FEATURE_FLAG { enum AMDGIM_REG_ACCESS_FLAG { /* Use PSP to program IH_RB_CNTL */ - AMDGIM_FEATURE_IH_REG_PSP_EN = (1 << 0), + AMDGIM_FEATURE_IH_REG_PSP_EN = (1 << 0), /* Use RLC to program MMHUB regs */ - AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1), + AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1), /* Use RLC to program GC regs */ - AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2), + AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2), + /* Use PSP to program L1_TLB_CNTL */ + AMDGIM_FEATURE_L1_TLB_CNTL_PSP_EN = (1 << 3), + /* Use RLCG to program SQ_CONFIG1 */ + AMDGIM_FEATURE_REG_ACCESS_SQ_CONFIG = (1 << 4), }; struct amdgim_pf2vf_info_v1 { @@ -260,7 +265,10 @@ struct amdgpu_virt { uint32_t reg_val_offs; struct amdgpu_irq_src ack_irq; struct amdgpu_irq_src rcv_irq; + struct work_struct flr_work; + struct work_struct bad_pages_work; + struct amdgpu_mm_table mm_table; const struct amdgpu_virt_ops *ops; struct amdgpu_vf_error_buffer vf_errors; @@ -295,6 +303,9 @@ struct amdgpu_virt { union amd_sriov_ras_caps ras_telemetry_en_caps; struct amdgpu_virt_ras ras; struct amd_sriov_ras_telemetry_error_count count_cache; + + /* hibernate and resume with different VF feature for xgmi enabled 
system */ + bool is_xgmi_node_migrate_enabled; }; struct amdgpu_video_codec_info; @@ -330,9 +341,17 @@ struct amdgpu_video_codec_info; (amdgpu_sriov_vf((adev)) && \ ((adev)->virt.reg_access & (AMDGIM_FEATURE_GC_REG_RLC_EN))) +#define amdgpu_sriov_reg_indirect_l1_tlb_cntl(adev) \ +(amdgpu_sriov_vf((adev)) && \ + ((adev)->virt.reg_access & (AMDGIM_FEATURE_L1_TLB_CNTL_PSP_EN))) + #define amdgpu_sriov_rlcg_error_report_enabled(adev) \ (amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev)) +#define amdgpu_sriov_reg_access_sq_config(adev) \ +(amdgpu_sriov_vf((adev)) && \ + ((adev)->virt.reg_access & (AMDGIM_FEATURE_REG_ACCESS_SQ_CONFIG))) + #define amdgpu_passthrough(adev) \ ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE) @@ -376,6 +395,10 @@ static inline bool is_virtual_machine(void) ((adev)->virt.gim_feature & AMDGIM_FEATURE_VCN_RB_DECOUPLE) #define amdgpu_sriov_is_mes_info_enable(adev) \ ((adev)->virt.gim_feature & AMDGIM_FEATURE_MES_INFO_ENABLE) + +#define amdgpu_virt_xgmi_migrate_enabled(adev) \ + ((adev)->virt.is_xgmi_node_migrate_enabled && (adev)->gmc.xgmi.node_segment_size != 0) + bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); void amdgpu_virt_init_setting(struct amdgpu_device *adev); int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init); @@ -423,4 +446,5 @@ int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update) int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev); bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev, enum amdgpu_ras_block block); +void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index ce52b4d75e94..5cacf5717016 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -622,7 +622,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, pr_warn_ratelimited("Evicted user BO is not reserved\n"); if (ti) { - pr_warn_ratelimited("pid %d\n", ti->pid); + pr_warn_ratelimited("pid %d\n", ti->task.pid); amdgpu_vm_put_task_info(ti); } @@ -765,6 +765,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool cleaner_shader_needed = false; bool pasid_mapping_needed = false; struct dma_fence *fence = NULL; + struct amdgpu_fence *af; unsigned int patch; int r; @@ -787,7 +788,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping && ring->funcs->emit_wreg; - cleaner_shader_needed = adev->gfx.enable_cleaner_shader && + cleaner_shader_needed = job->run_cleaner_shader && + adev->gfx.enable_cleaner_shader && ring->funcs->emit_cleaner_shader && job->base.s_fence && &job->base.s_fence->scheduled == isolation->spearhead; @@ -817,7 +819,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid) adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid); - if (!ring->is_mes_queue && ring->funcs->emit_gds_switch && + if (ring->funcs->emit_gds_switch && gds_switch_needed) { amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base, job->gds_size, job->gws_base, @@ -829,6 +831,9 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, r = amdgpu_fence_emit(ring, &fence, NULL, 0); if (r) return r; + /* this is part of the job's context */ + af = container_of(fence, struct amdgpu_fence, base); + af->context = 
job->base.s_fence ? job->base.s_fence->finished.context : 0; } if (vm_flush_needed) { @@ -1270,8 +1275,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, } else { struct drm_gem_object *obj = &bo->tbo.base; - if (obj->import_attach && bo_va->is_xgmi) { - struct dma_buf *dma_buf = obj->import_attach->dmabuf; + if (drm_gem_is_imported(obj) && bo_va->is_xgmi) { + struct dma_buf *dma_buf = obj->dma_buf; struct drm_gem_object *gobj = dma_buf->priv; struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj); @@ -1630,7 +1635,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, * validation */ if (vm->is_compute_context && - bo_va->base.bo->tbo.base.import_attach && + drm_gem_is_imported(&bo_va->base.bo->tbo.base) && (!bo_va->base.bo->tbo.resource || bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM)) amdgpu_vm_bo_evicted_user(&bo_va->base); @@ -2394,10 +2399,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, else adev->vm_manager.fragment_size = amdgpu_vm_fragment_size; - DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n", - vm_size, adev->vm_manager.num_level + 1, - adev->vm_manager.block_size, - adev->vm_manager.fragment_size); + dev_info( + adev->dev, + "vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n", + vm_size, adev->vm_manager.num_level + 1, + adev->vm_manager.block_size, adev->vm_manager.fragment_size); } /** @@ -2408,13 +2414,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, */ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) { - timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, - DMA_RESV_USAGE_BOOKKEEP, - true, timeout); + timeout = drm_sched_entity_flush(&vm->immediate, timeout); if (timeout <= 0) return timeout; - return dma_fence_wait_timeout(vm->last_unlocked, true, timeout); + return drm_sched_entity_flush(&vm->delayed, timeout); } static void amdgpu_vm_destroy_task_info(struct kref *kref) @@ -2446,7 +2450,8 @@ amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid) */ void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info) { - kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info); + if (task_info) + kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info); } /** @@ -2506,11 +2511,11 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) if (!vm->task_info) return; - if (vm->task_info->pid == current->pid) + if (vm->task_info->task.pid == current->pid) return; - vm->task_info->pid = current->pid; - get_task_comm(vm->task_info->task_name, current); + vm->task_info->task.pid = current->pid; + get_task_comm(vm->task_info->task.comm, current); if (current->group_leader->mm != current->mm) return; @@ -2563,8 +2568,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_GFX); - DRM_DEBUG_DRIVER("VM update mode is %s\n", - vm->use_cpu_for_update ? "CPU" : "SDMA"); + dev_dbg(adev->dev, "VM update mode is %s\n", + vm->use_cpu_for_update ? 
"CPU" : "SDMA"); WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)), "CPU update of VM recommended only for large BAR system\n"); @@ -2606,7 +2611,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, r = amdgpu_vm_create_task_info(vm); if (r) - DRM_DEBUG("Failed to create task info for VM\n"); + dev_dbg(adev->dev, "Failed to create task info for VM\n"); amdgpu_bo_unreserve(vm->root.bo); amdgpu_bo_unref(&root_bo); @@ -2657,8 +2662,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) /* Update VM state */ vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_COMPUTE); - DRM_DEBUG_DRIVER("VM update mode is %s\n", - vm->use_cpu_for_update ? "CPU" : "SDMA"); + dev_dbg(adev->dev, "VM update mode is %s\n", + vm->use_cpu_for_update ? "CPU" : "SDMA"); WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)), "CPU update of VM recommended only for large BAR system\n"); @@ -2773,7 +2778,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) dev_warn(adev->dev, "VM memory stats for proc %s(%d) task %s(%d) is non-zero when fini\n", - ti->process_name, ti->pid, ti->task_name, ti->tgid); + ti->process_name, ti->task.pid, ti->task.comm, ti->tgid); } amdgpu_vm_put_task_info(vm->task_info); @@ -2981,7 +2986,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, error_unlock: amdgpu_bo_unreserve(root); if (r < 0) - DRM_ERROR("Can't handle page fault (%d)\n", r); + dev_err(adev->dev, "Can't handle page fault (%d)\n", r); error_unref: amdgpu_bo_unref(&root); @@ -3155,3 +3160,12 @@ bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo) { return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv; } + +void amdgpu_vm_print_task_info(struct amdgpu_device *adev, + struct amdgpu_task_info *task_info) +{ + dev_err(adev->dev, + " Process %s pid %d thread %s pid %d\n", + task_info->process_name, task_info->tgid, + task_info->task.comm, task_info->task.pid); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index f3ad687125ad..fd086efd8457 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -236,9 +236,8 @@ struct amdgpu_vm_pte_funcs { }; struct amdgpu_task_info { + struct drm_wedge_task_info task; char process_name[TASK_COMM_LEN]; - char task_name[TASK_COMM_LEN]; - pid_t pid; pid_t tgid; struct kref refcount; }; @@ -668,4 +667,7 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct dma_fence **fence); +void amdgpu_vm_print_task_info(struct amdgpu_device *adev, + struct amdgpu_task_info *task_info); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c index 51cddfa3f1e8..5d26797356a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c @@ -71,7 +71,6 @@ static void amdgpu_tlb_fence_work(struct work_struct *work) } static const struct dma_fence_ops amdgpu_tlb_fence_ops = { - .use_64bit_seqno = true, .get_driver_name = amdgpu_tlb_fence_get_driver_name, .get_timeline_name = amdgpu_tlb_fence_get_timeline_name }; @@ -101,8 +100,8 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm INIT_WORK(&f->work, amdgpu_tlb_fence_work); spin_lock_init(&f->lock); - dma_fence_init(&f->base, &amdgpu_tlb_fence_ops, &f->lock, - vm->tlb_fence_context, 
atomic64_read(&vm->tlb_seq)); + dma_fence_init64(&f->base, &amdgpu_tlb_fence_ops, &f->lock, + vm->tlb_fence_context, atomic64_read(&vm->tlb_seq)); /* TODO: We probably need a separate wq here */ dma_fence_get(&f->base); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 6da8994e0469..07c936e90d8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -24,6 +24,7 @@ #include <linux/dma-mapping.h> #include <drm/ttm/ttm_range_manager.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_vm.h" @@ -462,7 +463,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, int r; lpfn = (u64)place->lpfn << PAGE_SHIFT; - if (!lpfn) + if (!lpfn || lpfn > man->size) lpfn = man->size; fpfn = (u64)place->fpfn << PAGE_SHIFT; @@ -782,6 +783,23 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr) } /** + * amdgpu_vram_mgr_clear_reset_blocks - reset clear blocks + * + * @adev: amdgpu device pointer + * + * Reset the cleared drm buddy blocks. + */ +void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev) +{ + struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr; + struct drm_buddy *mm = &mgr->mm; + + mutex_lock(&mgr->lock); + drm_buddy_reset_clear(mm, false); + mutex_unlock(&mgr->lock); +} + +/** * amdgpu_vram_mgr_intersects - test each drm buddy block for intersection * * @man: TTM memory type manager @@ -907,6 +925,9 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev) struct ttm_resource_manager *man = &mgr->manager; int err; + man->cg = drmm_cgroup_register_region(adev_to_drm(adev), "vram", adev->gmc.real_vram_size); + if (IS_ERR(man->cg)) + return PTR_ERR(man->cg); ttm_resource_manager_init(man, &adev->mman.bdev, adev->gmc.real_vram_size); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h index b256cbc2bc27..2c88d5fd87da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h @@ -66,7 +66,10 @@ to_amdgpu_vram_mgr_resource(struct ttm_resource *res) static inline void amdgpu_vram_mgr_set_cleared(struct ttm_resource *res) { - to_amdgpu_vram_mgr_resource(res)->flags |= DRM_BUDDY_CLEARED; + struct amdgpu_vram_mgr_resource *ares = to_amdgpu_vram_mgr_resource(res); + + WARN_ON(ares->flags & DRM_BUDDY_CLEARED); + ares->flags |= DRM_BUDDY_CLEARED; } #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index 23b6f7a4aa4a..c417f8689220 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -27,6 +27,9 @@ #include <drm/drm_drv.h> #include "../amdxcp/amdgpu_xcp_drv.h" +static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr); +static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr); + static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr, struct amdgpu_xcp_ip *xcp_ip, int xcp_state) { @@ -189,7 +192,7 @@ static int __amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, goto out; } - + amdgpu_xcp_sysfs_entries_update(xcp_mgr); out: mutex_unlock(&xcp_mgr->xcp_lock); @@ -215,15 +218,27 @@ int amdgpu_xcp_restore_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) return __amdgpu_xcp_switch_partition_mode(xcp_mgr, xcp_mgr->mode); } -int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) +static bool __amdgpu_xcp_is_cached_mode_valid(struct amdgpu_xcp_mgr *xcp_mgr) { - int mode; + if 
(!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode) + return true; if (!amdgpu_sriov_vf(xcp_mgr->adev) && xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) - return xcp_mgr->mode; + return true; - if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode) + if (xcp_mgr->mode != AMDGPU_XCP_MODE_NONE && + xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS) + return true; + + return false; +} + +int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) +{ + int mode; + + if (__amdgpu_xcp_is_cached_mode_valid(xcp_mgr)) return xcp_mgr->mode; if (!(flags & AMDGPU_XCP_FL_LOCKED)) @@ -263,9 +278,10 @@ static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev) if (ret == -ENOSPC) { dev_warn(adev->dev, "Skip xcp node #%d when out of drm node resource.", i); - return 0; + ret = 0; + goto out; } else if (ret) { - return ret; + goto out; } /* Redirect all IOCTLs to the primary device */ @@ -278,9 +294,14 @@ static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev) p_ddev->vma_offset_manager = ddev->vma_offset_manager; p_ddev->driver = &amdgpu_partition_driver; adev->xcp_mgr->xcp[i].ddev = p_ddev; + + dev_set_drvdata(p_ddev->dev, &adev->xcp_mgr->xcp[i]); } + ret = 0; +out: + amdgpu_xcp_sysfs_entries_init(adev->xcp_mgr); - return 0; + return ret; } int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode, @@ -288,6 +309,7 @@ int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode, struct amdgpu_xcp_mgr_funcs *xcp_funcs) { struct amdgpu_xcp_mgr *xcp_mgr; + int i; if (!xcp_funcs || !xcp_funcs->get_ip_details) return -EINVAL; @@ -306,6 +328,8 @@ int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode, amdgpu_xcp_init(xcp_mgr, init_num_xcps, init_mode); adev->xcp_mgr = xcp_mgr; + for (i = 0; i < MAX_XCP; ++i) + xcp_mgr->xcp[i].xcp_mgr = xcp_mgr; return amdgpu_xcp_dev_alloc(adev); } @@ -433,6 +457,223 @@ void amdgpu_xcp_release_sched(struct amdgpu_device *adev, } } +int amdgpu_xcp_select_scheds(struct amdgpu_device *adev, + u32 hw_ip, u32 hw_prio, + struct amdgpu_fpriv *fpriv, + unsigned int *num_scheds, + struct drm_gpu_scheduler ***scheds) +{ + u32 sel_xcp_id; + int i; + struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr; + + if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) { + u32 least_ref_cnt = ~0; + + fpriv->xcp_id = 0; + for (i = 0; i < xcp_mgr->num_xcps; i++) { + u32 total_ref_cnt; + + total_ref_cnt = atomic_read(&xcp_mgr->xcp[i].ref_cnt); + if (total_ref_cnt < least_ref_cnt) { + fpriv->xcp_id = i; + least_ref_cnt = total_ref_cnt; + } + } + } + sel_xcp_id = fpriv->xcp_id; + + if (xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) { + *num_scheds = + xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds; + *scheds = + xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched; + atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt); + dev_dbg(adev->dev, "Selected partition #%d", sel_xcp_id); + } else { + dev_err(adev->dev, "Failed to schedule partition #%d.", sel_xcp_id); + return -ENOENT; + } + + return 0; +} + +static void amdgpu_set_xcp_id(struct amdgpu_device *adev, + uint32_t inst_idx, + struct amdgpu_ring *ring) +{ + int xcp_id; + enum AMDGPU_XCP_IP_BLOCK ip_blk; + uint32_t inst_mask; + + ring->xcp_id = AMDGPU_XCP_NO_PARTITION; + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) + adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id; + if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) || + (ring->funcs->type == AMDGPU_RING_TYPE_CPER)) + return; + + inst_mask = 1 << inst_idx; + + switch (ring->funcs->type) { + case AMDGPU_HW_IP_GFX: + case 
AMDGPU_RING_TYPE_COMPUTE:
+	case AMDGPU_RING_TYPE_KIQ:
+		ip_blk = AMDGPU_XCP_GFX;
+		break;
+	case AMDGPU_RING_TYPE_SDMA:
+		ip_blk = AMDGPU_XCP_SDMA;
+		break;
+	case AMDGPU_RING_TYPE_VCN_ENC:
+	case AMDGPU_RING_TYPE_VCN_JPEG:
+		ip_blk = AMDGPU_XCP_VCN;
+		break;
+	default:
+		dev_err(adev->dev, "Unsupported ring type %d!", ring->funcs->type);
+		return;
+	}
+
+	for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
+		if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
+			ring->xcp_id = xcp_id;
+			dev_dbg(adev->dev, "ring:%s xcp_id: %u", ring->name,
+				ring->xcp_id);
+			if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+				adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
+			break;
+		}
+	}
+}
+
+static void amdgpu_xcp_gpu_sched_update(struct amdgpu_device *adev,
+					struct amdgpu_ring *ring,
+					unsigned int sel_xcp_id)
+{
+	unsigned int *num_gpu_sched;
+
+	num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
+			.gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
+	adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
+			.sched[(*num_gpu_sched)++] = &ring->sched;
+	dev_dbg(adev->dev, "%s :[%d] gpu_sched[%d][%d] = %d",
+		ring->name, sel_xcp_id, ring->funcs->type,
+		ring->hw_prio, *num_gpu_sched);
+}
+
+static int amdgpu_xcp_sched_list_update(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring;
+	int i;
+
+	for (i = 0; i < MAX_XCP; i++) {
+		atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
+		memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
+	}
+
+	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
+		return 0;
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+		ring = adev->rings[i];
+		if (!ring || !ring->sched.ready || ring->no_scheduler)
+			continue;
+
+		amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
+
+		/* VCN may be shared by two partitions under CPX MODE in certain
+		 * configs.
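+		 * When a VCN instance is shared, its rings must be visible to both
+		 * partitions, so each such ring is registered twice: once under its
+		 * own xcp_id and once under the adjacent partition (xcp_id + 1),
+		 * as the second amdgpu_xcp_gpu_sched_update() call below shows.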
+ */ + if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC || + ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) && + (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst)) + amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1); + } + + return 0; +} + +int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->num_rings; i++) { + struct amdgpu_ring *ring = adev->rings[i]; + + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE || + ring->funcs->type == AMDGPU_RING_TYPE_KIQ) + amdgpu_set_xcp_id(adev, ring->xcc_id, ring); + else + amdgpu_set_xcp_id(adev, ring->me, ring); + } + + return amdgpu_xcp_sched_list_update(adev); +} + +void amdgpu_xcp_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr) +{ + struct amdgpu_device *adev = xcp_mgr->adev; + + xcp_mgr->supp_xcp_modes = 0; + + switch (NUM_XCC(adev->gfx.xcc_mask)) { + case 8: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_DPX_PARTITION_MODE) | + BIT(AMDGPU_QPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + case 6: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_TPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + case 4: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_DPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + case 2: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + case 1: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + + default: + break; + } +} + +int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) +{ + /* TODO: + * Stop user queues and threads, and make sure GPU is empty of work. 
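+	 * Until that lands, only KFD is quiesced: when AMDGPU_XCP_OPS_KFD is set,
+	 * the KFD software state is torn down here and re-probed and re-initialized
+	 * in amdgpu_xcp_post_partition_switch().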
+ */ + + if (flags & AMDGPU_XCP_OPS_KFD) + amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev); + + return 0; +} + +int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) +{ + int ret = 0; + + if (flags & AMDGPU_XCP_OPS_KFD) { + amdgpu_amdkfd_device_probe(xcp_mgr->adev); + amdgpu_amdkfd_device_init(xcp_mgr->adev); + /* If KFD init failed, return failure */ + if (!xcp_mgr->adev->kfd.init_complete) + ret = -EIO; + } + + return ret; +} + +/*====================== xcp sysfs - configuration ======================*/ #define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name) \ static ssize_t amdgpu_xcp_res_sysfs_##_name##_show( \ struct amdgpu_xcp_res_details *xcp_res, char *buf) \ @@ -635,7 +876,7 @@ static const struct attribute *xcp_attrs[] = { NULL, }; -void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev) +static void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev) { struct amdgpu_xcp_res_details *xcp_res; struct amdgpu_xcp_cfg *xcp_cfg; @@ -703,16 +944,16 @@ err1: kobject_put(&xcp_cfg->kobj); } -void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev) +static void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev) { struct amdgpu_xcp_res_details *xcp_res; struct amdgpu_xcp_cfg *xcp_cfg; int i; - if (!adev->xcp_mgr) + if (!adev->xcp_mgr || !adev->xcp_mgr->xcp_cfg) return; - xcp_cfg = adev->xcp_mgr->xcp_cfg; + xcp_cfg = adev->xcp_mgr->xcp_cfg; for (i = 0; i < xcp_cfg->num_res; i++) { xcp_res = &xcp_cfg->xcp_res[i]; kobject_put(&xcp_res->kobj); @@ -722,3 +963,124 @@ void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev) sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs); kobject_put(&xcp_cfg->kobj); } + +/*====================== xcp sysfs - data entries ======================*/ + +#define to_xcp(x) container_of(x, struct amdgpu_xcp, kobj) + +static ssize_t xcp_metrics_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct amdgpu_xcp *xcp = to_xcp(kobj); + struct amdgpu_xcp_mgr *xcp_mgr; + ssize_t size; + + xcp_mgr = xcp->xcp_mgr; + size = amdgpu_dpm_get_xcp_metrics(xcp_mgr->adev, xcp->id, NULL); + if (size <= 0) + return size; + + if (size > PAGE_SIZE) + return -ENOSPC; + + return amdgpu_dpm_get_xcp_metrics(xcp_mgr->adev, xcp->id, buf); +} + +static umode_t amdgpu_xcp_attrs_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct amdgpu_xcp *xcp = to_xcp(kobj); + + if (!xcp || !xcp->valid) + return 0; + + return attr->mode; +} + +static struct kobj_attribute xcp_sysfs_metrics = __ATTR_RO(xcp_metrics); + +static struct attribute *amdgpu_xcp_attrs[] = { + &xcp_sysfs_metrics.attr, + NULL, +}; + +static const struct attribute_group amdgpu_xcp_attrs_group = { + .attrs = amdgpu_xcp_attrs, + .is_visible = amdgpu_xcp_attrs_is_visible +}; + +static const struct kobj_type xcp_sysfs_ktype = { + .sysfs_ops = &kobj_sysfs_ops, +}; + +static void amdgpu_xcp_sysfs_entries_fini(struct amdgpu_xcp_mgr *xcp_mgr, int n) +{ + struct amdgpu_xcp *xcp; + + for (n--; n >= 0; n--) { + xcp = &xcp_mgr->xcp[n]; + if (!xcp->ddev || !xcp->valid) + continue; + sysfs_remove_group(&xcp->kobj, &amdgpu_xcp_attrs_group); + kobject_put(&xcp->kobj); + } +} + +static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr) +{ + struct amdgpu_xcp *xcp; + int i, r; + + for (i = 0; i < MAX_XCP; i++) { + /* Redirect all IOCTLs to the primary device */ + xcp = &xcp_mgr->xcp[i]; + if (!xcp->ddev) + break; + r = kobject_init_and_add(&xcp->kobj, &xcp_sysfs_ktype, + &xcp->ddev->dev->kobj, "xcp"); + if (r) + goto out; + + r = sysfs_create_group(&xcp->kobj, 
&amdgpu_xcp_attrs_group); + if (r) + goto out; + } + + return; +out: + kobject_put(&xcp->kobj); +} + +static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr) +{ + struct amdgpu_xcp *xcp; + int i; + + for (i = 0; i < MAX_XCP; i++) { + /* Redirect all IOCTLs to the primary device */ + xcp = &xcp_mgr->xcp[i]; + if (!xcp->ddev) + continue; + sysfs_update_group(&xcp->kobj, &amdgpu_xcp_attrs_group); + } + + return; +} + +void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev) +{ + if (!adev->xcp_mgr) + return; + + amdgpu_xcp_cfg_sysfs_init(adev); + + return; +} + +void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev) +{ + if (!adev->xcp_mgr) + return; + amdgpu_xcp_sysfs_entries_fini(adev->xcp_mgr, MAX_XCP); + amdgpu_xcp_cfg_sysfs_fini(adev); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index b63f53242c57..70a0f8400b57 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -39,6 +39,8 @@ #define AMDGPU_XCP_NO_PARTITION (~0) +#define AMDGPU_XCP_OPS_KFD (1 << 0) + struct amdgpu_fpriv; enum AMDGPU_XCP_IP_BLOCK { @@ -108,6 +110,8 @@ struct amdgpu_xcp { struct drm_driver *driver; struct drm_vma_offset_manager *vma_offset_manager; struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX]; + struct amdgpu_xcp_mgr *xcp_mgr; + struct kobject kobj; }; struct amdgpu_xcp_mgr { @@ -142,10 +146,6 @@ struct amdgpu_xcp_mgr_funcs { int (*suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); int (*prepare_resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); int (*resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); - int (*select_scheds)(struct amdgpu_device *adev, - u32 hw_ip, u32 hw_prio, struct amdgpu_fpriv *fpriv, - unsigned int *num_scheds, struct drm_gpu_scheduler ***scheds); - int (*update_partition_sched_list)(struct amdgpu_device *adev); }; int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); @@ -174,18 +174,17 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev, struct drm_file *file_priv); void amdgpu_xcp_release_sched(struct amdgpu_device *adev, struct amdgpu_ctx_entity *entity); - -void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev); -void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev); - -#define amdgpu_xcp_select_scheds(adev, e, c, d, x, y) \ - ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \ - (adev)->xcp_mgr->funcs->select_scheds ? \ - (adev)->xcp_mgr->funcs->select_scheds((adev), (e), (c), (d), (x), (y)) : -ENOENT) -#define amdgpu_xcp_update_partition_sched_list(adev) \ - ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \ - (adev)->xcp_mgr->funcs->update_partition_sched_list ? 
\ - (adev)->xcp_mgr->funcs->update_partition_sched_list(adev) : 0) +int amdgpu_xcp_select_scheds(struct amdgpu_device *adev, + u32 hw_ip, u32 hw_prio, + struct amdgpu_fpriv *fpriv, + unsigned int *num_scheds, + struct drm_gpu_scheduler ***scheds); +void amdgpu_xcp_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr); +int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev); +int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags); +int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags); +void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev); +void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev); static inline int amdgpu_xcp_get_num_xcp(struct amdgpu_xcp_mgr *xcp_mgr) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 6029a799074d..1ede308a7c67 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -294,17 +294,46 @@ static const struct amdgpu_pcs_ras_field xgmi3x16_pcs_ras_fields[] = { SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxCMDPktErr)}, }; +int amdgpu_xgmi_get_ext_link(struct amdgpu_device *adev, int link_num) +{ + int link_map_6_4_x[8] = { 0, 3, 1, 2, 7, 6, 4, 5 }; + + switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) { + case IP_VERSION(6, 4, 0): + case IP_VERSION(6, 4, 1): + if (link_num < ARRAY_SIZE(link_map_6_4_x)) + return link_map_6_4_x[link_num]; + break; + default: + return -EINVAL; + } + + return -EINVAL; +} + static u32 xgmi_v6_4_get_link_status(struct amdgpu_device *adev, int global_link_num) { - const u32 smnpcs_xgmi3x16_pcs_state_hist1 = 0x11a00070; - const int xgmi_inst = 2; - u32 link_inst; + const u32 smn_xgmi_6_4_pcs_state_hist1[2] = { 0x11a00070, 0x11b00070 }; + const u32 smn_xgmi_6_4_1_pcs_state_hist1[2] = { 0x12100070, + 0x11b00070 }; + u32 i, n; u64 addr; - link_inst = global_link_num % xgmi_inst; + switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) { + case IP_VERSION(6, 4, 0): + n = ARRAY_SIZE(smn_xgmi_6_4_pcs_state_hist1); + addr = smn_xgmi_6_4_pcs_state_hist1[global_link_num % n]; + break; + case IP_VERSION(6, 4, 1): + n = ARRAY_SIZE(smn_xgmi_6_4_1_pcs_state_hist1); + addr = smn_xgmi_6_4_1_pcs_state_hist1[global_link_num % n]; + break; + default: + return U32_MAX; + } - addr = (smnpcs_xgmi3x16_pcs_state_hist1 | (link_inst << 20)) + - adev->asic_funcs->encode_ext_smn_addressing(global_link_num / xgmi_inst); + i = global_link_num / n; + addr += adev->asic_funcs->encode_ext_smn_addressing(i); return RREG32_PCIE_EXT(addr); } @@ -1172,7 +1201,7 @@ static int xgmi_v6_4_0_aca_bank_parser(struct aca_handle *handle, struct aca_ban break; case ACA_SMU_TYPE_CE: count = ext_error_code == 6 ? 
count : 0ULL; - bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank); + bank->aca_err_type = ACA_ERROR_TYPE_CE; ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, count); break; default: @@ -1742,16 +1771,25 @@ void amdgpu_xgmi_early_init(struct amdgpu_device *adev) case IP_VERSION(9, 4, 0): case IP_VERSION(9, 4, 1): case IP_VERSION(9, 4, 2): - adev->gmc.xgmi.max_speed = XGMI_SPEED_25GT; + /* 25 GT/s */ + adev->gmc.xgmi.max_speed = 25; adev->gmc.xgmi.max_width = 16; break; case IP_VERSION(9, 4, 3): case IP_VERSION(9, 4, 4): case IP_VERSION(9, 5, 0): - adev->gmc.xgmi.max_speed = XGMI_SPEED_32GT; + /* 32 GT/s */ + adev->gmc.xgmi.max_speed = 32; adev->gmc.xgmi.max_width = 16; break; default: break; } } + +void amgpu_xgmi_set_max_speed_width(struct amdgpu_device *adev, + uint16_t max_speed, uint8_t max_width) +{ + adev->gmc.xgmi.max_speed = max_speed; + adev->gmc.xgmi.max_width = max_width; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h index 32dabba4062f..bba0b26fee8f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h @@ -25,12 +25,6 @@ #include <drm/task_barrier.h> #include "amdgpu_ras.h" -enum amdgpu_xgmi_link_speed { - XGMI_SPEED_16GT = 16, - XGMI_SPEED_25GT = 25, - XGMI_SPEED_32GT = 32 -}; - struct amdgpu_hive_info { struct kobject kobj; uint64_t hive_id; @@ -97,7 +91,7 @@ struct amdgpu_xgmi { struct ras_common_if *ras_if; bool connected_to_cpu; struct amdgpu_xgmi_ras *ras; - enum amdgpu_xgmi_link_speed max_speed; + uint16_t max_speed; uint8_t max_width; }; @@ -125,8 +119,11 @@ int amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev, int req_nps_mode); int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev, int global_link_num); +int amdgpu_xgmi_get_ext_link(struct amdgpu_device *adev, int link_num); void amdgpu_xgmi_early_init(struct amdgpu_device *adev); uint32_t amdgpu_xgmi_get_max_bandwidth(struct amdgpu_device *adev); +void amgpu_xgmi_set_max_speed_width(struct amdgpu_device *adev, + uint16_t max_speed, uint8_t max_width); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h index d6ac2652f0ac..33edad1f9dcd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -109,10 +109,12 @@ union amd_sriov_msg_feature_flags { union amd_sriov_reg_access_flags { struct { - uint32_t vf_reg_access_ih : 1; - uint32_t vf_reg_access_mmhub : 1; - uint32_t vf_reg_access_gc : 1; - uint32_t reserved : 29; + uint32_t vf_reg_access_ih : 1; + uint32_t vf_reg_access_mmhub : 1; + uint32_t vf_reg_access_gc : 1; + uint32_t vf_reg_access_l1_tlb_cntl : 1; + uint32_t vf_reg_access_sq_config : 1; + uint32_t reserved : 27; } flags; uint32_t all; }; @@ -330,6 +332,7 @@ enum amd_sriov_mailbox_request_message { MB_REQ_MSG_RAS_POISON = 202, MB_REQ_RAS_ERROR_COUNT = 203, MB_REQ_RAS_CPER_DUMP = 204, + MB_REQ_RAS_BAD_PAGES = 205, }; /* mailbox message send from host to guest */ @@ -347,6 +350,9 @@ enum amd_sriov_mailbox_response_message { MB_RES_MSG_GPU_RMA = 10, MB_RES_MSG_RAS_ERROR_COUNT_READY = 11, MB_REQ_RAS_CPER_DUMP_READY = 14, + MB_RES_MSG_RAS_BAD_PAGES_READY = 15, + MB_RES_MSG_RAS_BAD_PAGES_NOTIFICATION = 16, + MB_RES_MSG_UNRECOV_ERR_NOTIFICATION = 17, MB_RES_MSG_TEXT_MESSAGE = 255 }; diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index 3c07517be09a..914cf4bfb033 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c +++ 
b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -29,12 +29,11 @@ #include "gfx_v9_4_3.h" #include "gfxhub_v1_2.h" #include "sdma_v4_4_2.h" +#include "amdgpu_ip.h" #define XCP_INST_MASK(num_inst, xcp_id) \ (num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0) -#define AMDGPU_XCP_OPS_KFD (1 << 0) - void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev) { int i; @@ -62,234 +61,6 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev) adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1; } -static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev) -{ - return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst); -} - -static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev, - uint32_t inst_idx, struct amdgpu_ring *ring) -{ - int xcp_id; - enum AMDGPU_XCP_IP_BLOCK ip_blk; - uint32_t inst_mask; - - ring->xcp_id = AMDGPU_XCP_NO_PARTITION; - if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) - adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id; - if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) || - (ring->funcs->type == AMDGPU_RING_TYPE_CPER)) - return; - - inst_mask = 1 << inst_idx; - - switch (ring->funcs->type) { - case AMDGPU_HW_IP_GFX: - case AMDGPU_RING_TYPE_COMPUTE: - case AMDGPU_RING_TYPE_KIQ: - ip_blk = AMDGPU_XCP_GFX; - break; - case AMDGPU_RING_TYPE_SDMA: - ip_blk = AMDGPU_XCP_SDMA; - break; - case AMDGPU_RING_TYPE_VCN_ENC: - case AMDGPU_RING_TYPE_VCN_JPEG: - ip_blk = AMDGPU_XCP_VCN; - break; - default: - DRM_ERROR("Not support ring type %d!", ring->funcs->type); - return; - } - - for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) { - if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) { - ring->xcp_id = xcp_id; - dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name, - ring->xcp_id); - if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) - adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id; - break; - } - } -} - -static void aqua_vanjaram_xcp_gpu_sched_update( - struct amdgpu_device *adev, - struct amdgpu_ring *ring, - unsigned int sel_xcp_id) -{ - unsigned int *num_gpu_sched; - - num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id] - .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds; - adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio] - .sched[(*num_gpu_sched)++] = &ring->sched; - DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name, - sel_xcp_id, ring->funcs->type, - ring->hw_prio, *num_gpu_sched); -} - -static int aqua_vanjaram_xcp_sched_list_update( - struct amdgpu_device *adev) -{ - struct amdgpu_ring *ring; - int i; - - for (i = 0; i < MAX_XCP; i++) { - atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0); - memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched)); - } - - if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) - return 0; - - for (i = 0; i < AMDGPU_MAX_RINGS; i++) { - ring = adev->rings[i]; - if (!ring || !ring->sched.ready || ring->no_scheduler) - continue; - - aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id); - - /* VCN may be shared by two partitions under CPX MODE in certain - * configs. 
- */ - if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC || - ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) && - aqua_vanjaram_xcp_vcn_shared(adev)) - aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1); - } - - return 0; -} - -static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev) -{ - int i; - - for (i = 0; i < adev->num_rings; i++) { - struct amdgpu_ring *ring = adev->rings[i]; - - if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE || - ring->funcs->type == AMDGPU_RING_TYPE_KIQ) - aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring); - else - aqua_vanjaram_set_xcp_id(adev, ring->me, ring); - } - - return aqua_vanjaram_xcp_sched_list_update(adev); -} - -static int aqua_vanjaram_select_scheds( - struct amdgpu_device *adev, - u32 hw_ip, - u32 hw_prio, - struct amdgpu_fpriv *fpriv, - unsigned int *num_scheds, - struct drm_gpu_scheduler ***scheds) -{ - u32 sel_xcp_id; - int i; - - if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) { - u32 least_ref_cnt = ~0; - - fpriv->xcp_id = 0; - for (i = 0; i < adev->xcp_mgr->num_xcps; i++) { - u32 total_ref_cnt; - - total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt); - if (total_ref_cnt < least_ref_cnt) { - fpriv->xcp_id = i; - least_ref_cnt = total_ref_cnt; - } - } - } - sel_xcp_id = fpriv->xcp_id; - - if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) { - *num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds; - *scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched; - atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt); - DRM_DEBUG("Selected partition #%d", sel_xcp_id); - } else { - DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id); - return -ENOENT; - } - - return 0; -} - -static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev, - enum amd_hw_ip_block_type block, - int8_t inst) -{ - int8_t dev_inst; - - switch (block) { - case GC_HWIP: - case SDMA0_HWIP: - /* Both JPEG and VCN as JPEG is only alias of VCN */ - case VCN_HWIP: - dev_inst = adev->ip_map.dev_inst[block][inst]; - break; - default: - /* For rest of the IPs, no look up required. - * Assume 'logical instance == physical instance' for all configs. 
*/ - dev_inst = inst; - break; - } - - return dev_inst; -} - -static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev, - enum amd_hw_ip_block_type block, - uint32_t mask) -{ - uint32_t dev_mask = 0; - int8_t log_inst, dev_inst; - - while (mask) { - log_inst = ffs(mask) - 1; - dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst); - dev_mask |= (1 << dev_inst); - mask &= ~(1 << log_inst); - } - - return dev_mask; -} - -static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev, - enum amd_hw_ip_block_type ip_block, - uint32_t inst_mask) -{ - int l = 0, i; - - while (inst_mask) { - i = ffs(inst_mask) - 1; - adev->ip_map.dev_inst[ip_block][l++] = i; - inst_mask &= ~(1 << i); - } - for (; l < HWIP_MAX_INSTANCE; l++) - adev->ip_map.dev_inst[ip_block][l] = -1; -} - -void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev) -{ - u32 ip_map[][2] = { - { GC_HWIP, adev->gfx.xcc_mask }, - { SDMA0_HWIP, adev->sdma.sdma_mask }, - { VCN_HWIP, adev->vcn.inst_mask }, - }; - int i; - - for (i = 0; i < ARRAY_SIZE(ip_map); ++i) - aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]); - - adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst; - adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask; -} - /* Fixed pattern for smn addressing on different AIDs: * bit[34]: indicate cross AID access * bit[33:32]: indicate target AID id @@ -353,11 +124,14 @@ static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) if (adev->nbio.funcs->get_compute_partition_mode) { mode = adev->nbio.funcs->get_compute_partition_mode(adev); - if (mode != derv_mode) + if (mode != derv_mode) { dev_warn( adev->dev, "Mismatch in compute partition mode - reported : %d derived : %d", mode, derv_mode); + if (derv_mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) + amdgpu_device_bus_status_check(adev); + } } return mode; @@ -448,52 +222,71 @@ static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int x return 0; } -static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr, - int mode, - struct amdgpu_xcp_cfg *xcp_cfg) +static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr, + int px_mode, int *num_xcp, + uint16_t *nps_modes) { struct amdgpu_device *adev = xcp_mgr->adev; - int max_res[AMDGPU_XCP_RES_MAX] = {}; - bool res_lt_xcp; - int num_xcp, i; - u16 nps_modes; - if (!(xcp_mgr->supp_xcp_modes & BIT(mode))) + if (!num_xcp || !nps_modes || !(xcp_mgr->supp_xcp_modes & BIT(px_mode))) return -EINVAL; - max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask); - max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances; - max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst; - max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst; - - switch (mode) { + switch (px_mode) { case AMDGPU_SPX_PARTITION_MODE: - num_xcp = 1; - nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE); + *num_xcp = 1; + *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE); break; case AMDGPU_DPX_PARTITION_MODE: - num_xcp = 2; - nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE); + *num_xcp = 2; + *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | + BIT(AMDGPU_NPS2_PARTITION_MODE); break; case AMDGPU_TPX_PARTITION_MODE: - num_xcp = 3; - nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | - BIT(AMDGPU_NPS4_PARTITION_MODE); + *num_xcp = 3; + *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | + BIT(AMDGPU_NPS4_PARTITION_MODE); break; case AMDGPU_QPX_PARTITION_MODE: - num_xcp = 4; - nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | - BIT(AMDGPU_NPS4_PARTITION_MODE); 
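
To make the mapping concrete: the reworked __aqua_vanjaram_get_px_mode_info() (its QPX and CPX arms continue below) reports, for each compute-partition (PX) mode, the XCP count and a bitmask of NPS memory modes it can run under, so __aqua_vanjaram_is_valid_mode() reduces to a bit test. A minimal stand-alone sketch of that test follows; the enum values here only mirror the kernel's mapping for illustration and are not the real headers, and the SR-IOV case (which also allows NPS2 under CPX) is omitted:

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Illustrative stand-ins for the kernel enums; values are arbitrary. */
enum px_mode { SPX, DPX, TPX, QPX, CPX };
enum nps_mode { NPS1, NPS2, NPS4 };

/* Mirrors the PX-to-NPS mapping built in __aqua_vanjaram_get_px_mode_info(). */
static unsigned int px_compatible_nps(enum px_mode px)
{
	switch (px) {
	case SPX:
		return BIT(NPS1);
	case DPX:
		return BIT(NPS1) | BIT(NPS2);
	case TPX:
	case QPX:
	case CPX:
		return BIT(NPS1) | BIT(NPS4);
	}
	return 0;
}

/* The validity check boils down to one bit test. */
static bool px_valid_for_nps(enum px_mode px, enum nps_mode nps)
{
	return px_compatible_nps(px) & BIT(nps);
}

int main(void)
{
	printf("DPX under NPS2: %s\n", px_valid_for_nps(DPX, NPS2) ? "valid" : "invalid");
	printf("QPX under NPS2: %s\n", px_valid_for_nps(QPX, NPS2) ? "valid" : "invalid");
	return 0;
}

Built this way, adding a new PX mode only means extending the switch; the validity check itself never changes.
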
+ *num_xcp = 4; + *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | + BIT(AMDGPU_NPS4_PARTITION_MODE); break; case AMDGPU_CPX_PARTITION_MODE: - num_xcp = NUM_XCC(adev->gfx.xcc_mask); - nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | - BIT(AMDGPU_NPS4_PARTITION_MODE); + *num_xcp = NUM_XCC(adev->gfx.xcc_mask); + *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | + BIT(AMDGPU_NPS4_PARTITION_MODE); + if (amdgpu_sriov_vf(adev)) + *nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE); break; default: return -EINVAL; } + return 0; +} + +static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr, + int mode, + struct amdgpu_xcp_cfg *xcp_cfg) +{ + struct amdgpu_device *adev = xcp_mgr->adev; + int max_res[AMDGPU_XCP_RES_MAX] = {}; + bool res_lt_xcp; + int num_xcp, i, r; + u16 nps_modes; + + if (!(xcp_mgr->supp_xcp_modes & BIT(mode))) + return -EINVAL; + + max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask); + max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances; + max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst; + max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst; + + r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp, &nps_modes); + if (r) + return r; + xcp_cfg->compatible_nps_modes = (adev->gmc.supported_nps_modes & nps_modes); xcp_cfg->num_res = ARRAY_SIZE(max_res); @@ -542,30 +335,31 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr, enum amdgpu_gfx_partition mode) { struct amdgpu_device *adev = xcp_mgr->adev; - int num_xcc, num_xccs_per_xcp; + int num_xcc, num_xccs_per_xcp, r; + int num_xcp, nps_mode; + u16 supp_nps_modes; + bool comp_mode; + + nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev); + r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp, + &supp_nps_modes); + if (r) + return false; + comp_mode = !!(BIT(nps_mode) & supp_nps_modes); num_xcc = NUM_XCC(adev->gfx.xcc_mask); switch (mode) { case AMDGPU_SPX_PARTITION_MODE: - return adev->gmc.num_mem_partitions == 1 && num_xcc > 0; + return comp_mode && num_xcc > 0; case AMDGPU_DPX_PARTITION_MODE: - return adev->gmc.num_mem_partitions <= 2 && (num_xcc % 4) == 0; + return comp_mode && (num_xcc % 4) == 0; case AMDGPU_TPX_PARTITION_MODE: - return (adev->gmc.num_mem_partitions == 1 || - adev->gmc.num_mem_partitions == 3) && - ((num_xcc % 3) == 0); + return comp_mode && ((num_xcc % 3) == 0); case AMDGPU_QPX_PARTITION_MODE: num_xccs_per_xcp = num_xcc / 4; - return (adev->gmc.num_mem_partitions == 1 || - adev->gmc.num_mem_partitions == 4) && - (num_xccs_per_xcp >= 2); + return comp_mode && (num_xccs_per_xcp >= 2); case AMDGPU_CPX_PARTITION_MODE: - /* (num_xcc > 1) because 1 XCC is considered SPX, not CPX. - * (num_xcc % adev->gmc.num_mem_partitions) == 0 because - * num_compute_partitions can't be less than num_mem_partitions - */ - return ((num_xcc > 1) && - (num_xcc % adev->gmc.num_mem_partitions) == 0); + return comp_mode && (num_xcc > 1); default: return false; } @@ -573,72 +367,6 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr, return false; } -static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) -{ - /* TODO: - * Stop user queues and threads, and make sure GPU is empty of work. 
- */ - - if (flags & AMDGPU_XCP_OPS_KFD) - amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev); - - return 0; -} - -static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) -{ - int ret = 0; - - if (flags & AMDGPU_XCP_OPS_KFD) { - amdgpu_amdkfd_device_probe(xcp_mgr->adev); - amdgpu_amdkfd_device_init(xcp_mgr->adev); - /* If KFD init failed, return failure */ - if (!xcp_mgr->adev->kfd.init_complete) - ret = -EIO; - } - - return ret; -} - -static void -__aqua_vanjaram_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr) -{ - struct amdgpu_device *adev = xcp_mgr->adev; - - xcp_mgr->supp_xcp_modes = 0; - - switch (NUM_XCC(adev->gfx.xcc_mask)) { - case 8: - xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | - BIT(AMDGPU_DPX_PARTITION_MODE) | - BIT(AMDGPU_QPX_PARTITION_MODE) | - BIT(AMDGPU_CPX_PARTITION_MODE); - break; - case 6: - xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | - BIT(AMDGPU_TPX_PARTITION_MODE) | - BIT(AMDGPU_CPX_PARTITION_MODE); - break; - case 4: - xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | - BIT(AMDGPU_DPX_PARTITION_MODE) | - BIT(AMDGPU_CPX_PARTITION_MODE); - break; - /* this seems only existing in emulation phase */ - case 2: - xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | - BIT(AMDGPU_CPX_PARTITION_MODE); - break; - case 1: - xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | - BIT(AMDGPU_CPX_PARTITION_MODE); - break; - - default: - break; - } -} - static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) { int mode; @@ -685,7 +413,7 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, goto out; } - ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags); + ret = amdgpu_xcp_pre_partition_switch(xcp_mgr, flags); if (ret) goto unlock; @@ -698,7 +426,7 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, *num_xcps = num_xcc / num_xcc_per_xcp; amdgpu_xcp_init(xcp_mgr, *num_xcps, mode); - ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags); + ret = amdgpu_xcp_post_partition_switch(xcp_mgr, flags); if (!ret) __aqua_vanjaram_update_available_partition_mode(xcp_mgr); unlock: @@ -781,9 +509,6 @@ struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = { .get_ip_details = &aqua_vanjaram_get_xcp_ip_details, .get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info, .get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id, - .select_scheds = &aqua_vanjaram_select_scheds, - .update_partition_sched_list = - &aqua_vanjaram_update_partition_sched_list }; static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) @@ -798,7 +523,7 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) if (ret) return ret; - __aqua_vanjaram_update_supported_modes(adev->xcp_mgr); + amdgpu_xcp_update_supported_modes(adev->xcp_mgr); /* TODO: Default memory node affinity init */ return ret; @@ -838,7 +563,7 @@ int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) if (ret) return ret; - aqua_vanjaram_ip_map_init(adev); + amdgpu_ip_map_init(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index 81d195d366ce..427b073de2fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -1444,6 +1444,7 @@ static void atom_get_vbios_pn(struct atom_context *ctx) if (vbios_str == NULL) vbios_str += sizeof(BIOS_ATOM_PREFIX) - 1; } + OPTIMIZER_HIDE_VAR(vbios_str); if (vbios_str != NULL && *vbios_str == 0) vbios_str++; diff --git 
a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c index 521b9faab180..492813ab1b54 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c @@ -458,8 +458,8 @@ bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connect u8 link_status[DP_LINK_STATUS_SIZE]; struct amdgpu_connector_atom_dig *dig = amdgpu_connector->con_priv; - if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux, link_status) - <= 0) + if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux, + link_status) < 0) return false; if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count)) return false; @@ -616,7 +616,7 @@ amdgpu_atombios_dp_link_train_cr(struct amdgpu_atombios_dp_link_train_info *dp_i drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd); if (drm_dp_dpcd_read_link_status(dp_info->aux, - dp_info->link_status) <= 0) { + dp_info->link_status) < 0) { DRM_ERROR("displayport link status failed\n"); break; } @@ -681,7 +681,7 @@ amdgpu_atombios_dp_link_train_ce(struct amdgpu_atombios_dp_link_train_info *dp_i drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd); if (drm_dp_dpcd_read_link_status(dp_info->aux, - dp_info->link_status) <= 0) { + dp_info->link_status) < 0) { DRM_ERROR("displayport link status failed\n"); break; } diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 508cea965983..9e8715b4739d 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -56,6 +56,8 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev); static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev); static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block); +u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev); + MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin"); MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin"); MODULE_FIRMWARE("amdgpu/hawaii_sdma.bin"); @@ -67,9 +69,6 @@ MODULE_FIRMWARE("amdgpu/kabini_sdma1.bin"); MODULE_FIRMWARE("amdgpu/mullins_sdma.bin"); MODULE_FIRMWARE("amdgpu/mullins_sdma1.bin"); -u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev); - - static void cik_sdma_free_microcode(struct amdgpu_device *adev) { int i; @@ -993,14 +992,9 @@ static int cik_sdma_sw_fini(struct amdgpu_ip_block *ip_block) static int cik_sdma_hw_init(struct amdgpu_ip_block *ip_block) { - int r; struct amdgpu_device *adev = ip_block->adev; - r = cik_sdma_start(adev); - if (r) - return r; - - return r; + return cik_sdma_start(adev); } static int cik_sdma_hw_fini(struct amdgpu_ip_block *ip_block) @@ -1040,14 +1034,10 @@ static bool cik_sdma_is_idle(struct amdgpu_ip_block *ip_block) static int cik_sdma_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned i; - u32 tmp; struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK | - SRBM_STATUS2__SDMA1_BUSY_MASK); - - if (!tmp) + if (cik_sdma_is_idle(ip_block)) return 0; udelay(1); } diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h index 279288365940..8aca4f2734f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/cikd.h +++ b/drivers/gpu/drm/amd/amdgpu/cikd.h @@ -60,9 +60,6 @@ #define AUD5_REGISTER_OFFSET (0x179d - 0x1780) #define AUD6_REGISTER_OFFSET (0x17a4 - 0x1780) -#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001 -#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003 - #define PIPEID(x) ((x) << 0) #define MEID(x) ((x) << 
2) #define VMID(x) ((x) << 4) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index df401aded662..bf7c22f81cda 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -3075,7 +3075,7 @@ static int dce_v10_0_set_hpd_irq_state(struct amdgpu_device *adev, u32 tmp; if (hpd >= adev->mode_info.num_hpd) { - DRM_DEBUG("invalid hdp %d\n", hpd); + DRM_DEBUG("invalid hpd %d\n", hpd); return 0; } @@ -3227,7 +3227,7 @@ static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev, u32 tmp; if (hpd >= adev->mode_info.num_hpd) { - DRM_DEBUG("invalid hdp %d\n", hpd); + DRM_DEBUG("invalid hpd %d\n", hpd); return; } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 80f01c3989cd..47e05783c4a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -3206,7 +3206,7 @@ static int dce_v11_0_set_hpd_irq_state(struct amdgpu_device *adev, u32 tmp; if (hpd >= adev->mode_info.num_hpd) { - DRM_DEBUG("invalid hdp %d\n", hpd); + DRM_DEBUG("invalid hpd %d\n", hpd); return 0; } @@ -3358,7 +3358,7 @@ static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev, u32 tmp; if (hpd >= adev->mode_info.num_hpd) { - DRM_DEBUG("invalid hdp %d\n", hpd); + DRM_DEBUG("invalid hpd %d\n", hpd); return; } @@ -3488,8 +3488,7 @@ static const struct amd_ip_funcs dce_v11_0_ip_funcs = { .set_powergating_state = dce_v11_0_set_powergating_state, }; -static void -dce_v11_0_encoder_mode_set(struct drm_encoder *encoder, +static void dce_v11_0_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 255c70959343..276c025c4c03 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -287,7 +287,7 @@ static void dce_v6_0_hpd_int_ack(struct amdgpu_device *adev, u32 tmp; if (hpd >= adev->mode_info.num_hpd) { - DRM_DEBUG("invalid hdp %d\n", hpd); + DRM_DEBUG("invalid hpd %d\n", hpd); return; } @@ -412,7 +412,7 @@ static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev, { if (!render) WREG32(mmVGA_RENDER_CONTROL, - RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL); + RREG32(mmVGA_RENDER_CONTROL) & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK); } static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev) @@ -1011,16 +1011,16 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev, /* select wm A */ arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset); tmp = arb_control3; - tmp &= ~LATENCY_WATERMARK_MASK(3); - tmp |= LATENCY_WATERMARK_MASK(1); + tmp &= ~(3 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT); + tmp |= (1 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT); WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp); WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) | (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT))); /* select wm B */ tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset); - tmp &= ~LATENCY_WATERMARK_MASK(3); - tmp |= LATENCY_WATERMARK_MASK(2); + tmp &= ~(3 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT); + tmp |= (2 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT); 
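
A pattern note on the dce_v6_0.c conversion running through this hunk (the write-back of tmp continues just below): wrapper macros such as LATENCY_WATERMARK_MASK() are replaced by explicit __SHIFT/_MASK arithmetic from the register headers. The underlying read-modify-write idiom, sketched stand-alone with made-up FOO_* names (these are not the real amdgpu definitions):

#include <stdint.h>
#include <stdio.h>

/* Made-up field definition in the amdgpu register-header style:
 * FOO__<FIELD>__SHIFT and FOO__<FIELD>_MASK describe one bitfield.
 */
#define FOO__URGENCY_WATERMARK__SHIFT 20
#define FOO__URGENCY_WATERMARK_MASK   (0x3u << FOO__URGENCY_WATERMARK__SHIFT)

/* Read-modify-write of a single field, the idiom used for the wm A/B
 * select above: clear the field, then OR in the shifted new value.
 */
static uint32_t set_urgency_watermark(uint32_t reg, uint32_t sel)
{
	reg &= ~FOO__URGENCY_WATERMARK_MASK;
	reg |= (sel << FOO__URGENCY_WATERMARK__SHIFT) & FOO__URGENCY_WATERMARK_MASK;
	return reg;
}

int main(void)
{
	uint32_t tmp = 0xffffffffu;

	tmp = set_urgency_watermark(tmp, 1);
	printf("0x%08x\n", tmp);	/* field now holds 1, other bits untouched */
	return 0;
}
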
WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp); WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) | @@ -1089,7 +1089,7 @@ static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev, } WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset, - DC_LB_MEMORY_CONFIG(tmp)); + (tmp << DC_LB_MEMORY_SPLIT__DC_LB_MEMORY_CONFIG__SHIFT)); WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT)); @@ -1306,6 +1306,7 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder) struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + u32 offset; struct drm_connector *connector; struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; @@ -1327,6 +1328,11 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder) { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, }; + if (!dig || !dig->afmt || !dig->afmt->pin) + return; + + offset = dig->afmt->pin->offset; + drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { @@ -1348,7 +1354,7 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder) return; for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { - u32 tmp = 0; + u32 value = 0; u8 stereo_freqs = 0; int max_channels = -1; int j; @@ -1358,12 +1364,12 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder) if (sad->format == eld_reg_to_type[i][1]) { if (sad->channels > max_channels) { - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, - MAX_CHANNELS, sad->channels); - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, - DESCRIPTOR_BYTE_2, sad->byte2); - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, - SUPPORTED_FREQUENCIES, sad->freq); + value = (sad->channels << + AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) | + (sad->byte2 << + AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) | + (sad->freq << + AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT); max_channels = sad->channels; } @@ -1374,13 +1380,13 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder) } } - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, - SUPPORTED_FREQUENCIES_STEREO, stereo_freqs); - WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp); + value |= (stereo_freqs << + AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT); + + WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value); } kfree(sads); - } static void dce_v6_0_audio_enable(struct amdgpu_device *adev, @@ -1886,7 +1892,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc, struct amdgpu_bo *abo; uint64_t fb_location, tiling_flags; uint32_t fb_format, fb_pitch_pixels, pipe_config; - u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE); + u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); u32 viewport_w, viewport_h; int r; bool bypass_lut = false; @@ -1926,76 +1932,76 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc, switch (target_fb->format->format) { case DRM_FORMAT_C8: - fb_format = 
(GRPH_DEPTH(GRPH_DEPTH_8BPP) | - GRPH_FORMAT(GRPH_FORMAT_INDEXED)); + fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | + (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); break; case DRM_FORMAT_XRGB4444: case DRM_FORMAT_ARGB4444: - fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) | - GRPH_FORMAT(GRPH_FORMAT_ARGB4444)); + fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | + (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); #ifdef __BIG_ENDIAN - fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16); + fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); #endif break; case DRM_FORMAT_XRGB1555: case DRM_FORMAT_ARGB1555: - fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) | - GRPH_FORMAT(GRPH_FORMAT_ARGB1555)); + fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | + (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); #ifdef __BIG_ENDIAN - fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16); + fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); #endif break; case DRM_FORMAT_BGRX5551: case DRM_FORMAT_BGRA5551: - fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) | - GRPH_FORMAT(GRPH_FORMAT_BGRA5551)); + fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | + (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); #ifdef __BIG_ENDIAN - fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16); + fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); #endif break; case DRM_FORMAT_RGB565: - fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) | - GRPH_FORMAT(GRPH_FORMAT_ARGB565)); + fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | + (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); #ifdef __BIG_ENDIAN - fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16); + fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); #endif break; case DRM_FORMAT_XRGB8888: case DRM_FORMAT_ARGB8888: - fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) | - GRPH_FORMAT(GRPH_FORMAT_ARGB8888)); + fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | + (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); #ifdef __BIG_ENDIAN - fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32); + fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); #endif break; case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_ARGB2101010: - fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) | - GRPH_FORMAT(GRPH_FORMAT_ARGB2101010)); + fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | + (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); #ifdef __BIG_ENDIAN - fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32); + fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); #endif /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ bypass_lut = true; break; case DRM_FORMAT_BGRX1010102: case DRM_FORMAT_BGRA1010102: - fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) | - GRPH_FORMAT(GRPH_FORMAT_BGRA1010102)); + fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | + (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); #ifdef __BIG_ENDIAN - fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32); + fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); #endif /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ bypass_lut = true; break; case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ABGR8888: - fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) | - GRPH_FORMAT(GRPH_FORMAT_ARGB8888)); - fb_swap = 
(GRPH_RED_CROSSBAR(GRPH_RED_SEL_B) | - GRPH_BLUE_CROSSBAR(GRPH_BLUE_SEL_R)); + fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | + (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); + fb_swap = ((GRPH_RED_SEL_B << GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT) | + (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT)); #ifdef __BIG_ENDIAN - fb_swap |= GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32); + fb_swap |= (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); #endif break; default: @@ -2013,18 +2019,18 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc, tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); - fb_format |= GRPH_NUM_BANKS(num_banks); - fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1); - fb_format |= GRPH_TILE_SPLIT(tile_split); - fb_format |= GRPH_BANK_WIDTH(bankw); - fb_format |= GRPH_BANK_HEIGHT(bankh); - fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect); + fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT); + fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT); + fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT); + fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT); + fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT); + fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT); } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) { - fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1); + fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT); } pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); - fb_format |= GRPH_PIPE_CONFIG(pipe_config); + fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT); dce_v6_0_vga_enable(crtc, false); @@ -2040,7 +2046,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc, WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK); WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, - (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK); + (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK); WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format); WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap); @@ -2108,14 +2114,13 @@ static void dce_v6_0_set_interleave(struct drm_crtc *crtc, if (mode->flags & DRM_MODE_FLAG_INTERLACE) WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, - INTERLEAVE_EN); + DATA_FORMAT__INTERLEAVE_EN_MASK); else WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0); } static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc) { - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = drm_to_adev(dev); @@ -2125,15 +2130,15 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc) DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id); WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, - ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) | - (0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT))); + ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) | + (INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT))); WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK); WREG32(mmPRESCALE_OVL_CONTROL + 
amdgpu_crtc->crtc_offset, PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK); WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, - ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) | - (0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT))); + ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) | + (INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT))); WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0); @@ -2160,19 +2165,19 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc) } WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, - ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) | - (0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) | - ICON_DEGAMMA_MODE(0) | - (0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT))); + ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) | + (DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) | + (DEGAMMA_BYPASS << DEGAMMA_CONTROL__ICON_DEGAMMA_MODE__SHIFT) | + (DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT))); WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, - ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) | - (0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT))); + ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) | + (GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT))); WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, - ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) | - (0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT))); + ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) | + (REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT))); WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, - ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) | - (0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT))); + ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) | + (OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT))); /* XXX match this to the depth of the crtc fmt block, move to modeset? 
*/ WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0); @@ -2267,8 +2272,6 @@ static void dce_v6_0_hide_cursor(struct drm_crtc *crtc) WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) | (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT)); - - } static void dce_v6_0_show_cursor(struct drm_crtc *crtc) @@ -2285,7 +2288,6 @@ static void dce_v6_0_show_cursor(struct drm_crtc *crtc) CUR_CONTROL__CURSOR_EN_MASK | (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) | (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT)); - } static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc, @@ -2596,7 +2598,6 @@ static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; struct drm_encoder *encoder; @@ -2669,7 +2670,7 @@ static void dce_v6_0_panic_flush(struct drm_plane *plane) /* Disable DC tiling */ fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset); - fb_format &= ~GRPH_ARRAY_MODE(0x7); + fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK; WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format); } @@ -2745,7 +2746,6 @@ static int dce_v6_0_early_init(struct amdgpu_ip_block *ip_block) static int dce_v6_0_sw_init(struct amdgpu_ip_block *ip_block) { int r, i; - bool ret; struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->mode_info.num_crtc; i++) { @@ -2789,8 +2789,7 @@ static int dce_v6_0_sw_init(struct amdgpu_ip_block *ip_block) return r; } - ret = amdgpu_atombios_get_connector_info_from_object_table(adev); - if (ret) + if (amdgpu_atombios_get_connector_info_from_object_table(adev)) amdgpu_display_print_display_setup(adev_to_drm(adev)); else return -EINVAL; @@ -2986,12 +2985,12 @@ static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, switch (state) { case AMDGPU_IRQ_STATE_DISABLE: interrupt_mask = RREG32(mmINT_MASK + reg_block); - interrupt_mask &= ~VBLANK_INT_MASK; + interrupt_mask &= ~INT_MASK__VBLANK_INT_MASK; WREG32(mmINT_MASK + reg_block, interrupt_mask); break; case AMDGPU_IRQ_STATE_ENABLE: interrupt_mask = RREG32(mmINT_MASK + reg_block); - interrupt_mask |= VBLANK_INT_MASK; + interrupt_mask |= INT_MASK__VBLANK_INT_MASK; WREG32(mmINT_MASK + reg_block, interrupt_mask); break; default: @@ -3006,28 +3005,28 @@ static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev, } -static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev, +static int dce_v6_0_set_hpd_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, - unsigned type, + unsigned hpd, enum amdgpu_interrupt_state state) { u32 dc_hpd_int_cntl; - if (type >= adev->mode_info.num_hpd) { - DRM_DEBUG("invalid hdp %d\n", type); + if (hpd >= adev->mode_info.num_hpd) { + DRM_DEBUG("invalid hpd %d\n", hpd); return 0; } switch (state) { case AMDGPU_IRQ_STATE_DISABLE: - dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]); - dc_hpd_int_cntl &= ~DC_HPDx_INT_EN; - WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl); + dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]); + dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; + WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], dc_hpd_int_cntl); break; case AMDGPU_IRQ_STATE_ENABLE: - dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]); - dc_hpd_int_cntl |= DC_HPDx_INT_EN; - 
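/*
 * [Editor's note, not part of the patch] The dce_v6_0 hunks above replace
 * the driver-local helper macros (GRPH_NUM_BANKS(), GRPH_RED_CROSSBAR(),
 * DC_HPDx_INT_EN, ...) with the generated <REG>__<FIELD>__SHIFT /
 * <REG>__<FIELD>_MASK names, and also fix a copy-paste bug: the secondary
 * surface address is now masked with the SECONDARY mask rather than the
 * PRIMARY one. A minimal standalone sketch of the shift/mask idiom; the
 * field names and offsets below are made up for the example:
 */
#include <stdint.h>

#define CTRL__DEPTH__SHIFT	0
#define CTRL__DEPTH_MASK	0x00000003u
#define CTRL__FORMAT__SHIFT	8
#define CTRL__FORMAT_MASK	0x00000700u

/* Compose a register value from individual fields. */
static uint32_t pack_ctrl(uint32_t depth, uint32_t format)
{
	return (depth << CTRL__DEPTH__SHIFT) |
	       (format << CTRL__FORMAT__SHIFT);
}

/* Read-modify-write update of one field: clear it, then set it. */
static uint32_t set_format(uint32_t reg, uint32_t format)
{
	reg &= ~CTRL__FORMAT_MASK;
	reg |= (format << CTRL__FORMAT__SHIFT) & CTRL__FORMAT_MASK;
	return reg;
}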
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl); + dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]); + dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; + WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], dc_hpd_int_cntl); break; default: break; @@ -3036,7 +3035,7 @@ static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev, return 0; } -static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev, +static int dce_v6_0_set_crtc_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, enum amdgpu_interrupt_state state) @@ -3096,7 +3095,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev, switch (entry->src_data[0]) { case 0: /* vblank */ if (disp_int & interrupt_status_offsets[crtc].vblank) - WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK); + WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_STATUS__VBLANK_ACK_MASK); else DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); @@ -3107,7 +3106,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev, break; case 1: /* vline */ if (disp_int & interrupt_status_offsets[crtc].vline) - WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK); + WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_STATUS__VLINE_ACK_MASK); else DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); @@ -3121,7 +3120,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev, return 0; } -static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev, +static int dce_v6_0_set_pageflip_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, enum amdgpu_interrupt_state state) @@ -3172,7 +3171,7 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev, spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); works = amdgpu_crtc->pflip_works; - if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ + if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " "AMDGPU_FLIP_SUBMITTED(%d)\n", amdgpu_crtc->pflip_status, @@ -3249,12 +3248,10 @@ static const struct amd_ip_funcs dce_v6_0_ip_funcs = { .set_powergating_state = dce_v6_0_set_powergating_state, }; -static void -dce_v6_0_encoder_mode_set(struct drm_encoder *encoder, +static void dce_v6_0_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); int em = amdgpu_atombios_encoder_get_encoder_mode(encoder); @@ -3274,7 +3271,6 @@ dce_v6_0_encoder_mode_set(struct drm_encoder *encoder, static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder) { - struct amdgpu_device *adev = drm_to_adev(encoder->dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); @@ -3314,7 +3310,6 @@ static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder) static void dce_v6_0_encoder_commit(struct drm_encoder *encoder) { - struct drm_device *dev = encoder->dev; struct amdgpu_device *adev = drm_to_adev(dev); @@ -3325,7 +3320,6 @@ static void dce_v6_0_encoder_commit(struct drm_encoder *encoder) static void dce_v6_0_encoder_disable(struct drm_encoder *encoder) { - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig; int em = amdgpu_atombios_encoder_get_encoder_mode(encoder); @@ -3541,17 +3535,17 @@ static void dce_v6_0_set_display_funcs(struct amdgpu_device 
*adev) } static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = { - .set = dce_v6_0_set_crtc_interrupt_state, + .set = dce_v6_0_set_crtc_irq_state, .process = dce_v6_0_crtc_irq, }; static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = { - .set = dce_v6_0_set_pageflip_interrupt_state, + .set = dce_v6_0_set_pageflip_irq_state, .process = dce_v6_0_pageflip_irq, }; static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = { - .set = dce_v6_0_set_hpd_interrupt_state, + .set = dce_v6_0_set_hpd_irq_state, .process = dce_v6_0_hpd_irq, }; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 07358546581f..e62ccf9eb73d 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -271,7 +271,7 @@ static void dce_v8_0_hpd_int_ack(struct amdgpu_device *adev, u32 tmp; if (hpd >= adev->mode_info.num_hpd) { - DRM_DEBUG("invalid hdp %d\n", hpd); + DRM_DEBUG("invalid hpd %d\n", hpd); return; } @@ -3021,7 +3021,7 @@ static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev, } } -static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev, +static int dce_v8_0_set_hpd_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, enum amdgpu_interrupt_state state) @@ -3029,7 +3029,7 @@ static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev, u32 dc_hpd_int_cntl; if (type >= adev->mode_info.num_hpd) { - DRM_DEBUG("invalid hdp %d\n", type); + DRM_DEBUG("invalid hpd %d\n", type); return 0; } @@ -3051,7 +3051,7 @@ static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev, return 0; } -static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev, +static int dce_v8_0_set_crtc_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, enum amdgpu_interrupt_state state) @@ -3136,7 +3136,7 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev, return 0; } -static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev, +static int dce_v8_0_set_pageflip_irq_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, enum amdgpu_interrupt_state state) @@ -3547,17 +3547,17 @@ static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev) } static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = { - .set = dce_v8_0_set_crtc_interrupt_state, + .set = dce_v8_0_set_crtc_irq_state, .process = dce_v8_0_crtc_irq, }; static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = { - .set = dce_v8_0_set_pageflip_interrupt_state, + .set = dce_v8_0_set_pageflip_irq_state, .process = dce_v8_0_pageflip_irq, }; static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = { - .set = dce_v8_0_set_hpd_interrupt_state, + .set = dce_v8_0_set_hpd_irq_state, .process = dce_v8_0_hpd_irq, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 6d514efb0a6d..7bd506f06eb1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -368,11 +368,6 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_10_1[] = { SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_DEBUG_INST_ADDR), SOC15_REG_ENTRY_STR(GC, 0, mmRLC_LX6_CORE_PDEBUG_INST), /* cp header registers */ - SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP), - SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP), - SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME2_HEADER_DUMP), - SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP), - 
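/*
 * [Editor's note, not part of the patch] In the register-list hunks that
 * follow, the same CP_*_HEADER_DUMP register is deliberately listed eight
 * times: these dump registers appear to step through an internal FIFO, so
 * each read returns the next entry and repeating the entry N times
 * captures N words in the IP dump. A sketch of the equivalent drain loop;
 * read_reg() and the depth of 8 are assumptions for the example:
 */
#include <stdint.h>

#define HEADER_DUMP_DEPTH 8	/* assumed depth, matching the 8 entries */

static uint32_t fake_fifo[HEADER_DUMP_DEPTH];
static unsigned int fake_pos;

/* Stand-in for RREG32(): models a FIFO-backed dump register. */
static uint32_t read_reg(uint32_t offset)
{
	(void)offset;
	return fake_fifo[fake_pos++ % HEADER_DUMP_DEPTH];
}

static void drain_header_dump(uint32_t dump_reg, uint32_t *out)
{
	int i;

	/* Each read is assumed to pop the next FIFO entry. */
	for (i = 0; i < HEADER_DUMP_DEPTH; i++)
		out[i] = read_reg(dump_reg);
}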
SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MES_HEADER_DUMP),
 	/* SE status registers */
 	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE0),
@@ -421,7 +416,16 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_10[] = {
 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_SUSPEND_WG_STATE_OFFSET),
-	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_DEQUEUE_STATUS)
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_DEQUEUE_STATUS),
+	/* cp header registers */
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
 };
 
 static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_10[] = {
@@ -448,7 +452,32 @@
 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_GFX_MQD_BASE_ADDR),
 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_GFX_MQD_BASE_ADDR_HI),
 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO),
-	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI)
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI),
+	/* gfx header registers */
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
 };
 
 static const struct soc15_reg_golden golden_settings_gc_10_1[] = {
@@ -4296,9 +4325,7 @@ static u32 gfx_v10_0_get_csb_size(struct amdgpu_device *adev)
 static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
 				     volatile u32 *buffer)
 {
-	u32 count = 0, i;
-	const struct cs_section_def *sect = NULL;
-	const struct cs_extent_def *ext = NULL;
+	u32 count = 0;
 	int ctx_reg_offset;
 
 	if (adev->gfx.rlc.cs_data == NULL)
@@ -4306,39 +4333,15 @@ static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
 	if (buffer == NULL)
 		return;
 
-	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
-	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
-	buffer[count++] = cpu_to_le32(0x80000000);
-	buffer[count++] =
cpu_to_le32(0x80000000); - - for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { - for (ext = sect->section; ext->extent != NULL; ++ext) { - if (sect->id == SECT_CONTEXT) { - buffer[count++] = - cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); - buffer[count++] = cpu_to_le32(ext->reg_index - - PACKET3_SET_CONTEXT_REG_START); - for (i = 0; i < ext->reg_count; i++) - buffer[count++] = cpu_to_le32(ext->extent[i]); - } else { - return; - } - } - } + count = amdgpu_gfx_csb_preamble_start(buffer); + count = amdgpu_gfx_csb_data_parser(adev, buffer, count); - ctx_reg_offset = - SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START; + ctx_reg_offset = SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START; buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1)); buffer[count++] = cpu_to_le32(ctx_reg_offset); buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override); - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); - buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); - - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0)); - buffer[count++] = cpu_to_le32(0); + amdgpu_gfx_csb_preamble_end(buffer, count); } static void gfx_v10_0_rlc_fini(struct amdgpu_device *adev) @@ -4752,6 +4755,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block) int i, j, k, r, ring_id = 0; int xcc_id = 0; struct amdgpu_device *adev = ip_block->adev; + int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */ INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler); @@ -4763,7 +4767,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(10, 1, 4): adev->gfx.me.num_me = 1; adev->gfx.me.num_pipe_per_me = 1; - adev->gfx.me.num_queue_per_pipe = 1; + adev->gfx.me.num_queue_per_pipe = 8; adev->gfx.mec.num_mec = 2; adev->gfx.mec.num_pipe_per_mec = 4; adev->gfx.mec.num_queue_per_pipe = 8; @@ -4778,7 +4782,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(10, 3, 7): adev->gfx.me.num_me = 1; adev->gfx.me.num_pipe_per_me = 2; - adev->gfx.me.num_queue_per_pipe = 1; + adev->gfx.me.num_queue_per_pipe = 2; adev->gfx.mec.num_mec = 2; adev->gfx.mec.num_pipe_per_mec = 4; adev->gfx.mec.num_queue_per_pipe = 4; @@ -4800,7 +4804,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block) adev->gfx.cleaner_shader_size = sizeof(gfx_10_1_10_cleaner_shader_hex); if (adev->gfx.me_fw_version >= 101 && adev->gfx.pfp_fw_version >= 158 && - adev->gfx.mec_fw_version >= 152) { + adev->gfx.mec_fw_version >= 151) { adev->gfx.enable_cleaner_shader = true; r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); if (r) { @@ -4810,7 +4814,9 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block) } break; case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 1): case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 3): case IP_VERSION(10, 3, 4): case IP_VERSION(10, 3, 5): adev->gfx.cleaner_shader_ptr = gfx_10_3_0_cleaner_shader_hex; @@ -4826,6 +4832,34 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block) } } break; + case IP_VERSION(10, 3, 6): + adev->gfx.cleaner_shader_ptr = gfx_10_3_0_cleaner_shader_hex; + adev->gfx.cleaner_shader_size = sizeof(gfx_10_3_0_cleaner_shader_hex); + if (adev->gfx.me_fw_version >= 14 && + adev->gfx.pfp_fw_version >= 17 && + adev->gfx.mec_fw_version >= 24) { + adev->gfx.enable_cleaner_shader = true; + r = 
amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); + if (r) { + adev->gfx.enable_cleaner_shader = false; + dev_err(adev->dev, "Failed to initialize cleaner shader\n"); + } + } + break; + case IP_VERSION(10, 3, 7): + adev->gfx.cleaner_shader_ptr = gfx_10_3_0_cleaner_shader_hex; + adev->gfx.cleaner_shader_size = sizeof(gfx_10_3_0_cleaner_shader_hex); + if (adev->gfx.me_fw_version >= 4 && + adev->gfx.pfp_fw_version >= 9 && + adev->gfx.mec_fw_version >= 12) { + adev->gfx.enable_cleaner_shader = true; + r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); + if (r) { + adev->gfx.enable_cleaner_shader = false; + dev_err(adev->dev, "Failed to initialize cleaner shader\n"); + } + } + break; default: adev->gfx.enable_cleaner_shader = false; break; @@ -4886,7 +4920,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block) /* set up the gfx ring */ for (i = 0; i < adev->gfx.me.num_me; i++) { - for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) { + for (j = 0; j < num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) { if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j)) continue; @@ -4918,11 +4952,15 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block) } } } - /* TODO: Add queue reset mask when FW fully supports it */ + adev->gfx.gfx_supported_reset = amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); adev->gfx.compute_supported_reset = amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); + if (!amdgpu_sriov_vf(adev)) { + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + } r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE, 0); if (r) { @@ -6114,7 +6152,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); @@ -6192,7 +6230,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0); @@ -6269,7 +6307,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); @@ -6644,7 +6682,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); @@ -6851,22 +6889,9 @@ static int gfx_v10_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset) static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) { int r, i; - struct amdgpu_ring *ring; for (i = 0; i < adev->gfx.num_gfx_rings; i++) { - ring = &adev->gfx.gfx_ring[i]; - - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)) - return r; - - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (!r) { - r = gfx_v10_0_kgq_init_queue(ring, false); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; - } - 
amdgpu_bo_unreserve(ring->mqd_obj); + r = gfx_v10_0_kgq_init_queue(&adev->gfx.gfx_ring[i], false); if (r) return r; } @@ -7173,55 +7198,24 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring, bool restore) static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev) { - struct amdgpu_ring *ring; - int r; - - ring = &adev->gfx.kiq[0].ring; - - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)) - return r; - - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (unlikely(r != 0)) { - amdgpu_bo_unreserve(ring->mqd_obj); - return r; - } - - gfx_v10_0_kiq_init_queue(ring); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; - amdgpu_bo_unreserve(ring->mqd_obj); + gfx_v10_0_kiq_init_queue(&adev->gfx.kiq[0].ring); return 0; } static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = NULL; - int r = 0, i; + int i, r; gfx_v10_0_cp_compute_enable(adev, true); for (i = 0; i < adev->gfx.num_compute_rings; i++) { - ring = &adev->gfx.compute_ring[i]; - - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)) - goto done; - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (!r) { - r = gfx_v10_0_kcq_init_queue(ring, false); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; - } - amdgpu_bo_unreserve(ring->mqd_obj); + r = gfx_v10_0_kcq_init_queue(&adev->gfx.compute_ring[i], + false); if (r) - goto done; + return r; } - r = amdgpu_gfx_enable_kcq(adev, 0); -done: - return r; + return amdgpu_gfx_enable_kcq(adev, 0); } static int gfx_v10_0_cp_resume(struct amdgpu_device *adev) @@ -9056,21 +9050,6 @@ static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, ref, mask); } -static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring, - unsigned int vmid) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t value = 0; - - value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); - value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); - value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); - value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); - amdgpu_gfx_rlc_enter_safe_mode(adev, 0); - WREG32_SOC15(GC, 0, mmSQ_CMD, value); - amdgpu_gfx_rlc_exit_safe_mode(adev, 0); -} - static void gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, uint32_t me, uint32_t pipe, @@ -9532,7 +9511,9 @@ static void gfx_v10_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) amdgpu_ring_insert_nop(ring, num_nop - 1); } -static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) +static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; @@ -9542,15 +9523,14 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) u64 addr; int r; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; - if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + spin_lock_irqsave(&kiq->ring_lock, flags); - if (amdgpu_ring_alloc(kiq_ring, 5 + 7 + 7 + kiq->pmf->map_queues_size)) { + if (amdgpu_ring_alloc(kiq_ring, 5 + 7 + 7)) { spin_unlock_irqrestore(&kiq->ring_lock, flags); return -ENOMEM; } @@ -9570,37 +9550,37 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) 0, 1, 0x20); gfx_v10_0_ring_emit_reg_wait(kiq_ring, SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffffffff); - kiq->pmf->kiq_map_queues(kiq_ring, ring); amdgpu_ring_commit(kiq_ring); - 
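/*
 * [Editor's note, not part of the patch] The hunks above collapse the
 * repeated reserve/kmap/init/kunmap/unreserve dance around every MQD
 * initialization into a single gfx_v10_0_k{g,c,i}q_init_queue() call,
 * which suggests the MQD object is now kept mapped for the lifetime of
 * the ring. A sketch of the pattern under that assumption; all names
 * here are illustrative, not the driver's real helpers:
 */
#include <stddef.h>

struct ring {
	unsigned int *mqd_ptr;	/* persistent CPU mapping of the MQD */
};

/* Stub: would program the MQD through ring->mqd_ptr. */
static int kq_init_queue(struct ring *ring, int reset)
{
	(void)reset;
	return ring->mqd_ptr ? 0 : -1;
}

static int rings_resume(struct ring *rings, size_t count)
{
	size_t i;
	int r;

	for (i = 0; i < count; i++) {
		/* Old flow: reserve BO, kmap, init, kunmap, unreserve.
		 * New flow: one call against the persistent mapping. */
		r = kq_init_queue(&rings[i], 0);
		if (r)
			return r;
	}
	return 0;
}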
- spin_unlock_irqrestore(&kiq->ring_lock, flags); - r = amdgpu_ring_test_ring(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); if (r) return r; - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)) { - DRM_ERROR("fail to resv mqd_obj\n"); + r = gfx_v10_0_kgq_init_queue(ring, true); + if (r) { + DRM_ERROR("fail to init kgq\n"); return r; } - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (!r) { - r = gfx_v10_0_kgq_init_queue(ring, true); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; + + spin_lock_irqsave(&kiq->ring_lock, flags); + + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size)) { + spin_unlock_irqrestore(&kiq->ring_lock, flags); + return -ENOMEM; } - amdgpu_bo_unreserve(ring->mqd_obj); - if (r) { - DRM_ERROR("fail to unresv mqd_obj\n"); + kiq->pmf->kiq_map_queues(kiq_ring, ring); + amdgpu_ring_commit(kiq_ring); + r = amdgpu_ring_test_ring(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + if (r) return r; - } - return amdgpu_ring_test_ring(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring, - unsigned int vmid) + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; @@ -9608,12 +9588,11 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring, unsigned long flags; int i, r; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; - if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + spin_lock_irqsave(&kiq->ring_lock, flags); if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { @@ -9624,9 +9603,8 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring, kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0); amdgpu_ring_commit(kiq_ring); - spin_unlock_irqrestore(&kiq->ring_lock, flags); - r = amdgpu_ring_test_ring(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); if (r) return r; @@ -9649,20 +9627,9 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring, return r; } - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)) { - dev_err(adev->dev, "fail to resv mqd_obj\n"); - return r; - } - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (!r) { - r = gfx_v10_0_kcq_init_queue(ring, true); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; - } - amdgpu_bo_unreserve(ring->mqd_obj); + r = gfx_v10_0_kcq_init_queue(ring, true); if (r) { - dev_err(adev->dev, "fail to unresv mqd_obj\n"); + dev_err(adev->dev, "fail to init kcq\n"); return r; } @@ -9673,13 +9640,12 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring, } kiq->pmf->kiq_map_queues(kiq_ring, ring); amdgpu_ring_commit(kiq_ring); - spin_unlock_irqrestore(&kiq->ring_lock, flags); - r = amdgpu_ring_test_ring(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); if (r) return r; - return amdgpu_ring_test_ring(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static void gfx_v10_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) @@ -9711,9 +9677,14 @@ static void gfx_v10_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printe for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) { drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k); for (reg = 0; reg < reg_count; reg++) { - drm_printf(p, "%-50s \t 0x%08x\n", - gc_cp_reg_list_10[reg].reg_name, - adev->gfx.ip_dump_compute_queues[index + reg]); + if (i && 
gc_cp_reg_list_10[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP) + drm_printf(p, "%-50s \t 0x%08x\n", + "mmCP_MEC_ME2_HEADER_DUMP", + adev->gfx.ip_dump_compute_queues[index + reg]); + else + drm_printf(p, "%-50s \t 0x%08x\n", + gc_cp_reg_list_10[reg].reg_name, + adev->gfx.ip_dump_compute_queues[index + reg]); } index += reg_count; } @@ -9774,9 +9745,13 @@ static void gfx_v10_ip_dump(struct amdgpu_ip_block *ip_block) nv_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0); for (reg = 0; reg < reg_count; reg++) { - adev->gfx.ip_dump_compute_queues[index + reg] = - RREG32(SOC15_REG_ENTRY_OFFSET( - gc_cp_reg_list_10[reg])); + if (i && gc_cp_reg_list_10[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP) + adev->gfx.ip_dump_compute_queues[index + reg] = + RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME2_HEADER_DUMP)); + else + adev->gfx.ip_dump_compute_queues[index + reg] = + RREG32(SOC15_REG_ENTRY_OFFSET( + gc_cp_reg_list_10[reg])); } index += reg_count; } @@ -9905,7 +9880,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = { .emit_wreg = gfx_v10_0_ring_emit_wreg, .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait, - .soft_recovery = gfx_v10_0_ring_soft_recovery, .emit_mem_sync = gfx_v10_0_emit_mem_sync, .reset = gfx_v10_0_reset_kgq, .emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader, @@ -9946,7 +9920,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { .emit_wreg = gfx_v10_0_ring_emit_wreg, .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait, - .soft_recovery = gfx_v10_0_ring_soft_recovery, .emit_mem_sync = gfx_v10_0_emit_mem_sync, .reset = gfx_v10_0_reset_kcq, .emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h index 5255378af53c..f67569ccf9f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h @@ -43,9 +43,9 @@ static const u32 gfx_10_1_10_cleaner_shader_hex[] = { 0xd70f6a01, 0x000202ff, 0x00000400, 0x80828102, 0xbf84fff7, 0xbefc03ff, - 0x00000068, 0xbe803080, - 0xbe813080, 0xbe823080, - 0xbe833080, 0x80fc847c, + 0x00000068, 0xbe803000, + 0xbe813000, 0xbe823000, + 0xbe833000, 0x80fc847c, 0xbf84fffa, 0xbeea0480, 0xbeec0480, 0xbeee0480, 0xbef00480, 0xbef20480, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm index 9ba3359253c9..54f7ed9e2801 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm @@ -40,7 +40,6 @@ shader main type(CS) wave_size(32) // Note: original source code from SQ team - // // Create 32 waves in a threadgroup (CS waves) // Each allocates 64 VGPRs @@ -71,8 +70,8 @@ label_0005: s_sub_u32 s2, s2, 8 s_cbranch_scc0 label_0005 // - s_mov_b32 s2, 0x80000000 // Bit31 is first_wave - s_and_b32 s2, s2, s0 // sgpr0 has tg_size (first_wave) term as in ucode only COMPUTE_PGM_RSRC2.tg_size_en is set + s_mov_b32 s2, 0x80000000 // Bit31 is first_wave + s_and_b32 s2, s2, s1 // sgpr0 has tg_size (first_wave) term as in ucode only COMPUTE_PGM_RSRC2.tg_size_en is set s_cbranch_scc0 label_0023 // Clean LDS if its first wave of ThreadGroup/WorkGroup // CLEAR LDS // @@ -99,10 +98,10 @@ label_001F: label_0023: s_mov_b32 m0, 0x00000068 // Loop 108/4=27 times (loop unrolled for 
performance) label_sgpr_loop: - s_movreld_b32 s0, 0 - s_movreld_b32 s1, 0 - s_movreld_b32 s2, 0 - s_movreld_b32 s3, 0 + s_movreld_b32 s0, s0 + s_movreld_b32 s1, s0 + s_movreld_b32 s2, s0 + s_movreld_b32 s3, s0 s_sub_u32 m0, m0, 4 s_cbranch_scc0 label_sgpr_loop diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index d8772cd6db63..c01c241a1b06 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -48,6 +48,8 @@ #include "gfx_v11_0_3.h" #include "nbio_v4_3.h" #include "mes_v11_0.h" +#include "mes_userqueue.h" +#include "amdgpu_userq_fence.h" #define GFX11_NUM_GFX_RINGS 1 #define GFX11_MEC_HPD_SIZE 2048 @@ -83,6 +85,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_kicker.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_1.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin"); @@ -177,9 +180,13 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_11_0[] = { SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_INSTR_PNTR), SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS), /* cp header registers */ - SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), - SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), - SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), /* SE status registers */ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0), @@ -230,7 +237,16 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_11[] = { SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET), SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT), SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET), - SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS) + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS), + /* cp header registers */ + SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), }; static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_11[] = { @@ -259,7 +275,24 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_11[] = { SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO), SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI), SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ), - SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ) + SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ), + /* cp header registers */ + SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), + 
SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), }; static const struct soc15_reg_golden golden_settings_gc_11_0[] = { @@ -580,33 +613,18 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) memset(&ib, 0, sizeof(ib)); - if (ring->is_mes_queue) { - uint32_t padding, offset; - - offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS); - padding = amdgpu_mes_ctx_get_offs(ring, - AMDGPU_MES_CTX_PADDING_OFFS); - - ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); - ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); - - gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding); - cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding); - *cpu_ptr = cpu_to_le32(0xCAFEDEAD); - } else { - r = amdgpu_device_wb_get(adev, &index); - if (r) - return r; + r = amdgpu_device_wb_get(adev, &index); + if (r) + return r; - gpu_addr = adev->wb.gpu_addr + (index * 4); - adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); - cpu_ptr = &adev->wb.wb[index]; + gpu_addr = adev->wb.gpu_addr + (index * 4); + adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); + cpu_ptr = &adev->wb.wb[index]; - r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib); - if (r) { - DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); - goto err1; - } + r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib); + if (r) { + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); + goto err1; } ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3); @@ -633,12 +651,10 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) else r = -EINVAL; err2: - if (!ring->is_mes_queue) - amdgpu_ib_free(&ib, NULL); + amdgpu_ib_free(&ib, NULL); dma_fence_put(f); err1: - if (!ring->is_mes_queue) - amdgpu_device_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } @@ -744,6 +760,10 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, AMDGPU_UCODE_REQUIRED, "amdgpu/gc_11_0_0_rlc_1.bin"); + else if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, + AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_rlc_kicker.bin", ucode_prefix); else err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, AMDGPU_UCODE_REQUIRED, @@ -833,9 +853,7 @@ static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev) static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer) { - u32 count = 0, i; - const struct cs_section_def *sect = NULL; - const struct cs_extent_def *ext = NULL; + u32 count = 0; int ctx_reg_offset; if (adev->gfx.rlc.cs_data == NULL) @@ -843,39 +861,15 @@ static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev, if (buffer == NULL) return; - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); - buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); + count = amdgpu_gfx_csb_preamble_start(buffer); + count = amdgpu_gfx_csb_data_parser(adev, buffer, count); - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1)); - buffer[count++] = cpu_to_le32(0x80000000); - buffer[count++] = 
cpu_to_le32(0x80000000); - - for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { - for (ext = sect->section; ext->extent != NULL; ++ext) { - if (sect->id == SECT_CONTEXT) { - buffer[count++] = - cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); - buffer[count++] = cpu_to_le32(ext->reg_index - - PACKET3_SET_CONTEXT_REG_START); - for (i = 0; i < ext->reg_count; i++) - buffer[count++] = cpu_to_le32(ext->extent[i]); - } else { - return; - } - } - } - - ctx_reg_offset = - SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START; + ctx_reg_offset = SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START; buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1)); buffer[count++] = cpu_to_le32(ctx_reg_offset); buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override); - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); - buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); - - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0)); - buffer[count++] = cpu_to_le32(0); + amdgpu_gfx_csb_preamble_end(buffer, count); } static void gfx_v11_0_rlc_fini(struct amdgpu_device *adev) @@ -1056,14 +1050,21 @@ static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev, #define MQD_FWWORKAREA_SIZE 484 #define MQD_FWWORKAREA_ALIGNMENT 256 -static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev, +static void gfx_v11_0_get_gfx_shadow_info_nocheck(struct amdgpu_device *adev, struct amdgpu_gfx_shadow_info *shadow_info) { - if (adev->gfx.cp_gfx_shadow) { - shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE; - shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT; - shadow_info->csa_size = MQD_FWWORKAREA_SIZE; - shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT; + shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE; + shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT; + shadow_info->csa_size = MQD_FWWORKAREA_SIZE; + shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT; +} + +static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev, + struct amdgpu_gfx_shadow_info *shadow_info, + bool skip_check) +{ + if (adev->gfx.cp_gfx_shadow || skip_check) { + gfx_v11_0_get_gfx_shadow_info_nocheck(adev, shadow_info); return 0; } else { memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info)); @@ -1136,6 +1137,10 @@ static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id, ring->ring_obj = NULL; ring->use_doorbell = true; + if (adev->gfx.disable_kq) { + ring->no_scheduler = true; + ring->no_user_submission = true; + } if (!ring_id) ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1; @@ -1568,24 +1573,18 @@ static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev) static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block) { - int i, j, k, r, ring_id = 0; + int i, j, k, r, ring_id; int xcc_id = 0; struct amdgpu_device *adev = ip_block->adev; + int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */ INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler); switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): - adev->gfx.me.num_me = 1; - adev->gfx.me.num_pipe_per_me = 1; - adev->gfx.me.num_queue_per_pipe = 1; - adev->gfx.mec.num_mec = 2; - adev->gfx.mec.num_pipe_per_mec = 4; - adev->gfx.mec.num_queue_per_pipe = 4; - break; - case IP_VERSION(11, 0, 1): case IP_VERSION(11, 
0, 4): case IP_VERSION(11, 5, 0): case IP_VERSION(11, 5, 1): @@ -1593,7 +1592,7 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(11, 5, 3): adev->gfx.me.num_me = 1; adev->gfx.me.num_pipe_per_me = 1; - adev->gfx.me.num_queue_per_pipe = 1; + adev->gfx.me.num_queue_per_pipe = 2; adev->gfx.mec.num_mec = 1; adev->gfx.mec.num_pipe_per_mec = 4; adev->gfx.mec.num_queue_per_pipe = 4; @@ -1612,6 +1611,35 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): + if (!adev->gfx.disable_uq && + adev->gfx.me_fw_version >= 2390 && + adev->gfx.pfp_fw_version >= 2530 && + adev->gfx.mec_fw_version >= 2600 && + adev->mes.fw_version[0] >= 120) { + adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs; + adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs; + } + break; + case IP_VERSION(11, 0, 1): + case IP_VERSION(11, 0, 4): + case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 1): + case IP_VERSION(11, 5, 2): + case IP_VERSION(11, 5, 3): + /* add firmware version checks here */ + if (0 && !adev->gfx.disable_uq) { + adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs; + adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs; + } + break; + default: + break; + } + + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 3): adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex; adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex); if (adev->gfx.me_fw_version >= 2280 && @@ -1640,6 +1668,34 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block) } } break; + case IP_VERSION(11, 5, 2): + adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex; + adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex); + if (adev->gfx.me_fw_version >= 12 && + adev->gfx.pfp_fw_version >= 15 && + adev->gfx.mec_fw_version >= 15) { + adev->gfx.enable_cleaner_shader = true; + r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); + if (r) { + adev->gfx.enable_cleaner_shader = false; + dev_err(adev->dev, "Failed to initialize cleaner shader\n"); + } + } + break; + case IP_VERSION(11, 5, 3): + adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex; + adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex); + if (adev->gfx.me_fw_version >= 7 && + adev->gfx.pfp_fw_version >= 8 && + adev->gfx.mec_fw_version >= 8) { + adev->gfx.enable_cleaner_shader = true; + r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); + if (r) { + adev->gfx.enable_cleaner_shader = false; + dev_err(adev->dev, "Failed to initialize cleaner shader\n"); + } + } + break; default: adev->gfx.enable_cleaner_shader = false; break; @@ -1701,37 +1757,42 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block) return r; } - /* set up the gfx ring */ - for (i = 0; i < adev->gfx.me.num_me; i++) { - for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) { - for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) { - if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j)) - continue; - - r = gfx_v11_0_gfx_ring_init(adev, ring_id, - i, k, j); - if (r) - return r; - ring_id++; + if (adev->gfx.num_gfx_rings) { + ring_id = 0; + /* set up the gfx ring */ + for (i = 0; i < adev->gfx.me.num_me; i++) { + for (j = 0; j < num_queue_per_pipe; j++) { + for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) { + if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j)) + continue; 
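/*
 * [Editor's note, not part of the patch] The sw_init hunk above enables
 * MES user queues only when every microcode component is at least the
 * first version known to support them, and leaves the other gc 11.x
 * parts behind an 'if (0 && ...)' placeholder until their minimum
 * versions are filled in. The gating pattern, reduced to a standalone
 * sketch using the gc 11.0.0/11.0.2/11.0.3 thresholds from the hunk:
 */
#include <stdbool.h>
#include <stdint.h>

struct fw_versions {
	uint32_t me, pfp, mec, mes;
};

static bool user_queues_supported(const struct fw_versions *fw)
{
	/* Thresholds taken from the patch for gc 11.0.0/2/3. */
	return fw->me >= 2390 && fw->pfp >= 2530 &&
	       fw->mec >= 2600 && fw->mes >= 120;
}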
+ + r = gfx_v11_0_gfx_ring_init(adev, ring_id, + i, k, j); + if (r) + return r; + ring_id++; + } } } } - ring_id = 0; - /* set up the compute queues - allocate horizontally across pipes */ - for (i = 0; i < adev->gfx.mec.num_mec; ++i) { - for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { - for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { - if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i, - k, j)) - continue; + if (adev->gfx.num_compute_rings) { + ring_id = 0; + /* set up the compute queues - allocate horizontally across pipes */ + for (i = 0; i < adev->gfx.mec.num_mec; ++i) { + for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { + for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { + if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i, + k, j)) + continue; - r = gfx_v11_0_compute_ring_init(adev, ring_id, - i, k, j); - if (r) - return r; + r = gfx_v11_0_compute_ring_init(adev, ring_id, + i, k, j); + if (r) + return r; - ring_id++; + ring_id++; + } } } } @@ -1745,12 +1806,17 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): if ((adev->gfx.me_fw_version >= 2280) && - (adev->gfx.mec_fw_version >= 2410)) { - adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; - adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + (adev->gfx.mec_fw_version >= 2410) && + !amdgpu_sriov_vf(adev)) { + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; } break; default: + if (!amdgpu_sriov_vf(adev)) { + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + } break; } @@ -2428,7 +2494,7 @@ static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr) } if (amdgpu_emu_mode == 1) - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); @@ -2472,7 +2538,7 @@ static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr) } if (amdgpu_emu_mode == 1) - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); @@ -2517,7 +2583,7 @@ static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr) } if (amdgpu_emu_mode == 1) - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); @@ -3153,7 +3219,7 @@ static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev) amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj); if (amdgpu_emu_mode == 1) - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); @@ -3371,7 +3437,7 @@ static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev) amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj); if (amdgpu_emu_mode == 1) - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, lower_32_bits(adev->gfx.me.me_fw_gpu_addr)); @@ -4061,6 +4127,8 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, #ifdef __BIG_ENDIAN tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 
1); #endif + if (prop->tmz_queue) + tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1); mqd->cp_gfx_hqd_cntl = tmp; /* set up cp_doorbell_control */ @@ -4081,6 +4149,16 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, /* active the queue */ mqd->cp_gfx_hqd_active = 1; + /* set gfx UQ items */ + mqd->shadow_base_lo = lower_32_bits(prop->shadow_addr); + mqd->shadow_base_hi = upper_32_bits(prop->shadow_addr); + mqd->gds_bkup_base_lo = lower_32_bits(prop->gds_bkup_addr); + mqd->gds_bkup_base_hi = upper_32_bits(prop->gds_bkup_addr); + mqd->fw_work_area_base_lo = lower_32_bits(prop->csa_addr); + mqd->fw_work_area_base_hi = upper_32_bits(prop->csa_addr); + mqd->fence_address_lo = lower_32_bits(prop->fence_address); + mqd->fence_address_hi = upper_32_bits(prop->fence_address); + return 0; } @@ -4115,22 +4193,9 @@ static int gfx_v11_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset) static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) { int r, i; - struct amdgpu_ring *ring; for (i = 0; i < adev->gfx.num_gfx_rings; i++) { - ring = &adev->gfx.gfx_ring[i]; - - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)) - return r; - - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (!r) { - r = gfx_v11_0_kgq_init_queue(ring, false); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; - } - amdgpu_bo_unreserve(ring->mqd_obj); + r = gfx_v11_0_kgq_init_queue(&adev->gfx.gfx_ring[i], false); if (r) return r; } @@ -4218,6 +4283,8 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m, prop->allow_tunneling); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); + if (prop->tmz_queue) + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1); mqd->cp_hqd_pq_control = tmp; /* set the wb address whether it's enabled or not */ @@ -4269,6 +4336,10 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m, mqd->cp_hqd_active = prop->hqd_active; + /* set UQ fenceaddress */ + mqd->fence_address_lo = lower_32_bits(prop->fence_address); + mqd->fence_address_hi = upper_32_bits(prop->fence_address); + return 0; } @@ -4452,57 +4523,24 @@ static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset) static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev) { - struct amdgpu_ring *ring; - int r; - - ring = &adev->gfx.kiq[0].ring; - - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)) - return r; - - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (unlikely(r != 0)) { - amdgpu_bo_unreserve(ring->mqd_obj); - return r; - } - - gfx_v11_0_kiq_init_queue(ring); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; - amdgpu_bo_unreserve(ring->mqd_obj); - ring->sched.ready = true; + gfx_v11_0_kiq_init_queue(&adev->gfx.kiq[0].ring); return 0; } static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = NULL; - int r = 0, i; + int i, r; if (!amdgpu_async_gfx_ring) gfx_v11_0_cp_compute_enable(adev, true); for (i = 0; i < adev->gfx.num_compute_rings; i++) { - ring = &adev->gfx.compute_ring[i]; - - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)) - goto done; - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (!r) { - r = gfx_v11_0_kcq_init_queue(ring, false); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; - } - amdgpu_bo_unreserve(ring->mqd_obj); + r = gfx_v11_0_kcq_init_queue(&adev->gfx.compute_ring[i], false); if 
(r) - goto done; + return r; } - r = amdgpu_gfx_enable_kcq(adev, 0); -done: - return r; + return amdgpu_gfx_enable_kcq(adev, 0); } static int gfx_v11_0_cp_resume(struct amdgpu_device *adev) @@ -4555,11 +4593,23 @@ static int gfx_v11_0_cp_resume(struct amdgpu_device *adev) return r; } - for (i = 0; i < adev->gfx.num_gfx_rings; i++) { - ring = &adev->gfx.gfx_ring[i]; - r = amdgpu_ring_test_helper(ring); - if (r) - return r; + if (adev->gfx.disable_kq) { + for (i = 0; i < adev->gfx.num_gfx_rings; i++) { + ring = &adev->gfx.gfx_ring[i]; + /* we don't want to set ring->ready */ + r = amdgpu_ring_test_ring(ring); + if (r) + return r; + } + if (amdgpu_async_gfx_ring) + amdgpu_gfx_disable_kgq(adev, 0); + } else { + for (i = 0; i < adev->gfx.num_gfx_rings; i++) { + ring = &adev->gfx.gfx_ring[i]; + r = amdgpu_ring_test_helper(ring); + if (r) + return r; + } } for (i = 0; i < adev->gfx.num_compute_rings; i++) { @@ -4587,7 +4637,7 @@ static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev) if (r) return r; - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? false : true; @@ -4768,6 +4818,49 @@ static int gfx_v11_0_hw_init(struct amdgpu_ip_block *ip_block) return r; } +static int gfx_v11_0_set_userq_eop_interrupts(struct amdgpu_device *adev, + bool enable) +{ + unsigned int irq_type; + int m, p, r; + + if (adev->userq_funcs[AMDGPU_HW_IP_GFX]) { + for (m = 0; m < adev->gfx.me.num_me; m++) { + for (p = 0; p < adev->gfx.me.num_pipe_per_me; p++) { + irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + p; + if (enable) + r = amdgpu_irq_get(adev, &adev->gfx.eop_irq, + irq_type); + else + r = amdgpu_irq_put(adev, &adev->gfx.eop_irq, + irq_type); + if (r) + return r; + } + } + } + + if (adev->userq_funcs[AMDGPU_HW_IP_COMPUTE]) { + for (m = 0; m < adev->gfx.mec.num_mec; ++m) { + for (p = 0; p < adev->gfx.mec.num_pipe_per_mec; p++) { + irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + + (m * adev->gfx.mec.num_pipe_per_mec) + + p; + if (enable) + r = amdgpu_irq_get(adev, &adev->gfx.eop_irq, + irq_type); + else + r = amdgpu_irq_put(adev, &adev->gfx.eop_irq, + irq_type); + if (r) + return r; + } + } + } + + return 0; +} + static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; @@ -4777,9 +4870,11 @@ static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block) amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0); + gfx_v11_0_set_userq_eop_interrupts(adev, false); if (!adev->no_hw_access) { - if (amdgpu_async_gfx_ring) { + if (amdgpu_async_gfx_ring && + !adev->gfx.disable_kq) { if (amdgpu_gfx_disable_kgq(adev, 0)) DRM_ERROR("KGQ disable failed\n"); } @@ -5105,11 +5200,36 @@ static int gfx_v11_0_early_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; + switch (amdgpu_user_queue) { + case -1: + case 0: + default: + adev->gfx.disable_kq = false; + adev->gfx.disable_uq = true; + break; + case 1: + adev->gfx.disable_kq = false; + adev->gfx.disable_uq = false; + break; + case 2: + adev->gfx.disable_kq = true; + adev->gfx.disable_uq = false; + break; + } + adev->gfx.funcs = &gfx_v11_0_gfx_funcs; - adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS; - adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), - AMDGPU_MAX_COMPUTE_RINGS); + if (adev->gfx.disable_kq) { + /* We need one GFX ring temporarily to set up + * the clear state. 
+ */ + adev->gfx.num_gfx_rings = 1; + adev->gfx.num_compute_rings = 0; + } else { + adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS; + adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), + AMDGPU_MAX_COMPUTE_RINGS); + } gfx_v11_0_set_kiq_pm4_funcs(adev); gfx_v11_0_set_ring_funcs(adev); @@ -5140,6 +5260,11 @@ static int gfx_v11_0_late_init(struct amdgpu_ip_block *ip_block) r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0); if (r) return r; + + r = gfx_v11_0_set_userq_eop_interrupts(adev, true); + if (r) + return r; + return 0; } @@ -5737,10 +5862,6 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false); } - if (ring->is_mes_queue) - /* inherit vmid from mqd */ - control |= 0x400000; - amdgpu_ring_write(ring, header); BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ amdgpu_ring_write(ring, @@ -5760,10 +5881,6 @@ static void gfx_v11_0_ring_emit_ib_compute(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); - if (ring->is_mes_queue) - /* inherit vmid from mqd */ - control |= 0x40000000; - /* Currently, there is a high possibility to get wave ID mismatch * between ME and GDS, leading to a hw deadlock, because ME generates * different wave IDs than the GDS expects. This situation happens @@ -5821,8 +5938,7 @@ static void gfx_v11_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, amdgpu_ring_write(ring, upper_32_bits(addr)); amdgpu_ring_write(ring, lower_32_bits(seq)); amdgpu_ring_write(ring, upper_32_bits(seq)); - amdgpu_ring_write(ring, ring->is_mes_queue ? - (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0); + amdgpu_ring_write(ring, 0); } static void gfx_v11_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) @@ -5850,10 +5966,7 @@ static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring, static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) { - if (ring->is_mes_queue) - gfx_v11_0_ring_invalidate_tlbs(ring, 0, 0, false, 0); - else - amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); + amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); /* compute doesn't have PFP */ if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) { @@ -6082,28 +6195,13 @@ static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume) void *de_payload_cpu_addr; int cnt; - if (ring->is_mes_queue) { - offset = offsetof(struct amdgpu_mes_ctx_meta_data, - gfx[0].gfx_meta_data) + - offsetof(struct v10_gfx_meta_data, de_payload); - de_payload_gpu_addr = - amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); - de_payload_cpu_addr = - amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); - - offset = offsetof(struct amdgpu_mes_ctx_meta_data, - gfx[0].gds_backup) + - offsetof(struct v10_gfx_meta_data, de_payload); - gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); - } else { - offset = offsetof(struct v10_gfx_meta_data, de_payload); - de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset; - de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset; + offset = offsetof(struct v10_gfx_meta_data, de_payload); + de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset; + de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset; - gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) + - AMDGPU_CSA_SIZE - adev->gds.gds_size, - PAGE_SIZE); - } + gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) + + AMDGPU_CSA_SIZE - adev->gds.gds_size, + PAGE_SIZE); de_payload.gds_backup_addrlo = 
lower_32_bits(gds_addr); de_payload.gds_backup_addrhi = upper_32_bits(gds_addr); @@ -6190,21 +6288,6 @@ static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, ref, mask, 0x20); } -static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring, - unsigned vmid) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t value = 0; - - value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); - value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); - value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); - value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); - amdgpu_gfx_rlc_enter_safe_mode(adev, 0); - WREG32_SOC15(GC, 0, regSQ_CMD, value); - amdgpu_gfx_rlc_exit_safe_mode(adev, 0); -} - static void gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, uint32_t me, uint32_t pipe, @@ -6342,25 +6425,23 @@ static int gfx_v11_0_eop_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - int i; + u32 doorbell_offset = entry->src_data[0]; u8 me_id, pipe_id, queue_id; struct amdgpu_ring *ring; - uint32_t mes_queue_id = entry->src_data[0]; + int i; DRM_DEBUG("IH: CP EOP\n"); - if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) { - struct amdgpu_mes_queue *queue; - - mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK; + if (adev->enable_mes && doorbell_offset) { + struct amdgpu_userq_fence_driver *fence_drv = NULL; + struct xarray *xa = &adev->userq_xa; + unsigned long flags; - spin_lock(&adev->mes.queue_id_lock); - queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id); - if (queue) { - DRM_DEBUG("process mes queue id = %d\n", mes_queue_id); - amdgpu_fence_process(queue->ring); - } - spin_unlock(&adev->mes.queue_id_lock); + xa_lock_irqsave(xa, flags); + fence_drv = xa_load(xa, doorbell_offset); + if (fence_drv) + amdgpu_userq_fence_driver_process(fence_drv); + xa_unlock_irqrestore(xa, flags); } else { me_id = (entry->ring_id & 0x0c) >> 2; pipe_id = (entry->ring_id & 0x03) >> 0; @@ -6527,27 +6608,29 @@ static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev, pipe_id = (entry->ring_id & 0x03) >> 0; queue_id = (entry->ring_id & 0x70) >> 4; - switch (me_id) { - case 0: - for (i = 0; i < adev->gfx.num_gfx_rings; i++) { - ring = &adev->gfx.gfx_ring[i]; - if (ring->me == me_id && ring->pipe == pipe_id && - ring->queue == queue_id) - drm_sched_fault(&ring->sched); - } - break; - case 1: - case 2: - for (i = 0; i < adev->gfx.num_compute_rings; i++) { - ring = &adev->gfx.compute_ring[i]; - if (ring->me == me_id && ring->pipe == pipe_id && - ring->queue == queue_id) - drm_sched_fault(&ring->sched); + if (!adev->gfx.disable_kq) { + switch (me_id) { + case 0: + for (i = 0; i < adev->gfx.num_gfx_rings; i++) { + ring = &adev->gfx.gfx_ring[i]; + if (ring->me == me_id && ring->pipe == pipe_id && + ring->queue == queue_id) + drm_sched_fault(&ring->sched); + } + break; + case 1: + case 2: + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + ring = &adev->gfx.compute_ring[i]; + if (ring->me == me_id && ring->pipe == pipe_id && + ring->queue == queue_id) + drm_sched_fault(&ring->sched); + } + break; + default: + BUG(); + break; } - break; - default: - BUG(); - break; } } @@ -6655,32 +6738,90 @@ static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring) amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */ } -static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) +static bool gfx_v11_pipe_reset_support(struct amdgpu_device *adev) +{ + /* Disable the pipe reset until the CPFW fully support it.*/ + 
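/* The xarray lookup in gfx_v11_0_eop_irq() above assumes the user-queue code
 * registered a fence driver under the queue's doorbell offset when the queue
 * was created; a minimal sketch of that registration side (helper name
 * hypothetical, the real registration lives in the new amdgpu_userq_fence.c):
 *
 *	static int userq_fence_drv_register(struct amdgpu_device *adev,
 *					    u32 doorbell_offset,
 *					    struct amdgpu_userq_fence_driver *fence_drv)
 *	{
 *		// xa_insert() takes the xarray lock internally and returns
 *		// -EBUSY if the doorbell slot is already occupied
 *		return xa_insert(&adev->userq_xa, doorbell_offset,
 *				 fence_drv, GFP_KERNEL);
 *	}
 */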
dev_warn_once(adev->dev, "The CPFW hasn't support pipe reset yet.\n"); + return false; +} + + +static int gfx_v11_reset_gfx_pipe(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; + uint32_t reset_pipe = 0, clean_pipe = 0; int r; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; + if (!gfx_v11_pipe_reset_support(adev)) + return -EOPNOTSUPP; - r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false); - if (r) - return r; + gfx_v11_0_set_safe_mode(adev, 0); + mutex_lock(&adev->srbm_mutex); + soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)) { - dev_err(adev->dev, "fail to resv mqd_obj\n"); - return r; + switch (ring->pipe) { + case 0: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL, + PFP_PIPE0_RESET, 1); + reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL, + ME_PIPE0_RESET, 1); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL, + PFP_PIPE0_RESET, 0); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL, + ME_PIPE0_RESET, 0); + break; + case 1: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL, + PFP_PIPE1_RESET, 1); + reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL, + ME_PIPE1_RESET, 1); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL, + PFP_PIPE1_RESET, 0); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL, + ME_PIPE1_RESET, 0); + break; + default: + break; } - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (!r) { - r = gfx_v11_0_kgq_init_queue(ring, true); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; + + WREG32_SOC15(GC, 0, regCP_ME_CNTL, reset_pipe); + WREG32_SOC15(GC, 0, regCP_ME_CNTL, clean_pipe); + + r = (RREG32(SOC15_REG_OFFSET(GC, 0, regCP_GFX_RS64_INSTR_PNTR1)) << 2) - + RS64_FW_UC_START_ADDR_LO; + soc21_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + gfx_v11_0_unset_safe_mode(adev, 0); + + dev_info(adev->dev, "The ring %s pipe reset to the ME firmware start PC: %s\n", ring->name, + r == 0 ? "successfully" : "failed"); + /* FIXME: Sometimes driver can't cache the ME firmware start PC correctly, + * so the pipe reset status relies on the later gfx ring test result. 
+ */ + return 0; +} + +static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) +{ + struct amdgpu_device *adev = ring->adev; + int r; + + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + + r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false); + if (r) { + + dev_warn(adev->dev, "reset via MES failed and try pipe reset %d\n", r); + r = gfx_v11_reset_gfx_pipe(ring); + if (r) + return r; } - amdgpu_bo_unreserve(ring->mqd_obj); + + r = gfx_v11_0_kgq_init_queue(ring, true); if (r) { - dev_err(adev->dev, "fail to unresv mqd_obj\n"); + dev_err(adev->dev, "failed to init kgq\n"); return r; } @@ -6690,37 +6831,159 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) return r; } - return amdgpu_ring_test_ring(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); +} + +static int gfx_v11_0_reset_compute_pipe(struct amdgpu_ring *ring) +{ + + struct amdgpu_device *adev = ring->adev; + uint32_t reset_pipe = 0, clean_pipe = 0; + int r; + + if (!gfx_v11_pipe_reset_support(adev)) + return -EOPNOTSUPP; + + gfx_v11_0_set_safe_mode(adev, 0); + mutex_lock(&adev->srbm_mutex); + soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + + reset_pipe = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL); + clean_pipe = reset_pipe; + + if (adev->gfx.rs64_enable) { + + switch (ring->pipe) { + case 0: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL, + MEC_PIPE0_RESET, 1); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL, + MEC_PIPE0_RESET, 0); + break; + case 1: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL, + MEC_PIPE1_RESET, 1); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL, + MEC_PIPE1_RESET, 0); + break; + case 2: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL, + MEC_PIPE2_RESET, 1); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL, + MEC_PIPE2_RESET, 0); + break; + case 3: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL, + MEC_PIPE3_RESET, 1); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL, + MEC_PIPE3_RESET, 0); + break; + default: + break; + } + WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, reset_pipe); + WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, clean_pipe); + r = (RREG32_SOC15(GC, 0, regCP_MEC_RS64_INSTR_PNTR) << 2) - + RS64_FW_UC_START_ADDR_LO; + } else { + if (ring->me == 1) { + switch (ring->pipe) { + case 0: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, + MEC_ME1_PIPE0_RESET, 1); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL, + MEC_ME1_PIPE0_RESET, 0); + break; + case 1: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, + MEC_ME1_PIPE1_RESET, 1); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL, + MEC_ME1_PIPE1_RESET, 0); + break; + case 2: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, + MEC_ME1_PIPE2_RESET, 1); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL, + MEC_ME1_PIPE2_RESET, 0); + break; + case 3: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, + MEC_ME1_PIPE3_RESET, 1); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL, + MEC_ME1_PIPE3_RESET, 0); + break; + default: + break; + } + /* mec1 fw pc: CP_MEC1_INSTR_PNTR */ + } else { + switch (ring->pipe) { + case 0: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, + MEC_ME2_PIPE0_RESET, 1); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL, + MEC_ME2_PIPE0_RESET, 0); + break; + case 1: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, + MEC_ME2_PIPE1_RESET, 1); + clean_pipe = REG_SET_FIELD(clean_pipe, 
CP_MEC_CNTL, + MEC_ME2_PIPE1_RESET, 0); + break; + case 2: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, + MEC_ME2_PIPE2_RESET, 1); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL, + MEC_ME2_PIPE2_RESET, 0); + break; + case 3: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, + MEC_ME2_PIPE3_RESET, 1); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL, + MEC_ME2_PIPE3_RESET, 0); + break; + default: + break; + } + /* mec2 fw pc: CP:CP_MEC2_INSTR_PNTR */ + } + WREG32_SOC15(GC, 0, regCP_MEC_CNTL, reset_pipe); + WREG32_SOC15(GC, 0, regCP_MEC_CNTL, clean_pipe); + r = RREG32(SOC15_REG_OFFSET(GC, 0, regCP_MEC1_INSTR_PNTR)); + } + + soc21_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + gfx_v11_0_unset_safe_mode(adev, 0); + + dev_info(adev->dev, "The ring %s pipe resets to MEC FW start PC: %s\n", ring->name, + r == 0 ? "successfully" : "failed"); + /*FIXME:Sometimes driver can't cache the MEC firmware start PC correctly, so the pipe + * reset status relies on the compute ring test result. + */ + return 0; } -static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid) +static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; int r = 0; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true); if (r) { - dev_err(adev->dev, "reset via MMIO failed %d\n", r); - return r; + dev_warn(adev->dev, "fail(%d) to reset kcq and try pipe reset\n", r); + r = gfx_v11_0_reset_compute_pipe(ring); + if (r) + return r; } - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)) { - dev_err(adev->dev, "fail to resv mqd_obj\n"); - return r; - } - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (!r) { - r = gfx_v11_0_kcq_init_queue(ring, true); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; - } - amdgpu_bo_unreserve(ring->mqd_obj); + r = gfx_v11_0_kcq_init_queue(ring, true); if (r) { - dev_err(adev->dev, "fail to unresv mqd_obj\n"); + dev_err(adev->dev, "fail to init kcq\n"); return r; } r = amdgpu_mes_map_legacy_queue(adev, ring); @@ -6729,7 +6992,7 @@ static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid) return r; } - return amdgpu_ring_test_ring(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) @@ -6761,9 +7024,14 @@ static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printe for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) { drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k); for (reg = 0; reg < reg_count; reg++) { - drm_printf(p, "%-50s \t 0x%08x\n", - gc_cp_reg_list_11[reg].reg_name, - adev->gfx.ip_dump_compute_queues[index + reg]); + if (i && gc_cp_reg_list_11[reg].reg_offset == regCP_MEC_ME1_HEADER_DUMP) + drm_printf(p, "%-50s \t 0x%08x\n", + "regCP_MEC_ME2_HEADER_DUMP", + adev->gfx.ip_dump_compute_queues[index + reg]); + else + drm_printf(p, "%-50s \t 0x%08x\n", + gc_cp_reg_list_11[reg].reg_name, + adev->gfx.ip_dump_compute_queues[index + reg]); } index += reg_count; } @@ -6823,9 +7091,16 @@ static void gfx_v11_ip_dump(struct amdgpu_ip_block *ip_block) /* ME0 is for GFX so start from 1 for CP */ soc21_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0); for (reg = 0; reg < reg_count; reg++) { - adev->gfx.ip_dump_compute_queues[index + reg] = - 
RREG32(SOC15_REG_ENTRY_OFFSET( - gc_cp_reg_list_11[reg])); + if (i && + gc_cp_reg_list_11[reg].reg_offset == + regCP_MEC_ME1_HEADER_DUMP) + adev->gfx.ip_dump_compute_queues[index + reg] = + RREG32(SOC15_REG_OFFSET(GC, 0, + regCP_MEC_ME2_HEADER_DUMP)); + else + adev->gfx.ip_dump_compute_queues[index + reg] = + RREG32(SOC15_REG_ENTRY_OFFSET( + gc_cp_reg_list_11[reg])); } index += reg_count; } @@ -6953,7 +7228,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = { .emit_wreg = gfx_v11_0_ring_emit_wreg, .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, - .soft_recovery = gfx_v11_0_ring_soft_recovery, .emit_mem_sync = gfx_v11_0_emit_mem_sync, .reset = gfx_v11_0_reset_kgq, .emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader, @@ -6995,7 +7269,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = { .emit_wreg = gfx_v11_0_ring_emit_wreg, .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, - .soft_recovery = gfx_v11_0_ring_soft_recovery, .emit_mem_sync = gfx_v11_0_emit_mem_sync, .reset = gfx_v11_0_reset_kcq, .emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index dceb5ad38862..3e138527d534 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -36,7 +36,7 @@ #include "gc/gc_12_0_0_offset.h" #include "gc/gc_12_0_0_sh_mask.h" #include "soc24_enum.h" -#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h" +#include "ivsrcid/gfx/irqsrcs_gfx_12_0_0.h" #include "soc15.h" #include "clearstate_gfx12.h" @@ -44,6 +44,8 @@ #include "gfx_v12_0.h" #include "nbif_v6_3_1.h" #include "mes_v12_0.h" +#include "mes_userqueue.h" +#include "amdgpu_userq_fence.h" #define GFX12_NUM_GFX_RINGS 1 #define GFX12_MEC_HPD_SIZE 2048 @@ -77,6 +79,7 @@ MODULE_FIRMWARE("amdgpu/gc_12_0_1_pfp.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_me.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_mec.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc.bin"); +MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc_kicker.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_toc.bin"); static const struct amdgpu_hwip_reg_entry gc_reg_list_12_0[] = { @@ -133,11 +136,14 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_12_0[] = { SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_RS64_INSTR_PNTR0), SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_RS64_INSTR_PNTR1), SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_RS64_INSTR_PNTR), - /* cp header registers */ - SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), - SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), - SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP), /* SE status registers */ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0), @@ -186,7 +192,16 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_12[] = { SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET), SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT), SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET), - SOC15_REG_ENTRY_STR(GC, 0, 
regCP_HQD_DEQUEUE_STATUS) + SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS), + /* cp header registers */ + SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), }; static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = { @@ -215,7 +230,24 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = { SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO), SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI), SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ), - SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ) + SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ), + /* cp header registers */ + SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP), }; static const struct soc15_reg_golden golden_settings_gc_12_0_rev0[] = { @@ -475,33 +507,18 @@ static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) memset(&ib, 0, sizeof(ib)); - if (ring->is_mes_queue) { - uint32_t padding, offset; - - offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS); - padding = amdgpu_mes_ctx_get_offs(ring, - AMDGPU_MES_CTX_PADDING_OFFS); - - ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); - ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); - - gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding); - cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding); - *cpu_ptr = cpu_to_le32(0xCAFEDEAD); - } else { - r = amdgpu_device_wb_get(adev, &index); - if (r) - return r; + r = amdgpu_device_wb_get(adev, &index); + if (r) + return r; - gpu_addr = adev->wb.gpu_addr + (index * 4); - adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); - cpu_ptr = &adev->wb.wb[index]; + gpu_addr = adev->wb.gpu_addr + (index * 4); + adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); + cpu_ptr = &adev->wb.wb[index]; - r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib); - if (r) { - dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r); - goto err1; - } + r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib); + if (r) { + dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r); + goto err1; } ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3); @@ -528,12 +545,10 @@ static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) else r = -EINVAL; err2: - if (!ring->is_mes_queue) - amdgpu_ib_free(&ib, NULL); + amdgpu_ib_free(&ib, NULL); dma_fence_put(f); err1: - if (!ring->is_mes_queue) - amdgpu_device_wb_free(adev, 
index); + amdgpu_device_wb_free(adev, index); return r; } @@ -572,7 +587,7 @@ out: static int gfx_v12_0_init_microcode(struct amdgpu_device *adev) { - char ucode_prefix[15]; + char ucode_prefix[30]; int err; const struct rlc_firmware_header_v2_0 *rlc_hdr; uint16_t version_major; @@ -599,9 +614,14 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev) amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK); if (!amdgpu_sriov_vf(adev)) { - err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, - AMDGPU_UCODE_REQUIRED, - "amdgpu/%s_rlc.bin", ucode_prefix); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, + AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_rlc_kicker.bin", ucode_prefix); + else + err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, + AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_rlc.bin", ucode_prefix); if (err) goto out; rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; @@ -881,6 +901,34 @@ static void gfx_v12_0_select_me_pipe_q(struct amdgpu_device *adev, soc24_grbm_select(adev, me, pipe, q, vm); } +/* all sizes are in bytes */ +#define MQD_SHADOW_BASE_SIZE 73728 +#define MQD_SHADOW_BASE_ALIGNMENT 256 +#define MQD_FWWORKAREA_SIZE 484 +#define MQD_FWWORKAREA_ALIGNMENT 256 + +static void gfx_v12_0_get_gfx_shadow_info_nocheck(struct amdgpu_device *adev, + struct amdgpu_gfx_shadow_info *shadow_info) +{ + shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE; + shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT; + shadow_info->csa_size = MQD_FWWORKAREA_SIZE; + shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT; +} + +static int gfx_v12_0_get_gfx_shadow_info(struct amdgpu_device *adev, + struct amdgpu_gfx_shadow_info *shadow_info, + bool skip_check) +{ + if (adev->gfx.cp_gfx_shadow || skip_check) { + gfx_v12_0_get_gfx_shadow_info_nocheck(adev, shadow_info); + return 0; + } + + memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info)); + return -EINVAL; +} + static const struct amdgpu_gfx_funcs gfx_v12_0_gfx_funcs = { .get_gpu_clock_counter = &gfx_v12_0_get_gpu_clock_counter, .select_se_sh = &gfx_v12_0_select_se_sh, @@ -889,6 +937,7 @@ static const struct amdgpu_gfx_funcs gfx_v12_0_gfx_funcs = { .read_wave_vgprs = &gfx_v12_0_read_wave_vgprs, .select_me_pipe_q = &gfx_v12_0_select_me_pipe_q, .update_perfmon_mgcg = &gfx_v12_0_update_perf_clk, + .get_gfx_shadow_info = &gfx_v12_0_get_gfx_shadow_info, }; static int gfx_v12_0_gpu_early_init(struct amdgpu_device *adev) @@ -1346,6 +1395,7 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block) unsigned num_compute_rings; int xcc_id = 0; struct amdgpu_device *adev = ip_block->adev; + int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */ INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler); @@ -1354,8 +1404,8 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(12, 0, 1): adev->gfx.me.num_me = 1; adev->gfx.me.num_pipe_per_me = 1; - adev->gfx.me.num_queue_per_pipe = 1; - adev->gfx.mec.num_mec = 2; + adev->gfx.me.num_queue_per_pipe = 8; + adev->gfx.mec.num_mec = 1; adev->gfx.mec.num_pipe_per_mec = 2; adev->gfx.mec.num_queue_per_pipe = 4; break; @@ -1372,6 +1422,22 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block) switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(12, 0, 0): case IP_VERSION(12, 0, 1): + if (!adev->gfx.disable_uq && + adev->gfx.me_fw_version >= 2780 && + adev->gfx.pfp_fw_version >= 2840 && + adev->gfx.mec_fw_version >= 3050 && + adev->mes.fw_version[0] >= 123) 
{ + adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs; + adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs; + } + break; + default: + break; + } + + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(12, 0, 0): + case IP_VERSION(12, 0, 1): if (adev->gfx.me_fw_version >= 2480 && adev->gfx.pfp_fw_version >= 2530 && adev->gfx.mec_fw_version >= 2680 && @@ -1383,36 +1449,38 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block) break; } - /* recalculate compute rings to use based on hardware configuration */ - num_compute_rings = (adev->gfx.mec.num_pipe_per_mec * - adev->gfx.mec.num_queue_per_pipe) / 2; - adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings, - num_compute_rings); + if (adev->gfx.num_compute_rings) { + /* recalculate compute rings to use based on hardware configuration */ + num_compute_rings = (adev->gfx.mec.num_pipe_per_mec * + adev->gfx.mec.num_queue_per_pipe) / 2; + adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings, + num_compute_rings); + } /* EOP Event */ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, - GFX_11_0_0__SRCID__CP_EOP_INTERRUPT, + GFX_12_0_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq); if (r) return r; /* Bad opcode Event */ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, - GFX_11_0_0__SRCID__CP_BAD_OPCODE_ERROR, + GFX_12_0_0__SRCID__CP_BAD_OPCODE_ERROR, &adev->gfx.bad_op_irq); if (r) return r; /* Privileged reg */ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, - GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT, + GFX_12_0_0__SRCID__CP_PRIV_REG_FAULT, &adev->gfx.priv_reg_irq); if (r) return r; /* Privileged inst */ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, - GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT, + GFX_12_0_0__SRCID__CP_PRIV_INSTR_FAULT, &adev->gfx.priv_inst_irq); if (r) return r; @@ -1433,37 +1501,41 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block) return r; } - /* set up the gfx ring */ - for (i = 0; i < adev->gfx.me.num_me; i++) { - for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) { - for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) { - if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j)) - continue; - - r = gfx_v12_0_gfx_ring_init(adev, ring_id, - i, k, j); - if (r) - return r; - ring_id++; + if (adev->gfx.num_gfx_rings) { + /* set up the gfx ring */ + for (i = 0; i < adev->gfx.me.num_me; i++) { + for (j = 0; j < num_queue_per_pipe; j++) { + for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) { + if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j)) + continue; + + r = gfx_v12_0_gfx_ring_init(adev, ring_id, + i, k, j); + if (r) + return r; + ring_id++; + } } } } - ring_id = 0; - /* set up the compute queues - allocate horizontally across pipes */ - for (i = 0; i < adev->gfx.mec.num_mec; ++i) { - for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { - for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { - if (!amdgpu_gfx_is_mec_queue_enabled(adev, - 0, i, k, j)) - continue; + if (adev->gfx.num_compute_rings) { + ring_id = 0; + /* set up the compute queues - allocate horizontally across pipes */ + for (i = 0; i < adev->gfx.mec.num_mec; ++i) { + for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { + for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { + if (!amdgpu_gfx_is_mec_queue_enabled(adev, + 0, i, k, j)) + continue; - r = gfx_v12_0_compute_ring_init(adev, ring_id, - i, k, j); - if (r) - return r; + r = gfx_v12_0_compute_ring_init(adev, ring_id, + i, k, j); + if (r) + return r; - ring_id++; + ring_id++; + } } } } @@ -1476,10 +1548,14 @@ 
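/* Worked example of the KCQ recalculation above for gc 12.0.x, whose
 * topology is set earlier in sw_init (num_mec = 1, num_pipe_per_mec = 2,
 * num_queue_per_pipe = 4): the cap works out to (2 * 4) / 2 = 4, so at most
 * four compute rings are exposed as kernel queues, leaving the other half
 * of the MEC queues free for other users of the MEC (sketch):
 *
 *	int cap = (adev->gfx.mec.num_pipe_per_mec *
 *		   adev->gfx.mec.num_queue_per_pipe) / 2;	// 4 on gc 12.0.x
 *	adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings, cap);
 */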
static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(12, 0, 0): case IP_VERSION(12, 0, 1): if ((adev->gfx.me_fw_version >= 2660) && - (adev->gfx.mec_fw_version >= 2920)) { - adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; - adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + (adev->gfx.mec_fw_version >= 2920) && + !amdgpu_sriov_vf(adev)) { + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; } + break; + default: + break; } if (!adev->enable_mes_kiq) { @@ -2324,7 +2400,7 @@ static int gfx_v12_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev) amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj); if (amdgpu_emu_mode == 1) - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); @@ -2468,7 +2544,7 @@ static int gfx_v12_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev) amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj); if (amdgpu_emu_mode == 1) - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, lower_32_bits(adev->gfx.me.me_fw_gpu_addr)); @@ -2948,6 +3024,8 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, #ifdef __BIG_ENDIAN tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1); #endif + if (prop->tmz_queue) + tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1); mqd->cp_gfx_hqd_cntl = tmp; /* set up cp_doorbell_control */ @@ -2968,6 +3046,14 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, /* active the queue */ mqd->cp_gfx_hqd_active = 1; + /* set gfx UQ items */ + mqd->shadow_base_lo = lower_32_bits(prop->shadow_addr); + mqd->shadow_base_hi = upper_32_bits(prop->shadow_addr); + mqd->fw_work_area_base_lo = lower_32_bits(prop->csa_addr); + mqd->fw_work_area_base_hi = upper_32_bits(prop->csa_addr); + mqd->fence_address_lo = lower_32_bits(prop->fence_address); + mqd->fence_address_hi = upper_32_bits(prop->fence_address); + return 0; } @@ -3001,37 +3087,19 @@ static int gfx_v12_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset) static int gfx_v12_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) { - int r, i; - struct amdgpu_ring *ring; + int i, r; for (i = 0; i < adev->gfx.num_gfx_rings; i++) { - ring = &adev->gfx.gfx_ring[i]; - - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)) - goto done; - - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (!r) { - r = gfx_v12_0_kgq_init_queue(ring, false); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; - } - amdgpu_bo_unreserve(ring->mqd_obj); + r = gfx_v12_0_kgq_init_queue(&adev->gfx.gfx_ring[i], false); if (r) - goto done; + return r; } r = amdgpu_gfx_enable_kgq(adev, 0); if (r) - goto done; - - r = gfx_v12_0_cp_gfx_start(adev); - if (r) - goto done; + return r; -done: - return r; + return gfx_v12_0_cp_gfx_start(adev); } static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m, @@ -3109,6 +3177,8 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m, tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); + if (prop->tmz_queue) + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1); mqd->cp_hqd_pq_control = tmp; /* set the wb address whether it's 
enabled or not */ @@ -3160,6 +3230,10 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m, mqd->cp_hqd_active = prop->hqd_active; + /* set UQ fenceaddress */ + mqd->fence_address_lo = lower_32_bits(prop->fence_address); + mqd->fence_address_hi = upper_32_bits(prop->fence_address); + return 0; } @@ -3344,57 +3418,25 @@ static int gfx_v12_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset) static int gfx_v12_0_kiq_resume(struct amdgpu_device *adev) { - struct amdgpu_ring *ring; - int r; - - ring = &adev->gfx.kiq[0].ring; - - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)) - return r; - - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (unlikely(r != 0)) { - amdgpu_bo_unreserve(ring->mqd_obj); - return r; - } - - gfx_v12_0_kiq_init_queue(ring); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; - amdgpu_bo_unreserve(ring->mqd_obj); - ring->sched.ready = true; + gfx_v12_0_kiq_init_queue(&adev->gfx.kiq[0].ring); + adev->gfx.kiq[0].ring.sched.ready = true; return 0; } static int gfx_v12_0_kcq_resume(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = NULL; - int r = 0, i; + int i, r; if (!amdgpu_async_gfx_ring) gfx_v12_0_cp_compute_enable(adev, true); for (i = 0; i < adev->gfx.num_compute_rings; i++) { - ring = &adev->gfx.compute_ring[i]; - - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)) - goto done; - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (!r) { - r = gfx_v12_0_kcq_init_queue(ring, false); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; - } - amdgpu_bo_unreserve(ring->mqd_obj); + r = gfx_v12_0_kcq_init_queue(&adev->gfx.compute_ring[i], false); if (r) - goto done; + return r; } - r = amdgpu_gfx_enable_kcq(adev, 0); -done: - return r; + return amdgpu_gfx_enable_kcq(adev, 0); } static int gfx_v12_0_cp_resume(struct amdgpu_device *adev) @@ -3476,7 +3518,7 @@ static int gfx_v12_0_gfxhub_enable(struct amdgpu_device *adev) if (r) return r; - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? 
false : true; @@ -3650,6 +3692,49 @@ static int gfx_v12_0_hw_init(struct amdgpu_ip_block *ip_block) return r; } +static int gfx_v12_0_set_userq_eop_interrupts(struct amdgpu_device *adev, + bool enable) +{ + unsigned int irq_type; + int m, p, r; + + if (adev->userq_funcs[AMDGPU_HW_IP_GFX]) { + for (m = 0; m < adev->gfx.me.num_me; m++) { + for (p = 0; p < adev->gfx.me.num_pipe_per_me; p++) { + irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + p; + if (enable) + r = amdgpu_irq_get(adev, &adev->gfx.eop_irq, + irq_type); + else + r = amdgpu_irq_put(adev, &adev->gfx.eop_irq, + irq_type); + if (r) + return r; + } + } + } + + if (adev->userq_funcs[AMDGPU_HW_IP_COMPUTE]) { + for (m = 0; m < adev->gfx.mec.num_mec; ++m) { + for (p = 0; p < adev->gfx.mec.num_pipe_per_mec; p++) { + irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + + (m * adev->gfx.mec.num_pipe_per_mec) + + p; + if (enable) + r = amdgpu_irq_get(adev, &adev->gfx.eop_irq, + irq_type); + else + r = amdgpu_irq_put(adev, &adev->gfx.eop_irq, + irq_type); + if (r) + return r; + } + } + } + + return 0; +} + static int gfx_v12_0_hw_fini(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; @@ -3660,6 +3745,7 @@ static int gfx_v12_0_hw_fini(struct amdgpu_ip_block *ip_block) amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0); + gfx_v12_0_set_userq_eop_interrupts(adev, false); if (!adev->no_hw_access) { if (amdgpu_async_gfx_ring) { @@ -3748,11 +3834,33 @@ static int gfx_v12_0_early_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; + switch (amdgpu_user_queue) { + case -1: + case 0: + default: + adev->gfx.disable_kq = false; + adev->gfx.disable_uq = true; + break; + case 1: + adev->gfx.disable_kq = false; + adev->gfx.disable_uq = false; + break; + case 2: + adev->gfx.disable_kq = true; + adev->gfx.disable_uq = false; + break; + } + adev->gfx.funcs = &gfx_v12_0_gfx_funcs; - adev->gfx.num_gfx_rings = GFX12_NUM_GFX_RINGS; - adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), - AMDGPU_MAX_COMPUTE_RINGS); + if (adev->gfx.disable_kq) { + adev->gfx.num_gfx_rings = 0; + adev->gfx.num_compute_rings = 0; + } else { + adev->gfx.num_gfx_rings = GFX12_NUM_GFX_RINGS; + adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), + AMDGPU_MAX_COMPUTE_RINGS); + } gfx_v12_0_set_kiq_pm4_funcs(adev); gfx_v12_0_set_ring_funcs(adev); @@ -3783,6 +3891,10 @@ static int gfx_v12_0_late_init(struct amdgpu_ip_block *ip_block) if (r) return r; + r = gfx_v12_0_set_userq_eop_interrupts(adev, true); + if (r) + return r; + return 0; } @@ -4222,45 +4334,17 @@ static u64 gfx_v12_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) static void gfx_v12_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - uint32_t *wptr_saved; - uint32_t *is_queue_unmap; - uint64_t aggregated_db_index; - uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_GFX].mqd_size; - uint64_t wptr_tmp; - - if (ring->is_mes_queue) { - wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size); - is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size + - sizeof(uint32_t)); - aggregated_db_index = - amdgpu_mes_get_aggregated_doorbell_index(adev, - ring->hw_prio); - - wptr_tmp = ring->wptr & ring->buf_mask; - atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp); - *wptr_saved = wptr_tmp; - /* assume doorbell always being used by mes mapped queue */ - if (*is_queue_unmap) { - WDOORBELL64(aggregated_db_index, wptr_tmp); - 
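/* In the surviving non-MES path below, the write-pointer update publishes
 * the new wptr to the CPU-visible copy first and only then rings the
 * doorbell (which itself carries the wptr value), presumably so the packet
 * processor never observes the doorbell ahead of the memory copy;
 * condensed sketch of that ordering:
 *
 *	atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr);
 *	WDOORBELL64(ring->doorbell_index, ring->wptr);
 */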
WDOORBELL64(ring->doorbell_index, wptr_tmp); - } else { - WDOORBELL64(ring->doorbell_index, wptr_tmp); - if (*is_queue_unmap) - WDOORBELL64(aggregated_db_index, wptr_tmp); - } + if (ring->use_doorbell) { + /* XXX check if swapping is necessary on BE */ + atomic64_set((atomic64_t *)ring->wptr_cpu_addr, + ring->wptr); + WDOORBELL64(ring->doorbell_index, ring->wptr); } else { - if (ring->use_doorbell) { - /* XXX check if swapping is necessary on BE */ - atomic64_set((atomic64_t *)ring->wptr_cpu_addr, - ring->wptr); - WDOORBELL64(ring->doorbell_index, ring->wptr); - } else { - WREG32_SOC15(GC, 0, regCP_RB0_WPTR, - lower_32_bits(ring->wptr)); - WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, - upper_32_bits(ring->wptr)); - } + WREG32_SOC15(GC, 0, regCP_RB0_WPTR, + lower_32_bits(ring->wptr)); + WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, + upper_32_bits(ring->wptr)); } } @@ -4285,42 +4369,14 @@ static u64 gfx_v12_0_ring_get_wptr_compute(struct amdgpu_ring *ring) static void gfx_v12_0_ring_set_wptr_compute(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - uint32_t *wptr_saved; - uint32_t *is_queue_unmap; - uint64_t aggregated_db_index; - uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size; - uint64_t wptr_tmp; - - if (ring->is_mes_queue) { - wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size); - is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size + - sizeof(uint32_t)); - aggregated_db_index = - amdgpu_mes_get_aggregated_doorbell_index(adev, - ring->hw_prio); - - wptr_tmp = ring->wptr & ring->buf_mask; - atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp); - *wptr_saved = wptr_tmp; - /* assume doorbell always used by mes mapped queue */ - if (*is_queue_unmap) { - WDOORBELL64(aggregated_db_index, wptr_tmp); - WDOORBELL64(ring->doorbell_index, wptr_tmp); - } else { - WDOORBELL64(ring->doorbell_index, wptr_tmp); - if (*is_queue_unmap) - WDOORBELL64(aggregated_db_index, wptr_tmp); - } + /* XXX check if swapping is necessary on BE */ + if (ring->use_doorbell) { + atomic64_set((atomic64_t *)ring->wptr_cpu_addr, + ring->wptr); + WDOORBELL64(ring->doorbell_index, ring->wptr); } else { - /* XXX check if swapping is necessary on BE */ - if (ring->use_doorbell) { - atomic64_set((atomic64_t *)ring->wptr_cpu_addr, - ring->wptr); - WDOORBELL64(ring->doorbell_index, ring->wptr); - } else { - BUG(); /* only DOORBELL method supported on gfx12 now */ - } + BUG(); /* only DOORBELL method supported on gfx12 now */ } } @@ -4367,10 +4423,6 @@ static void gfx_v12_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, control |= ib->length_dw | (vmid << 24); - if (ring->is_mes_queue) - /* inherit vmid from mqd */ - control |= 0x400000; - amdgpu_ring_write(ring, header); BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ amdgpu_ring_write(ring, @@ -4390,10 +4442,6 @@ static void gfx_v12_0_ring_emit_ib_compute(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); - if (ring->is_mes_queue) - /* inherit vmid from mqd */ - control |= 0x40000000; - amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ amdgpu_ring_write(ring, @@ -4433,8 +4481,7 @@ static void gfx_v12_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, amdgpu_ring_write(ring, upper_32_bits(addr)); amdgpu_ring_write(ring, lower_32_bits(seq)); amdgpu_ring_write(ring, upper_32_bits(seq)); - amdgpu_ring_write(ring, ring->is_mes_queue ? 
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0); + amdgpu_ring_write(ring, 0); } static void gfx_v12_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) @@ -4462,10 +4509,7 @@ static void gfx_v12_0_ring_invalidate_tlbs(struct amdgpu_ring *ring, static void gfx_v12_0_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) { - if (ring->is_mes_queue) - gfx_v12_0_ring_invalidate_tlbs(ring, 0, 0, false, 0); - else - amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); + amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); /* compute doesn't have PFP */ if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) { @@ -4656,21 +4700,6 @@ static void gfx_v12_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, ref, mask, 0x20); } -static void gfx_v12_0_ring_soft_recovery(struct amdgpu_ring *ring, - unsigned vmid) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t value = 0; - - value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); - value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); - value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); - value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); - amdgpu_gfx_rlc_enter_safe_mode(adev, 0); - WREG32_SOC15(GC, 0, regSQ_CMD, value); - amdgpu_gfx_rlc_exit_safe_mode(adev, 0); -} - static void gfx_v12_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, uint32_t me, uint32_t pipe, @@ -4799,25 +4828,23 @@ static int gfx_v12_0_eop_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - int i; + u32 doorbell_offset = entry->src_data[0]; u8 me_id, pipe_id, queue_id; struct amdgpu_ring *ring; - uint32_t mes_queue_id = entry->src_data[0]; + int i; DRM_DEBUG("IH: CP EOP\n"); - if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) { - struct amdgpu_mes_queue *queue; - - mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK; + if (adev->enable_mes && doorbell_offset) { + struct amdgpu_userq_fence_driver *fence_drv = NULL; + struct xarray *xa = &adev->userq_xa; + unsigned long flags; - spin_lock(&adev->mes.queue_id_lock); - queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id); - if (queue) { - DRM_DEBUG("process mes queue id = %d\n", mes_queue_id); - amdgpu_fence_process(queue->ring); - } - spin_unlock(&adev->mes.queue_id_lock); + xa_lock_irqsave(xa, flags); + fence_drv = xa_load(xa, doorbell_offset); + if (fence_drv) + amdgpu_userq_fence_driver_process(fence_drv); + xa_unlock_irqrestore(xa, flags); } else { me_id = (entry->ring_id & 0x0c) >> 2; pipe_id = (entry->ring_id & 0x03) >> 0; @@ -4984,27 +5011,29 @@ static void gfx_v12_0_handle_priv_fault(struct amdgpu_device *adev, pipe_id = (entry->ring_id & 0x03) >> 0; queue_id = (entry->ring_id & 0x70) >> 4; - switch (me_id) { - case 0: - for (i = 0; i < adev->gfx.num_gfx_rings; i++) { - ring = &adev->gfx.gfx_ring[i]; - if (ring->me == me_id && ring->pipe == pipe_id && - ring->queue == queue_id) - drm_sched_fault(&ring->sched); - } - break; - case 1: - case 2: - for (i = 0; i < adev->gfx.num_compute_rings; i++) { - ring = &adev->gfx.compute_ring[i]; - if (ring->me == me_id && ring->pipe == pipe_id && - ring->queue == queue_id) - drm_sched_fault(&ring->sched); + if (!adev->gfx.disable_kq) { + switch (me_id) { + case 0: + for (i = 0; i < adev->gfx.num_gfx_rings; i++) { + ring = &adev->gfx.gfx_ring[i]; + if (ring->me == me_id && ring->pipe == pipe_id && + ring->queue == queue_id) + drm_sched_fault(&ring->sched); + } + break; + case 1: + case 2: + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + ring = &adev->gfx.compute_ring[i]; + if (ring->me 
== me_id && ring->pipe == pipe_id && + ring->queue == queue_id) + drm_sched_fault(&ring->sched); + } + break; + default: + BUG(); + break; } - break; - default: - BUG(); - break; } } @@ -5210,34 +5239,89 @@ static void gfx_v12_ip_dump(struct amdgpu_ip_block *ip_block) amdgpu_gfx_off_ctrl(adev, true); } -static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) +static bool gfx_v12_pipe_reset_support(struct amdgpu_device *adev) +{ + /* Disable the pipe reset until the CPFW fully support it.*/ + dev_warn_once(adev->dev, "The CPFW hasn't support pipe reset yet.\n"); + return false; +} + +static int gfx_v12_reset_gfx_pipe(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; + uint32_t reset_pipe = 0, clean_pipe = 0; int r; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; + if (!gfx_v12_pipe_reset_support(adev)) + return -EOPNOTSUPP; + + gfx_v12_0_set_safe_mode(adev, 0); + mutex_lock(&adev->srbm_mutex); + soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + + switch (ring->pipe) { + case 0: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL, + PFP_PIPE0_RESET, 1); + reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL, + ME_PIPE0_RESET, 1); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL, + PFP_PIPE0_RESET, 0); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL, + ME_PIPE0_RESET, 0); + break; + case 1: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL, + PFP_PIPE1_RESET, 1); + reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL, + ME_PIPE1_RESET, 1); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL, + PFP_PIPE1_RESET, 0); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL, + ME_PIPE1_RESET, 0); + break; + default: + break; + } + + WREG32_SOC15(GC, 0, regCP_ME_CNTL, reset_pipe); + WREG32_SOC15(GC, 0, regCP_ME_CNTL, clean_pipe); + + r = (RREG32(SOC15_REG_OFFSET(GC, 0, regCP_GFX_RS64_INSTR_PNTR1)) << 2) - + RS64_FW_UC_START_ADDR_LO; + soc24_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + gfx_v12_0_unset_safe_mode(adev, 0); + + dev_info(adev->dev, "The ring %s pipe reset: %s\n", ring->name, + r == 0 ? "successfully" : "failed"); + /* Sometimes the ME start pc counter can't cache correctly, so the + * PC check only as a reference and pipe reset result rely on the + * later ring test. 
+ */ + return 0; +} + +static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) +{ + struct amdgpu_device *adev = ring->adev; + int r; + + amdgpu_ring_reset_helper_begin(ring, timedout_fence); r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false); if (r) { - dev_err(adev->dev, "reset via MES failed %d\n", r); - return r; + dev_warn(adev->dev, "reset via MES failed and try pipe reset %d\n", r); + r = gfx_v12_reset_gfx_pipe(ring); + if (r) + return r; } - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)) { - dev_err(adev->dev, "fail to resv mqd_obj\n"); - return r; - } - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (!r) { - r = gfx_v12_0_kgq_init_queue(ring, true); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; - } - amdgpu_bo_unreserve(ring->mqd_obj); + r = gfx_v12_0_kgq_init_queue(ring, true); if (r) { - DRM_ERROR("fail to unresv mqd_obj\n"); + dev_err(adev->dev, "failed to init kgq\n"); return r; } @@ -5247,37 +5331,112 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) return r; } - return amdgpu_ring_test_ring(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } -static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid) +static int gfx_v12_0_reset_compute_pipe(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t reset_pipe = 0, clean_pipe = 0; + int r = 0; + + if (!gfx_v12_pipe_reset_support(adev)) + return -EOPNOTSUPP; + + gfx_v12_0_set_safe_mode(adev, 0); + mutex_lock(&adev->srbm_mutex); + soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + + reset_pipe = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL); + clean_pipe = reset_pipe; + + if (adev->gfx.rs64_enable) { + switch (ring->pipe) { + case 0: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL, + MEC_PIPE0_RESET, 1); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL, + MEC_PIPE0_RESET, 0); + break; + case 1: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL, + MEC_PIPE1_RESET, 1); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL, + MEC_PIPE1_RESET, 0); + break; + case 2: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL, + MEC_PIPE2_RESET, 1); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL, + MEC_PIPE2_RESET, 0); + break; + case 3: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL, + MEC_PIPE3_RESET, 1); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL, + MEC_PIPE3_RESET, 0); + break; + default: + break; + } + WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, reset_pipe); + WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, clean_pipe); + r = (RREG32_SOC15(GC, 0, regCP_MEC_RS64_INSTR_PNTR) << 2) - + RS64_FW_UC_START_ADDR_LO; + } else { + switch (ring->pipe) { + case 0: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, + MEC_ME1_PIPE0_RESET, 1); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL, + MEC_ME1_PIPE0_RESET, 0); + break; + case 1: + reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, + MEC_ME1_PIPE1_RESET, 1); + clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL, + MEC_ME1_PIPE1_RESET, 0); + break; + default: + break; + } + WREG32_SOC15(GC, 0, regCP_MEC_CNTL, reset_pipe); + WREG32_SOC15(GC, 0, regCP_MEC_CNTL, clean_pipe); + /* Doesn't find the F32 MEC instruction pointer register, and suppose + * the driver won't run into the F32 mode. 
+ */ + } + + soc24_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + gfx_v12_0_unset_safe_mode(adev, 0); + + dev_info(adev->dev, "The ring %s pipe resets: %s\n", ring->name, + r == 0 ? "successfully" : "failed"); + /* Need the ring test to verify the pipe reset result.*/ + return 0; +} + +static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; int r; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true); if (r) { - dev_err(adev->dev, "reset via MMIO failed %d\n", r); - return r; + dev_warn(adev->dev, "fail(%d) to reset kcq and try pipe reset\n", r); + r = gfx_v12_0_reset_compute_pipe(ring); + if (r) + return r; } - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)) { - DRM_ERROR("fail to resv mqd_obj\n"); - return r; - } - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (!r) { - r = gfx_v12_0_kcq_init_queue(ring, true); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; - } - amdgpu_bo_unreserve(ring->mqd_obj); + r = gfx_v12_0_kcq_init_queue(ring, true); if (r) { - DRM_ERROR("fail to unresv mqd_obj\n"); + dev_err(adev->dev, "failed to init kcq\n"); return r; } r = amdgpu_mes_map_legacy_queue(adev, ring); @@ -5286,7 +5445,7 @@ static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid) return r; } - return amdgpu_ring_test_ring(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static void gfx_v12_0_ring_begin_use(struct amdgpu_ring *ring) @@ -5364,7 +5523,6 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = { .emit_wreg = gfx_v12_0_ring_emit_wreg, .emit_reg_wait = gfx_v12_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait, - .soft_recovery = gfx_v12_0_ring_soft_recovery, .emit_mem_sync = gfx_v12_0_emit_mem_sync, .reset = gfx_v12_0_reset_kgq, .emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader, @@ -5403,7 +5561,6 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = { .emit_wreg = gfx_v12_0_ring_emit_wreg, .emit_reg_wait = gfx_v12_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait, - .soft_recovery = gfx_v12_0_ring_soft_recovery, .emit_mem_sync = gfx_v12_0_emit_mem_sync, .reset = gfx_v12_0_reset_kcq, .emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 13fbee46417a..70d7a1f434c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -53,6 +53,9 @@ #define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002 #define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001 +#define GFX6_NUM_GFX_RINGS 1 +#define GFX6_NUM_COMPUTE_RINGS 2 + static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev); static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev); static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev); @@ -1732,10 +1735,14 @@ static void gfx_v6_0_constants_init(struct amdgpu_device *adev) gfx_v6_0_get_cu_info(adev); gfx_v6_0_config_init(adev); - WREG32(mmCP_QUEUE_THRESHOLDS, ((0x16 << CP_QUEUE_THRESHOLDS__ROQ_IB1_START__SHIFT) | - (0x2b << CP_QUEUE_THRESHOLDS__ROQ_IB2_START__SHIFT))); - WREG32(mmCP_MEQ_THRESHOLDS, (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) | - (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT)); + 
WREG32(mmCP_QUEUE_THRESHOLDS, + ((0x16 << CP_QUEUE_THRESHOLDS__ROQ_IB1_START__SHIFT) | + (0x2b << CP_QUEUE_THRESHOLDS__ROQ_IB2_START__SHIFT))); + + /* set HW defaults for 3D engine */ + WREG32(mmCP_MEQ_THRESHOLDS, + (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) | + (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT)); sx_debug_1 = RREG32(mmSX_DEBUG_1); WREG32(mmSX_DEBUG_1, sx_debug_1); @@ -2851,44 +2858,21 @@ static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev) static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer) { - u32 count = 0, i; - const struct cs_section_def *sect = NULL; - const struct cs_extent_def *ext = NULL; + u32 count = 0; if (adev->gfx.rlc.cs_data == NULL) return; if (buffer == NULL) return; - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); - buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1)); - buffer[count++] = cpu_to_le32(0x80000000); - buffer[count++] = cpu_to_le32(0x80000000); - - for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { - for (ext = sect->section; ext->extent != NULL; ++ext) { - if (sect->id == SECT_CONTEXT) { - buffer[count++] = - cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); - buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000); - for (i = 0; i < ext->reg_count; i++) - buffer[count++] = cpu_to_le32(ext->extent[i]); - } else { - return; - } - } - } + count = amdgpu_gfx_csb_preamble_start(buffer); + count = amdgpu_gfx_csb_data_parser(adev, buffer, count); buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1)); buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config); - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); - buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); - - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0)); - buffer[count++] = cpu_to_le32(0); + amdgpu_gfx_csb_preamble_end(buffer, count); } static void gfx_v6_0_init_pg(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 8181bd0e4f18..2aa323dab34e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -55,6 +55,9 @@ #define GFX7_NUM_GFX_RINGS 1 #define GFX7_MEC_HPD_SIZE 2048 +#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001 +#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003 + static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev); static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev); static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev); @@ -3882,67 +3885,22 @@ static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev) static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer) { - u32 count = 0, i; - const struct cs_section_def *sect = NULL; - const struct cs_extent_def *ext = NULL; + u32 count = 0; if (adev->gfx.rlc.cs_data == NULL) return; if (buffer == NULL) return; - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); - buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); - - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1)); - buffer[count++] = cpu_to_le32(0x80000000); - buffer[count++] = cpu_to_le32(0x80000000); - - for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { - for (ext = sect->section; ext->extent != NULL; ++ext) { - if 
(sect->id == SECT_CONTEXT) { - buffer[count++] = - cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); - buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START); - for (i = 0; i < ext->reg_count; i++) - buffer[count++] = cpu_to_le32(ext->extent[i]); - } else { - return; - } - } - } + count = amdgpu_gfx_csb_preamble_start(buffer); + count = amdgpu_gfx_csb_data_parser(adev, buffer, count); buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2)); buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); - switch (adev->asic_type) { - case CHIP_BONAIRE: - buffer[count++] = cpu_to_le32(0x16000012); - buffer[count++] = cpu_to_le32(0x00000000); - break; - case CHIP_KAVERI: - buffer[count++] = cpu_to_le32(0x00000000); /* XXX */ - buffer[count++] = cpu_to_le32(0x00000000); - break; - case CHIP_KABINI: - case CHIP_MULLINS: - buffer[count++] = cpu_to_le32(0x00000000); /* XXX */ - buffer[count++] = cpu_to_le32(0x00000000); - break; - case CHIP_HAWAII: - buffer[count++] = cpu_to_le32(0x3a00161a); - buffer[count++] = cpu_to_le32(0x0000002e); - break; - default: - buffer[count++] = cpu_to_le32(0x00000000); - buffer[count++] = cpu_to_le32(0x00000000); - break; - } - - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); - buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); + buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config); + buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1); - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0)); - buffer[count++] = cpu_to_le32(0); + amdgpu_gfx_csb_preamble_end(buffer, count); } static void gfx_v7_0_init_pg(struct amdgpu_device *adev) @@ -4926,76 +4884,6 @@ static void gfx_v7_0_emit_mem_sync_compute(struct amdgpu_ring *ring) amdgpu_ring_write(ring, 0x0000000A); /* poll interval */ } -static void gfx_v7_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, - int mem_space, int opt, uint32_t addr0, - uint32_t addr1, uint32_t ref, uint32_t mask, - uint32_t inv) -{ - amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); - amdgpu_ring_write(ring, - /* memory (1) or register (0) */ - (WAIT_REG_MEM_MEM_SPACE(mem_space) | - WAIT_REG_MEM_OPERATION(opt) | /* wait */ - WAIT_REG_MEM_FUNCTION(3) | /* equal */ - WAIT_REG_MEM_ENGINE(eng_sel))); - - if (mem_space) - BUG_ON(addr0 & 0x3); /* Dword align */ - amdgpu_ring_write(ring, addr0); - amdgpu_ring_write(ring, addr1); - amdgpu_ring_write(ring, ref); - amdgpu_ring_write(ring, mask); - amdgpu_ring_write(ring, inv); /* poll interval */ -} - -static void gfx_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, - uint32_t val, uint32_t mask) -{ - gfx_v7_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); -} - -static int gfx_v7_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) -{ - struct amdgpu_device *adev = ring->adev; - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; - struct amdgpu_ring *kiq_ring = &kiq->ring; - unsigned long flags; - u32 tmp; - int r; - - if (amdgpu_sriov_vf(adev)) - return -EINVAL; - - if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) - return -EINVAL; - - spin_lock_irqsave(&kiq->ring_lock, flags); - - if (amdgpu_ring_alloc(kiq_ring, 5)) { - spin_unlock_irqrestore(&kiq->ring_lock, flags); - return -ENOMEM; - } - - tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid); - gfx_v7_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp); - amdgpu_ring_commit(kiq_ring); - - spin_unlock_irqrestore(&kiq->ring_lock, flags); - - r = 
amdgpu_ring_test_ring(kiq_ring); - if (r) - return r; - - if (amdgpu_ring_alloc(ring, 7 + 12 + 5)) - return -ENOMEM; - gfx_v7_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr, - ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC); - gfx_v7_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff); - gfx_v7_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0); - - return amdgpu_ring_test_ring(ring); -} - static const struct amd_ip_funcs gfx_v7_0_ip_funcs = { .name = "gfx_v7_0", .early_init = gfx_v7_0_early_init, @@ -5045,7 +4933,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { .emit_wreg = gfx_v7_0_ring_emit_wreg, .soft_recovery = gfx_v7_0_ring_soft_recovery, .emit_mem_sync = gfx_v7_0_emit_mem_sync, - .reset = gfx_v7_0_reset_kgq, }; static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index d116a2e2f469..367449d8061b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1223,48 +1223,22 @@ out: static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer) { - u32 count = 0, i; - const struct cs_section_def *sect = NULL; - const struct cs_extent_def *ext = NULL; + u32 count = 0; if (adev->gfx.rlc.cs_data == NULL) return; if (buffer == NULL) return; - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); - buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); - - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1)); - buffer[count++] = cpu_to_le32(0x80000000); - buffer[count++] = cpu_to_le32(0x80000000); - - for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { - for (ext = sect->section; ext->extent != NULL; ++ext) { - if (sect->id == SECT_CONTEXT) { - buffer[count++] = - cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); - buffer[count++] = cpu_to_le32(ext->reg_index - - PACKET3_SET_CONTEXT_REG_START); - for (i = 0; i < ext->reg_count; i++) - buffer[count++] = cpu_to_le32(ext->extent[i]); - } else { - return; - } - } - } + count = amdgpu_gfx_csb_preamble_start(buffer); + count = amdgpu_gfx_csb_data_parser(adev, buffer, count); buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2)); - buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - - PACKET3_SET_CONTEXT_REG_START); + buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config); buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1); - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); - buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); - - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0)); - buffer[count++] = cpu_to_le32(0); + amdgpu_gfx_csb_preamble_end(buffer, count); } static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev) @@ -4666,6 +4640,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring) memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation)); /* reset ring buffer */ ring->wptr = 0; + atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); amdgpu_ring_clear_ring(ring); } return 0; @@ -4683,60 +4658,25 @@ static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev) static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev) { - struct amdgpu_ring *ring; - int r; - - ring = &adev->gfx.kiq[0].ring; - - r = amdgpu_bo_reserve(ring->mqd_obj, false); - 
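Note on the CSB rework: the identical open-coded clear-state-buffer construction deleted from gfx_v6_0, gfx_v7_0 and gfx_v8_0 above (and from gfx_v9_0 further below) is folded into three shared helpers, and only their call sites appear in this diff. A minimal sketch, reconstructed from the removed open-coded sequences (the bodies are an assumption, not the actual amdgpu_gfx.c implementation):

u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer)
{
	u32 count = 0;

	/* mark the start of clear state and program CONTEXT_CONTROL */
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	return count;
}

u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev,
			       volatile u32 *buffer, u32 count)
{
	const struct cs_section_def *sect;
	const struct cs_extent_def *ext;
	u32 i;

	/* emit a SET_CONTEXT_REG packet per SECT_CONTEXT extent */
	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id != SECT_CONTEXT)
				return count;
			buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG,
							      ext->reg_count));
			buffer[count++] = cpu_to_le32(ext->reg_index -
						      PACKET3_SET_CONTEXT_REG_START);
			for (i = 0; i < ext->reg_count; i++)
				buffer[count++] = cpu_to_le32(ext->extent[i]);
		}
	}
	return count;
}

void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count)
{
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

One nuance: the old gfx_v6_0 path subtracted a hard-coded 0xa000 instead of PACKET3_SET_CONTEXT_REG_START, so the shared helper presumably relies on the gfx6 headers defining that constant to the same base.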
if (unlikely(r != 0)) - return r; - - r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr); - if (unlikely(r != 0)) { - amdgpu_bo_unreserve(ring->mqd_obj); - return r; - } - - gfx_v8_0_kiq_init_queue(ring); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; - amdgpu_bo_unreserve(ring->mqd_obj); + gfx_v8_0_kiq_init_queue(&adev->gfx.kiq[0].ring); return 0; } static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = NULL; - int r = 0, i; + int i, r; gfx_v8_0_cp_compute_enable(adev, true); for (i = 0; i < adev->gfx.num_compute_rings; i++) { - ring = &adev->gfx.compute_ring[i]; - - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)) - goto done; - r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr); - if (!r) { - r = gfx_v8_0_kcq_init_queue(ring); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; - } - amdgpu_bo_unreserve(ring->mqd_obj); + r = gfx_v8_0_kcq_init_queue(&adev->gfx.compute_ring[i]); if (r) - goto done; + return r; } gfx_v8_0_set_mec_doorbell_range(adev); - r = gfx_v8_0_kiq_kcq_enable(adev); - if (r) - goto done; - -done: - return r; + return gfx_v8_0_kiq_kcq_enable(adev); } static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev) @@ -6400,34 +6340,6 @@ static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, amdgpu_ring_write(ring, val); } -static void gfx_v8_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, - int mem_space, int opt, uint32_t addr0, - uint32_t addr1, uint32_t ref, uint32_t mask, - uint32_t inv) -{ - amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); - amdgpu_ring_write(ring, - /* memory (1) or register (0) */ - (WAIT_REG_MEM_MEM_SPACE(mem_space) | - WAIT_REG_MEM_OPERATION(opt) | /* wait */ - WAIT_REG_MEM_FUNCTION(3) | /* equal */ - WAIT_REG_MEM_ENGINE(eng_sel))); - - if (mem_space) - BUG_ON(addr0 & 0x3); /* Dword align */ - amdgpu_ring_write(ring, addr0); - amdgpu_ring_write(ring, addr1); - amdgpu_ring_write(ring, ref); - amdgpu_ring_write(ring, mask); - amdgpu_ring_write(ring, inv); /* poll interval */ -} - -static void gfx_v8_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, - uint32_t val, uint32_t mask) -{ - gfx_v8_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); -} - static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid) { struct amdgpu_device *adev = ring->adev; @@ -6904,48 +6816,6 @@ static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable) } -static int gfx_v8_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) -{ - struct amdgpu_device *adev = ring->adev; - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; - struct amdgpu_ring *kiq_ring = &kiq->ring; - unsigned long flags; - u32 tmp; - int r; - - if (amdgpu_sriov_vf(adev)) - return -EINVAL; - - if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) - return -EINVAL; - - spin_lock_irqsave(&kiq->ring_lock, flags); - - if (amdgpu_ring_alloc(kiq_ring, 5)) { - spin_unlock_irqrestore(&kiq->ring_lock, flags); - return -ENOMEM; - } - - tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid); - gfx_v8_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp); - amdgpu_ring_commit(kiq_ring); - - spin_unlock_irqrestore(&kiq->ring_lock, flags); - - r = amdgpu_ring_test_ring(kiq_ring); - if (r) - return r; - - if (amdgpu_ring_alloc(ring, 7 + 12 + 5)) - return -ENOMEM; - gfx_v8_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr, - ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC); - gfx_v8_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff); - 
gfx_v8_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0); - - return amdgpu_ring_test_ring(ring); -} - static const struct amd_ip_funcs gfx_v8_0_ip_funcs = { .name = "gfx_v8_0", .early_init = gfx_v8_0_early_init, @@ -7011,7 +6881,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { .emit_wreg = gfx_v8_0_ring_emit_wreg, .soft_recovery = gfx_v8_0_ring_soft_recovery, .emit_mem_sync = gfx_v8_0_emit_mem_sync, - .reset = gfx_v8_0_reset_kgq, }; static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index d345285ea885..20b30f4b3c7d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -225,17 +225,36 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_9[] = { SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_SAFE_MODE), SOC15_REG_ENTRY_STR(GC, 0, mmRLC_INT_STAT), SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_GENERAL_6), - /* cp header registers */ - SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP), - SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP), - SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME2_HEADER_DUMP), - SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP), - SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP), /* SE status registers */ SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE0), SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE1), SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE2), - SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE3) + SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE3), + /* packet headers */ + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP) }; static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9[] = { @@ -277,6 +296,14 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9[] = { SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_LO), SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_HI), SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_GFX_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP) }; enum 
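Why the same register is listed eight times: the CP_*_HEADER_DUMP registers appear to behave as read-to-pop FIFOs, so each successive read returns another of the most recent packet headers fetched by that engine, and repeating one offset eight times in the dump table drains the whole history. A hedged illustration (the FIFO depth of 8 is inferred from the table above, not stated in this diff):

	/* successive reads of one HEADER_DUMP register are assumed to
	 * walk the hardware's header-history FIFO; illustrative only */
	uint32_t hdrs[8];
	int i;

	for (i = 0; i < 8; i++)
		hdrs[i] = RREG32_SOC15(GC, 0, mmCP_ME_HEADER_DUMP);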
ta_ras_gfx_subblock { @@ -1269,6 +1296,7 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) adev->gfx.mec_fw_write_wait = false; if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) && + (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) && ((adev->gfx.mec_fw_version < 0x000001a5) || (adev->gfx.mec_feature_version < 46) || (adev->gfx.pfp_fw_version < 0x000000b7) || @@ -1623,42 +1651,16 @@ static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev) static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer) { - u32 count = 0, i; - const struct cs_section_def *sect = NULL; - const struct cs_extent_def *ext = NULL; + u32 count = 0; if (adev->gfx.rlc.cs_data == NULL) return; if (buffer == NULL) return; - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); - buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); - - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1)); - buffer[count++] = cpu_to_le32(0x80000000); - buffer[count++] = cpu_to_le32(0x80000000); - - for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { - for (ext = sect->section; ext->extent != NULL; ++ext) { - if (sect->id == SECT_CONTEXT) { - buffer[count++] = - cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); - buffer[count++] = cpu_to_le32(ext->reg_index - - PACKET3_SET_CONTEXT_REG_START); - for (i = 0; i < ext->reg_count; i++) - buffer[count++] = cpu_to_le32(ext->extent[i]); - } else { - return; - } - } - } - - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); - buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); - - buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0)); - buffer[count++] = cpu_to_le32(0); + count = amdgpu_gfx_csb_preamble_start(buffer); + count = amdgpu_gfx_csb_data_parser(adev, buffer, count); + amdgpu_gfx_csb_preamble_end(buffer, count); } static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev) @@ -2233,6 +2235,25 @@ static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block) } switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(9, 0, 1): + case IP_VERSION(9, 2, 1): + case IP_VERSION(9, 4, 0): + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 3, 0): + adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex; + adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex); + if (adev->gfx.me_fw_version >= 167 && + adev->gfx.pfp_fw_version >= 196 && + adev->gfx.mec_fw_version >= 474) { + adev->gfx.enable_cleaner_shader = true; + r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); + if (r) { + adev->gfx.enable_cleaner_shader = false; + dev_err(adev->dev, "Failed to initialize cleaner shader\n"); + } + } + break; case IP_VERSION(9, 4, 2): adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex; adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex); @@ -2389,6 +2410,8 @@ static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block) amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); adev->gfx.compute_supported_reset = amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); + if (!amdgpu_sriov_vf(adev)) + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0); if (r) { @@ -3890,55 +3913,23 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring, bool restore) static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev) { - struct amdgpu_ring *ring; - int 
r; - - ring = &adev->gfx.kiq[0].ring; - - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)) - return r; - - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (unlikely(r != 0)) { - amdgpu_bo_unreserve(ring->mqd_obj); - return r; - } - - gfx_v9_0_kiq_init_queue(ring); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; - amdgpu_bo_unreserve(ring->mqd_obj); + gfx_v9_0_kiq_init_queue(&adev->gfx.kiq[0].ring); return 0; } static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = NULL; - int r = 0, i; + int i, r; gfx_v9_0_cp_compute_enable(adev, true); for (i = 0; i < adev->gfx.num_compute_rings; i++) { - ring = &adev->gfx.compute_ring[i]; - - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)) - goto done; - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (!r) { - r = gfx_v9_0_kcq_init_queue(ring, false); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; - } - amdgpu_bo_unreserve(ring->mqd_obj); + r = gfx_v9_0_kcq_init_queue(&adev->gfx.compute_ring[i], false); if (r) - goto done; + return r; } - r = amdgpu_gfx_enable_kcq(adev, 0); -done: - return r; + return amdgpu_gfx_enable_kcq(adev, 0); } static int gfx_v9_0_cp_resume(struct amdgpu_device *adev) @@ -5472,16 +5463,8 @@ static void gfx_v9_0_ring_patch_ce_meta(struct amdgpu_ring *ring, payload_size = sizeof(struct v9_ce_ib_state); - if (ring->is_mes_queue) { - payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data, - gfx[0].gfx_meta_data) + - offsetof(struct v9_gfx_meta_data, ce_payload); - ce_payload_cpu_addr = - amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset); - } else { - payload_offset = offsetof(struct v9_gfx_meta_data, ce_payload); - ce_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset; - } + payload_offset = offsetof(struct v9_gfx_meta_data, ce_payload); + ce_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset; if (offset + (payload_size >> 2) <= ring->buf_mask + 1) { memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr, payload_size); @@ -5504,16 +5487,8 @@ static void gfx_v9_0_ring_patch_de_meta(struct amdgpu_ring *ring, payload_size = sizeof(struct v9_de_ib_state); - if (ring->is_mes_queue) { - payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data, - gfx[0].gfx_meta_data) + - offsetof(struct v9_gfx_meta_data, de_payload); - de_payload_cpu_addr = - amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset); - } else { - payload_offset = offsetof(struct v9_gfx_meta_data, de_payload); - de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset; - } + payload_offset = offsetof(struct v9_gfx_meta_data, de_payload); + de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset; ((struct v9_de_ib_state *)de_payload_cpu_addr)->ib_completion_status = IB_COMPLETION_STATUS_PREEMPTED; @@ -5703,19 +5678,9 @@ static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume) cnt = (sizeof(ce_payload) >> 2) + 4 - 2; - if (ring->is_mes_queue) { - offset = offsetof(struct amdgpu_mes_ctx_meta_data, - gfx[0].gfx_meta_data) + - offsetof(struct v9_gfx_meta_data, ce_payload); - ce_payload_gpu_addr = - amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); - ce_payload_cpu_addr = - amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); - } else { - offset = offsetof(struct v9_gfx_meta_data, ce_payload); - ce_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset; - ce_payload_cpu_addr = adev->virt.csa_cpu_addr + offset; - } + offset = offsetof(struct v9_gfx_meta_data, ce_payload); + ce_payload_gpu_addr = 
amdgpu_csa_vaddr(ring->adev) + offset; + ce_payload_cpu_addr = adev->virt.csa_cpu_addr + offset; amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt)); amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) | @@ -5801,28 +5766,13 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bo void *de_payload_cpu_addr; int cnt; - if (ring->is_mes_queue) { - offset = offsetof(struct amdgpu_mes_ctx_meta_data, - gfx[0].gfx_meta_data) + - offsetof(struct v9_gfx_meta_data, de_payload); - de_payload_gpu_addr = - amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); - de_payload_cpu_addr = - amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); - - offset = offsetof(struct amdgpu_mes_ctx_meta_data, - gfx[0].gds_backup) + - offsetof(struct v9_gfx_meta_data, de_payload); - gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); - } else { - offset = offsetof(struct v9_gfx_meta_data, de_payload); - de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset; - de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset; + offset = offsetof(struct v9_gfx_meta_data, de_payload); + de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset; + de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset; - gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) + - AMDGPU_CSA_SIZE - adev->gds.gds_size, - PAGE_SIZE); - } + gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) + + AMDGPU_CSA_SIZE - adev->gds.gds_size, + PAGE_SIZE); if (usegds) { de_payload.gds_backup_addrlo = lower_32_bits(gds_addr); @@ -7223,53 +7173,9 @@ static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) amdgpu_ring_insert_nop(ring, num_nop - 1); } -static int gfx_v9_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) -{ - struct amdgpu_device *adev = ring->adev; - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; - struct amdgpu_ring *kiq_ring = &kiq->ring; - unsigned long flags; - u32 tmp; - int r; - - if (amdgpu_sriov_vf(adev)) - return -EINVAL; - - if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) - return -EINVAL; - - spin_lock_irqsave(&kiq->ring_lock, flags); - - if (amdgpu_ring_alloc(kiq_ring, 5)) { - spin_unlock_irqrestore(&kiq->ring_lock, flags); - return -ENOMEM; - } - - tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid); - gfx_v9_0_ring_emit_wreg(kiq_ring, - SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), tmp); - amdgpu_ring_commit(kiq_ring); - - spin_unlock_irqrestore(&kiq->ring_lock, flags); - - r = amdgpu_ring_test_ring(kiq_ring); - if (r) - return r; - - if (amdgpu_ring_alloc(ring, 7 + 7 + 5)) - return -ENOMEM; - gfx_v9_0_ring_emit_fence(ring, ring->fence_drv.gpu_addr, - ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC); - gfx_v9_0_ring_emit_reg_wait(ring, - SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffff); - gfx_v9_0_ring_emit_wreg(ring, - SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0); - - return amdgpu_ring_test_ring(ring); -} - static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring, - unsigned int vmid) + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; @@ -7277,12 +7183,11 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring, unsigned long flags; int i, r; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; - if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + spin_lock_irqsave(&kiq->ring_lock, flags); if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { @@ -7319,20 +7224,9 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring, 
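The per-queue reset path now delegates its common bookkeeping to amdgpu_ring_reset_helper_begin()/amdgpu_ring_reset_helper_end(), both taking the fence of the timed-out job. Their bodies are outside this diff; a plausible sketch of the contract, inferred only from the call sites here (amdgpu_ring_backup_unprocessed_commands() is an assumed helper name):

void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
				    struct amdgpu_fence *timedout_fence)
{
	/* keep the scheduler off the ring while it is being reset */
	drm_sched_wqueue_stop(&ring->sched);
	/* save submissions queued behind the guilty one for re-emission */
	amdgpu_ring_backup_unprocessed_commands(ring, timedout_fence); /* assumed */
}

int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
				 struct amdgpu_fence *timedout_fence)
{
	int r;

	/* the queue was re-inited and remapped by the caller; verify it */
	r = amdgpu_ring_test_ring(ring);
	if (r)
		return r;

	/* assumption: complete outstanding fences, then resume scheduling */
	amdgpu_fence_driver_force_completion(ring);
	drm_sched_wqueue_start(&ring->sched);
	return 0;
}

This would explain why the trailing amdgpu_ring_test_ring(ring) call in gfx_v9_0_reset_kcq() is replaced by the _end() helper below.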
return r; } - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)){ - dev_err(adev->dev, "fail to resv mqd_obj\n"); - return r; - } - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (!r) { - r = gfx_v9_0_kcq_init_queue(ring, true); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; - } - amdgpu_bo_unreserve(ring->mqd_obj); + r = gfx_v9_0_kcq_init_queue(ring, true); if (r) { - dev_err(adev->dev, "fail to unresv mqd_obj\n"); + dev_err(adev->dev, "fail to init kcq\n"); return r; } spin_lock_irqsave(&kiq->ring_lock, flags); @@ -7343,13 +7237,13 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring, } kiq->pmf->kiq_map_queues(kiq_ring, ring); amdgpu_ring_commit(kiq_ring); - spin_unlock_irqrestore(&kiq->ring_lock, flags); r = amdgpu_ring_test_ring(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); if (r) { DRM_ERROR("fail to remap queue\n"); return r; } - return amdgpu_ring_test_ring(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) @@ -7381,9 +7275,14 @@ static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) { drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k); for (reg = 0; reg < reg_count; reg++) { - drm_printf(p, "%-50s \t 0x%08x\n", - gc_cp_reg_list_9[reg].reg_name, - adev->gfx.ip_dump_compute_queues[index + reg]); + if (i && gc_cp_reg_list_9[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP) + drm_printf(p, "%-50s \t 0x%08x\n", + "mmCP_MEC_ME2_HEADER_DUMP", + adev->gfx.ip_dump_compute_queues[index + reg]); + else + drm_printf(p, "%-50s \t 0x%08x\n", + gc_cp_reg_list_9[reg].reg_name, + adev->gfx.ip_dump_compute_queues[index + reg]); } index += reg_count; } @@ -7420,9 +7319,13 @@ static void gfx_v9_ip_dump(struct amdgpu_ip_block *ip_block) soc15_grbm_select(adev, 1 + i, j, k, 0, 0); for (reg = 0; reg < reg_count; reg++) { - adev->gfx.ip_dump_compute_queues[index + reg] = - RREG32(SOC15_REG_ENTRY_OFFSET( - gc_cp_reg_list_9[reg])); + if (i && gc_cp_reg_list_9[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP) + adev->gfx.ip_dump_compute_queues[index + reg] = + RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME2_HEADER_DUMP)); + else + adev->gfx.ip_dump_compute_queues[index + reg] = + RREG32(SOC15_REG_ENTRY_OFFSET( + gc_cp_reg_list_9[reg])); } index += reg_count; } @@ -7436,8 +7339,14 @@ static void gfx_v9_ip_dump(struct amdgpu_ip_block *ip_block) static void gfx_v9_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring) { + struct amdgpu_device *adev = ring->adev; + /* Emit the cleaner shader */ - amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0)); + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2)) + amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0)); + else + amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER_9_0, 0)); + amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */ } @@ -7544,7 +7453,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = { .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, .soft_recovery = gfx_v9_0_ring_soft_recovery, .emit_mem_sync = gfx_v9_0_emit_mem_sync, - .reset = gfx_v9_0_reset_kgq, .emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader, .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index 
d81449f9d822..c48cd47b531f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -1547,7 +1547,7 @@ static void gfx_v9_4_2_log_utc_edc_count(struct amdgpu_device *adev, { uint32_t bank, way, mem; static const char * const vml2_way_str[] = { "BIGK", "4K" }; - static const char * const utcl2_rounter_str[] = { "VMC", "APT" }; + static const char * const utcl2_router_str[] = { "VMC", "APT" }; mem = instance % blk->num_mem_blocks; way = (instance / blk->num_mem_blocks) % blk->num_ways; @@ -1568,7 +1568,7 @@ static void gfx_v9_4_2_log_utc_edc_count(struct amdgpu_device *adev, dev_info( adev->dev, "GFX SubBlock UTCL2_ROUTER_IFIF%d_GROUP0_%s, SED %d, DED %d\n", - bank, utcl2_rounter_str[mem], sec_cnt, ded_cnt); + bank, utcl2_router_str[mem], sec_cnt, ded_cnt); break; case ATC_L2_CACHE_2M: dev_info( diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 736398b0d16d..51babf5c78c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -105,9 +105,6 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_9_4_3[] = { SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_SAFE_MODE), SOC15_REG_ENTRY_STR(GC, 0, regRLC_INT_STAT), SOC15_REG_ENTRY_STR(GC, 0, regRLC_GPM_GENERAL_6), - /* cp header registers */ - SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), - SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME2_HEADER_DUMP), /* SE status registers */ SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0), SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1), @@ -154,6 +151,14 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9_4_3[] = { SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO), SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI), SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GFX_STATUS), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), + SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), }; struct amdgpu_gfx_ras gfx_v9_4_3_ras; @@ -867,13 +872,12 @@ static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle, switch (type) { case ACA_SMU_TYPE_UE: - bank->aca_err_type = ACA_BANK_ERR_UE_DE_DECODE(bank); + bank->aca_err_type = ACA_ERROR_TYPE_UE; ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 1ULL); break; case ACA_SMU_TYPE_CE: - bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank); - ret = aca_error_cache_log_bank_error(handle, &info, - bank->aca_err_type, + bank->aca_err_type = ACA_ERROR_TYPE_CE; + ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, ACA_REG__MISC0__ERRCNT(misc0)); break; default: @@ -1144,7 +1148,15 @@ static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block) switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 4, 3): case IP_VERSION(9, 4, 4): - if (adev->gfx.mec_fw_version >= 155) { + if ((adev->gfx.mec_fw_version >= 155) && + !amdgpu_sriov_vf(adev)) { + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE; + } + break; + case IP_VERSION(9, 5, 0): + if ((adev->gfx.mec_fw_version >= 21) && + !amdgpu_sriov_vf(adev)) { adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; 
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE; } @@ -1263,6 +1275,22 @@ static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id) } } +/* For ASICs that need the xnack chain and whose MEC firmware supports it, set the SQ_CONFIG1 + * DISABLE_XNACK_CHECK_IN_RETRY_DISABLE bit and inform KFD to set the xnack_chain + * bit in SET_RESOURCES + */ +static void gfx_v9_4_3_xcc_init_sq(struct amdgpu_device *adev, int xcc_id) +{ + uint32_t data; + + if (!(adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN)) + return; + + data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_CONFIG1); + data = REG_SET_FIELD(data, SQ_CONFIG1, DISABLE_XNACK_CHECK_IN_RETRY_DISABLE, 1); + WREG32_SOC15(GC, xcc_id, regSQ_CONFIG1, data); +} + static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev, int xcc_id) { @@ -1307,6 +1335,7 @@ static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev, gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id); gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id); + gfx_v9_4_3_xcc_init_sq(adev, xcc_id); } static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) { @@ -1319,6 +1348,22 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) adev->gfx.config.db_debug2 = RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2); + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + /* ToDo: GC 9.4.4 */ + case IP_VERSION(9, 4, 3): + if (adev->gfx.mec_fw_version >= 184 && + (amdgpu_sriov_reg_access_sq_config(adev) || + !amdgpu_sriov_vf(adev))) + adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN; + break; + case IP_VERSION(9, 5, 0): + if (adev->gfx.mec_fw_version >= 23) + adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN; + break; + default: + break; + } + for (i = 0; i < num_xcc; i++) gfx_v9_4_3_xcc_constants_init(adev, i); } @@ -2168,55 +2213,27 @@ static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_ static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id) { - struct amdgpu_ring *ring; - int r; - - ring = &adev->gfx.kiq[xcc_id].ring; - - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)) - return r; - - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (unlikely(r != 0)) { - amdgpu_bo_unreserve(ring->mqd_obj); - return r; - } - - gfx_v9_4_3_xcc_kiq_init_queue(ring, xcc_id); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; - amdgpu_bo_unreserve(ring->mqd_obj); + gfx_v9_4_3_xcc_kiq_init_queue(&adev->gfx.kiq[xcc_id].ring, xcc_id); return 0; } static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id) { - struct amdgpu_ring *ring = NULL; - int r = 0, i; + struct amdgpu_ring *ring; + int i, r; gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id); for (i = 0; i < adev->gfx.num_compute_rings; i++) { - ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings]; - - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)) - goto done; - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (!r) { - r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; - } - amdgpu_bo_unreserve(ring->mqd_obj); + ring = &adev->gfx.compute_ring[i + xcc_id * + adev->gfx.num_compute_rings]; + + r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false); if (r) - goto done; + return r; } - r = amdgpu_gfx_enable_kcq(adev, xcc_id); -done: - return r; + return amdgpu_gfx_enable_kcq(adev, xcc_id); } static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id) @@ -3476,9 +3493,7 @@ 
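For reference, REG_SET_FIELD() is a pure mask/shift macro over the generated sh_mask headers, so the SQ_CONFIG1 update in gfx_v9_4_3_xcc_init_sq() above expands to roughly:

	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_CONFIG1);
	data &= ~SQ_CONFIG1__DISABLE_XNACK_CHECK_IN_RETRY_DISABLE_MASK;
	data |= 1 << SQ_CONFIG1__DISABLE_XNACK_CHECK_IN_RETRY_DISABLE__SHIFT;
	WREG32_SOC15(GC, xcc_id, regSQ_CONFIG1, data);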
static int gfx_v9_4_3_unmap_done(struct amdgpu_device *adev, uint32_t me, static bool gfx_v9_4_3_pipe_reset_support(struct amdgpu_device *adev) { - /*TODO: Need check gfx9.4.4 mec fw whether supports pipe reset as well.*/ - if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) && - adev->gfx.mec_fw_version >= 0x0000009b) + if (!!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE)) return true; else dev_warn_once(adev->dev, "Please use the latest MEC version to see whether support pipe reset\n"); @@ -3541,7 +3556,8 @@ static int gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring *ring) } static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring, - unsigned int vmid) + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id]; @@ -3549,12 +3565,11 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring, unsigned long flags; int r; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; - if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + spin_lock_irqsave(&kiq->ring_lock, flags); if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { @@ -3580,7 +3595,9 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring, dev_err(adev->dev, "fail to wait on hqd deactive and will try pipe reset\n"); pipe_reset: - if(r) { + if (r) { + if (!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE)) + return -EOPNOTSUPP; r = gfx_v9_4_3_reset_hw_pipe(ring); dev_info(adev->dev, "ring: %s pipe reset :%s\n", ring->name, r ? "failed" : "successfully"); @@ -3588,20 +3605,9 @@ pipe_reset: return r; } - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)){ - dev_err(adev->dev, "fail to resv mqd_obj\n"); - return r; - } - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (!r) { - r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; - } - amdgpu_bo_unreserve(ring->mqd_obj); + r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true); if (r) { - dev_err(adev->dev, "fail to unresv mqd_obj\n"); + dev_err(adev->dev, "fail to init kcq\n"); return r; } spin_lock_irqsave(&kiq->ring_lock, flags); @@ -3612,14 +3618,14 @@ pipe_reset: } kiq->pmf->kiq_map_queues(kiq_ring, ring); amdgpu_ring_commit(kiq_ring); - spin_unlock_irqrestore(&kiq->ring_lock, flags); - r = amdgpu_ring_test_ring(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); if (r) { dev_err(adev->dev, "fail to remap queue\n"); return r; } - return amdgpu_ring_test_ring(ring); + + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } enum amdgpu_gfx_cp_ras_mem_id { @@ -4598,12 +4604,21 @@ static void gfx_v9_4_3_ip_print(struct amdgpu_ip_block *ip_block, struct drm_pri "\nxcc:%d mec:%d, pipe:%d, queue:%d\n", xcc_id, i, j, k); for (reg = 0; reg < reg_count; reg++) { - drm_printf(p, - "%-50s \t 0x%08x\n", - gc_cp_reg_list_9_4_3[reg].reg_name, - adev->gfx.ip_dump_compute_queues - [xcc_offset + inst_offset + - reg]); + if (i && gc_cp_reg_list_9_4_3[reg].reg_offset == + regCP_MEC_ME1_HEADER_DUMP) + drm_printf(p, + "%-50s \t 0x%08x\n", + "regCP_MEC_ME2_HEADER_DUMP", + adev->gfx.ip_dump_compute_queues + [xcc_offset + inst_offset + + reg]); + else + drm_printf(p, + "%-50s \t 0x%08x\n", + gc_cp_reg_list_9_4_3[reg].reg_name, + adev->gfx.ip_dump_compute_queues + [xcc_offset + inst_offset + + reg]); } inst_offset += reg_count; } @@ -4652,12 +4667,20 @@ static void 
gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block) GET_INST(GC, xcc_id)); for (reg = 0; reg < reg_count; reg++) { - adev->gfx.ip_dump_compute_queues - [xcc_offset + - inst_offset + reg] = - RREG32(SOC15_REG_ENTRY_OFFSET_INST( - gc_cp_reg_list_9_4_3[reg], - GET_INST(GC, xcc_id))); + if (i && gc_cp_reg_list_9_4_3[reg].reg_offset == + regCP_MEC_ME1_HEADER_DUMP) + adev->gfx.ip_dump_compute_queues + [xcc_offset + + inst_offset + reg] = + RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), + regCP_MEC_ME2_HEADER_DUMP)); + else + adev->gfx.ip_dump_compute_queues + [xcc_offset + + inst_offset + reg] = + RREG32(SOC15_REG_ENTRY_OFFSET_INST( + gc_cp_reg_list_9_4_3[reg], + GET_INST(GC, xcc_id))); } inst_offset += reg_count; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index cb25f7f0dfc1..6c03bf9f1ae8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -74,6 +74,8 @@ static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev, static void gfxhub_v1_2_xcc_init_gart_aperture_regs(struct amdgpu_device *adev, uint32_t xcc_mask) { + uint64_t gart_start = amdgpu_virt_xgmi_migrate_enabled(adev) ? + adev->gmc.vram_start : adev->gmc.fb_start; uint64_t pt_base; int i; @@ -91,10 +93,10 @@ static void gfxhub_v1_2_xcc_init_gart_aperture_regs(struct amdgpu_device *adev, if (adev->gmc.pdb0_bo) { WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, - (u32)(adev->gmc.fb_start >> 12)); + (u32)(gart_start >> 12)); WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, - (u32)(adev->gmc.fb_start >> 44)); + (u32)(gart_start >> 44)); WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, @@ -180,7 +182,7 @@ gfxhub_v1_2_xcc_init_system_aperture_regs(struct amdgpu_device *adev, /* In the case squeezing vram into GART aperture, we don't use * FB aperture and AGP aperture. Disable them. 
*/ - if (adev->gmc.pdb0_bo) { + if (adev->gmc.pdb0_bo && adev->gmc.xgmi.connected_to_cpu) { WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP, 0); WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF); WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_AGP_TOP, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 95d894a231fc..7923f491cf73 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -164,10 +164,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, entry->src_id, entry->ring_id, entry->vmid, entry->pasid); task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid); if (task_info) { - dev_err(adev->dev, - " in process %s pid %d thread %s pid %d\n", - task_info->process_name, task_info->tgid, - task_info->task_name, task_info->pid); + amdgpu_vm_print_task_info(adev, task_info); amdgpu_vm_put_task_info(task_info); } @@ -268,7 +265,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; /* flush hdp cache */ - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); /* This is necessary for SRIOV as well as for GFXOFF to function * properly under bare metal @@ -428,10 +425,6 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int struct amdgpu_device *adev = ring->adev; uint32_t reg; - /* MES fw manages IH_VMID_x_LUT updating */ - if (ring->is_mes_queue) - return; - if (ring->vm_hub == AMDGPU_GFXHUB(0)) reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid; else @@ -969,7 +962,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) adev->hdp.funcs->init_registers(adev); /* Flush HDP after it is initialized */ - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? 
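The repeated substitutions of adev->hdp.funcs->flush_hdp(adev, NULL) with amdgpu_device_flush_hdp(adev, NULL) across the gmc files route the flush through a device-level wrapper whose body is not part of this diff. The likely intent is to centralize the cases where the flush is unnecessary; a sketch under that assumption, not the actual amdgpu_device.c code:

void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring)
{
	/* assumption: APUs have no HDP cache in front of their carveout */
	if (adev->flags & AMD_IS_APU)
		return;

	if (adev->hdp.funcs && adev->hdp.funcs->flush_hdp)
		adev->hdp.funcs->flush_hdp(adev, ring);
}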
false : true; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index ad099f136f84..f15d691e9a20 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -134,10 +134,7 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev, entry->src_id, entry->ring_id, entry->vmid, entry->pasid); task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid); if (task_info) { - dev_err(adev->dev, - " in process %s pid %d thread %s pid %d)\n", - task_info->process_name, task_info->tgid, - task_info->task_name, task_info->pid); + amdgpu_vm_print_task_info(adev, task_info); amdgpu_vm_put_task_info(task_info); } @@ -229,7 +226,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; /* flush hdp cache */ - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); /* This is necessary for SRIOV as well as for GFXOFF to function * properly under bare metal @@ -393,10 +390,6 @@ static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int struct amdgpu_device *adev = ring->adev; uint32_t reg; - /* MES fw manages IH_VMID_x_LUT updating */ - if (ring->is_mes_queue) - return; - if (ring->vm_hub == AMDGPU_GFXHUB(0)) reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid; else @@ -752,6 +745,18 @@ static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block) adev->gmc.vram_type = vram_type; adev->gmc.vram_vendor = vram_vendor; + /* The mall_size is already calculated as mall_size_per_umc * num_umc. + * However, for gfx1151, which features a 2-to-1 UMC mapping, + * the result must be multiplied by 2 to determine the actual mall size. + */ + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(11, 5, 1): + adev->gmc.mall_size *= 2; + break; + default: + break; + } + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 1): @@ -832,7 +837,7 @@ static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block) * amdgpu graphics/compute will use VMIDs 1-7 * amdkfd will use VMIDs 8-15 */ - adev->vm_manager.first_kfd_vmid = 8; + adev->vm_manager.first_kfd_vmid = adev->gfx.disable_kq ? 1 : 8; amdgpu_vm_manager_init(adev); @@ -899,7 +904,7 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev) return r; /* Flush HDP after it is initialized */ - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? 
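Worked numbers for the gfx1151 MALL fixup above (the figures are illustrative, not actual gfx1151 discovery data):

	/* generic path: mall_size = mall_size_per_umc * num_umc,
	 * e.g. 2 MiB per UMC x 8 UMCs = 16 MiB */
	u64 mall_size = SZ_2M * 8;

	/* gfx1151 maps two UMCs to each MALL instance, so the per-UMC
	 * accounting covers only half the cache: 16 MiB * 2 = 32 MiB */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 1))
		mall_size *= 2;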
false : true; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c index 05c026d0b0d9..de763105fdfd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c @@ -127,10 +127,7 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev, entry->src_id, entry->ring_id, entry->vmid, entry->pasid); task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid); if (task_info) { - dev_err(adev->dev, - " in process %s pid %d thread %s pid %d)\n", - task_info->process_name, task_info->tgid, - task_info->task_name, task_info->pid); + amdgpu_vm_print_task_info(adev, task_info); amdgpu_vm_put_task_info(task_info); } @@ -297,7 +294,7 @@ static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, return; /* flush hdp cache */ - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); /* This is necessary for SRIOV as well as for GFXOFF to function * properly under bare metal @@ -413,10 +410,6 @@ static void gmc_v12_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid struct amdgpu_device *adev = ring->adev; uint32_t reg; - /* MES fw manages IH_VMID_x_LUT updating */ - if (ring->is_mes_queue) - return; - if (ring->vm_hub == AMDGPU_GFXHUB(0)) reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid; else @@ -820,7 +813,7 @@ static int gmc_v12_0_sw_init(struct amdgpu_ip_block *ip_block) * amdgpu graphics/compute will use VMIDs 1-7 * amdkfd will use VMIDs 8-15 */ - adev->vm_manager.first_kfd_vmid = 8; + adev->vm_manager.first_kfd_vmid = adev->gfx.disable_kq ? 1 : 8; amdgpu_vm_manager_init(adev); @@ -881,7 +874,7 @@ static int gmc_v12_0_gart_enable(struct amdgpu_device *adev) return r; /* Flush HDP after it is initialized */ - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? 
false : true; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index a992e79d9581..8030fcd64210 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -249,7 +249,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) /* disable VGA render */ tmp = RREG32(mmVGA_RENDER_CONTROL); - tmp &= ~VGA_VSTATUS_CNTL; + tmp &= VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK; WREG32(mmVGA_RENDER_CONTROL, tmp); } /* Update configuration */ @@ -627,17 +627,16 @@ static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev, "write" : "read", block, mc_client, mc_id); } -/* static const u32 mc_cg_registers[] = { - MC_HUB_MISC_HUB_CG, - MC_HUB_MISC_SIP_CG, - MC_HUB_MISC_VM_CG, - MC_XPB_CLK_GAT, - ATC_MISC_CG, - MC_CITF_MISC_WR_CG, - MC_CITF_MISC_RD_CG, - MC_CITF_MISC_VM_CG, - VM_L2_CG, + mmMC_HUB_MISC_HUB_CG, + mmMC_HUB_MISC_SIP_CG, + mmMC_HUB_MISC_VM_CG, + mmMC_XPB_CLK_GAT, + mmATC_MISC_CG, + mmMC_CITF_MISC_WR_CG, + mmMC_CITF_MISC_RD_CG, + mmMC_CITF_MISC_VM_CG, + mmVM_L2_CG, }; static const u32 mc_cg_ls_en[] = { @@ -672,7 +671,7 @@ static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev, for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { orig = data = RREG32(mc_cg_registers[i]); - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS)) + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) data |= mc_cg_ls_en[i]; else data &= ~mc_cg_ls_en[i]; @@ -689,7 +688,7 @@ static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev, for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { orig = data = RREG32(mc_cg_registers[i]); - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG)) + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) data |= mc_cg_en[i]; else data &= ~mc_cg_en[i]; @@ -705,7 +704,7 @@ static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev, orig = data = RREG32_PCIE(ixPCIE_CNTL2); - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) { + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) { data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1); data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1); data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1); @@ -728,7 +727,7 @@ static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev, orig = data = RREG32(mmHDP_HOST_PATH_CNTL); - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG)) + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG)) data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0); else data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1); @@ -744,7 +743,7 @@ static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev, orig = data = RREG32(mmHDP_MEM_POWER_LS); - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS)) + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1); else data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0); @@ -752,7 +751,6 @@ static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev, if (orig != data) WREG32(mmHDP_MEM_POWER_LS, data); } -*/ static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type) { @@ -1098,6 +1096,20 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev, static int gmc_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block, enum amd_clockgating_state state) { + struct amdgpu_device *adev = ip_block->adev; + bool gate = false; + + if (state == AMD_CG_STATE_GATE) + gate = true; + + if (!(adev->flags & AMD_IS_APU)) { + 
gmc_v6_0_enable_mc_mgcg(adev, gate); + gmc_v6_0_enable_mc_ls(adev, gate); + } + gmc_v6_0_enable_bif_mgls(adev, gate); + gmc_v6_0_enable_hdp_mgcg(adev, gate); + gmc_v6_0_enable_hdp_ls(adev, gate); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 83e39f16044a..a8d5795084fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -1157,17 +1157,10 @@ static bool gmc_v7_0_is_idle(struct amdgpu_ip_block *ip_block) static int gmc_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block) { unsigned int i; - u32 tmp; struct amdgpu_device *adev = ip_block->adev; for (i = 0; i < adev->usec_timeout; i++) { - /* read MC_STATUS */ - tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK | - SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | - SRBM_STATUS__MCC_BUSY_MASK | - SRBM_STATUS__MCD_BUSY_MASK | - SRBM_STATUS__VMC_BUSY_MASK); - if (!tmp) + if (gmc_v7_0_is_idle(ip_block)) return 0; udelay(1); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 99ca08e9bdb5..b45fa0cea9d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1458,9 +1458,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid); if (task_info) { - dev_err(adev->dev, " for process %s pid %d thread %s pid %d\n", - task_info->process_name, task_info->tgid, - task_info->task_name, task_info->pid); + amdgpu_vm_print_task_info(adev, task_info); amdgpu_vm_put_task_info(task_info); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 783e0c3b86b4..c4d69cf4e06c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -78,8 +78,6 @@ #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2 0x05ea #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX 2 -#define MAX_MEM_RANGES 8 - static const char * const gfxhub_client_ids[] = { "CB", "DB", @@ -411,11 +409,6 @@ static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = { (0x001d43e0 + 0x00001800), }; -static inline bool gmc_v9_0_is_multi_chiplet(struct amdgpu_device *adev) -{ - return !!adev->aid_mask; -} - static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned int type, @@ -641,10 +634,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid); if (task_info) { - dev_err(adev->dev, - " for process %s pid %d thread %s pid %d)\n", - task_info->process_name, task_info->tgid, - task_info->task_name, task_info->pid); + amdgpu_vm_print_task_info(adev, task_info); amdgpu_vm_put_task_info(task_info); } @@ -652,7 +642,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, addr, entry->client_id, soc15_ih_clientid_name[entry->client_id]); - if (gmc_v9_0_is_multi_chiplet(adev)) + if (amdgpu_is_multi_aid(adev)) dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n", node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4, node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? 
".XCD1" : ""); @@ -801,7 +791,7 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev, uint32_t vmhub) { if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) || - gmc_v9_0_is_multi_chiplet(adev)) + amdgpu_is_multi_aid(adev)) return false; return ((vmhub == AMDGPU_MMHUB0(0) || @@ -1131,8 +1121,8 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level, } static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, + struct amdgpu_vm *vm, struct amdgpu_bo *bo, - struct amdgpu_bo_va_mapping *mapping, uint64_t *flags) { struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev); @@ -1142,7 +1132,6 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, AMDGPU_GEM_CREATE_EXT_COHERENT); bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT; bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED; - struct amdgpu_vm *vm = mapping->bo_va->base.vm; unsigned int mtype_local, mtype; uint32_t gc_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0); bool snoop = false; @@ -1172,7 +1161,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, mtype = MTYPE_UC; else mtype = MTYPE_NC; - if (mapping->bo_va->is_xgmi) + if (amdgpu_xgmi_same_hive(adev, bo_adev)) snoop = true; } } else { @@ -1213,10 +1202,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, if (uncached) { mtype = MTYPE_UC; } else if (ext_coherent) { - if (gc_ip_version == IP_VERSION(9, 5, 0) || adev->rev_id) - mtype = is_local ? MTYPE_CC : MTYPE_UC; - else - mtype = MTYPE_UC; + mtype = is_local ? MTYPE_CC : MTYPE_UC; } else if (adev->flags & AMD_IS_APU) { mtype = is_local ? mtype_local : MTYPE_NC; } else { @@ -1267,7 +1253,8 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev, } if ((*flags & AMDGPU_PTE_VALID) && bo) - gmc_v9_0_get_coherence_flags(adev, bo, mapping, flags); + gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.vm, bo, + flags); } static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev, @@ -1336,7 +1323,7 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev, mtype_local = MTYPE_CC; *flags = AMDGPU_PTE_MTYPE_VG10(*flags, mtype_local); - } else if (adev->rev_id) { + } else { /* MTYPE_UC case */ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC); } @@ -1388,46 +1375,6 @@ static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) return size; } -static enum amdgpu_memory_partition -gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes) -{ - enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE; - - if (adev->nbio.funcs->get_memory_partition_mode) - mode = adev->nbio.funcs->get_memory_partition_mode(adev, - supp_modes); - - return mode; -} - -static enum amdgpu_memory_partition -gmc_v9_0_query_vf_memory_partition(struct amdgpu_device *adev) -{ - switch (adev->gmc.num_mem_partitions) { - case 0: - return UNKNOWN_MEMORY_PARTITION_MODE; - case 1: - return AMDGPU_NPS1_PARTITION_MODE; - case 2: - return AMDGPU_NPS2_PARTITION_MODE; - case 4: - return AMDGPU_NPS4_PARTITION_MODE; - default: - return AMDGPU_NPS1_PARTITION_MODE; - } - - return AMDGPU_NPS1_PARTITION_MODE; -} - -static enum amdgpu_memory_partition -gmc_v9_0_query_memory_partition(struct amdgpu_device *adev) -{ - if (amdgpu_sriov_vf(adev)) - return gmc_v9_0_query_vf_memory_partition(adev); - - return gmc_v9_0_get_memory_partition(adev, NULL); -} - static bool gmc_v9_0_need_reset_on_init(struct amdgpu_device *adev) { if (adev->nbio.funcs && 
adev->nbio.funcs->is_nps_switch_requested && @@ -1449,7 +1396,7 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = { .get_vm_pte = gmc_v9_0_get_vm_pte, .override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags, .get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size, - .query_mem_partition_mode = &gmc_v9_0_query_memory_partition, + .query_mem_partition_mode = &amdgpu_gmc_query_memory_partition, .request_mem_partition_mode = &amdgpu_gmc_request_memory_partition, .need_reset_on_init = &gmc_v9_0_need_reset_on_init, }; @@ -1505,7 +1452,6 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) adev->umc.umc_inst_num = UMC_V12_0_UMC_INSTANCE_NUM; adev->umc.node_inst_num /= UMC_V12_0_UMC_INSTANCE_NUM; adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET; - adev->umc.retire_unit = UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL; if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) adev->umc.ras = &umc_v12_0_ras; break; @@ -1557,7 +1503,7 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev) static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev) { - if (gmc_v9_0_is_multi_chiplet(adev)) + if (amdgpu_is_multi_aid(adev)) adev->gfxhub.funcs = &gfxhub_v1_2_funcs; else adev->gfxhub.funcs = &gfxhub_v1_0_funcs; @@ -1603,7 +1549,7 @@ static void gmc_v9_0_init_nps_details(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) return; - mode = gmc_v9_0_get_memory_partition(adev, &supp_modes); + mode = amdgpu_gmc_get_memory_partition(adev, &supp_modes); /* Mode detected by hardware and supported modes available */ if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) && supp_modes) { @@ -1639,7 +1585,7 @@ static int gmc_v9_0_early_init(struct amdgpu_ip_block *ip_block) */ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) || amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) || - gmc_v9_0_is_multi_chiplet(adev)) + amdgpu_is_multi_aid(adev)) adev->gmc.xgmi.supported = true; if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) { @@ -1726,7 +1672,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, /* add the xgmi offset of the physical node */ base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; - if (adev->gmc.xgmi.connected_to_cpu) { + if (amdgpu_gmc_is_pdb0_enabled(adev)) { amdgpu_gmc_sysvm_location(adev, mc); } else { amdgpu_gmc_vram_location(adev, mc, base); @@ -1841,7 +1787,7 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev) return 0; } - if (adev->gmc.xgmi.connected_to_cpu) { + if (amdgpu_gmc_is_pdb0_enabled(adev)) { adev->gmc.vmid0_page_table_depth = 1; adev->gmc.vmid0_page_table_block_size = 12; } else { @@ -1867,7 +1813,7 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev) if (r) return r; - if (adev->gmc.xgmi.connected_to_cpu) + if (amdgpu_gmc_is_pdb0_enabled(adev)) r = amdgpu_gmc_pdb0_alloc(adev); } @@ -1889,192 +1835,13 @@ static void gmc_v9_0_save_registers(struct amdgpu_device *adev) adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0); } -static bool gmc_v9_0_validate_partition_info(struct amdgpu_device *adev) -{ - enum amdgpu_memory_partition mode; - u32 supp_modes; - bool valid; - - mode = gmc_v9_0_get_memory_partition(adev, &supp_modes); - - /* Mode detected by hardware not present in supported modes */ - if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) && - !(BIT(mode - 1) & supp_modes)) - return false; - - switch (mode) { - case UNKNOWN_MEMORY_PARTITION_MODE: - case AMDGPU_NPS1_PARTITION_MODE: - valid = 
(adev->gmc.num_mem_partitions == 1); - break; - case AMDGPU_NPS2_PARTITION_MODE: - valid = (adev->gmc.num_mem_partitions == 2); - break; - case AMDGPU_NPS4_PARTITION_MODE: - valid = (adev->gmc.num_mem_partitions == 3 || - adev->gmc.num_mem_partitions == 4); - break; - default: - valid = false; - } - - return valid; -} - -static bool gmc_v9_0_is_node_present(int *node_ids, int num_ids, int nid) -{ - int i; - - /* Check if node with id 'nid' is present in 'node_ids' array */ - for (i = 0; i < num_ids; ++i) - if (node_ids[i] == nid) - return true; - - return false; -} - -static void -gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev, - struct amdgpu_mem_partition_info *mem_ranges) -{ - struct amdgpu_numa_info numa_info; - int node_ids[MAX_MEM_RANGES]; - int num_ranges = 0, ret; - int num_xcc, xcc_id; - uint32_t xcc_mask; - - num_xcc = NUM_XCC(adev->gfx.xcc_mask); - xcc_mask = (1U << num_xcc) - 1; - - for_each_inst(xcc_id, xcc_mask) { - ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info); - if (ret) - continue; - - if (numa_info.nid == NUMA_NO_NODE) { - mem_ranges[0].size = numa_info.size; - mem_ranges[0].numa.node = numa_info.nid; - num_ranges = 1; - break; - } - - if (gmc_v9_0_is_node_present(node_ids, num_ranges, - numa_info.nid)) - continue; - - node_ids[num_ranges] = numa_info.nid; - mem_ranges[num_ranges].numa.node = numa_info.nid; - mem_ranges[num_ranges].size = numa_info.size; - ++num_ranges; - } - - adev->gmc.num_mem_partitions = num_ranges; -} - -static void -gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev, - struct amdgpu_mem_partition_info *mem_ranges) -{ - enum amdgpu_memory_partition mode; - u32 start_addr = 0, size; - int i, r, l; - - mode = gmc_v9_0_query_memory_partition(adev); - - switch (mode) { - case UNKNOWN_MEMORY_PARTITION_MODE: - adev->gmc.num_mem_partitions = 0; - break; - case AMDGPU_NPS1_PARTITION_MODE: - adev->gmc.num_mem_partitions = 1; - break; - case AMDGPU_NPS2_PARTITION_MODE: - adev->gmc.num_mem_partitions = 2; - break; - case AMDGPU_NPS4_PARTITION_MODE: - if (adev->flags & AMD_IS_APU) - adev->gmc.num_mem_partitions = 3; - else - adev->gmc.num_mem_partitions = 4; - break; - default: - adev->gmc.num_mem_partitions = 1; - break; - } - - /* Use NPS range info, if populated */ - r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges, - &adev->gmc.num_mem_partitions); - if (!r) { - l = 0; - for (i = 1; i < adev->gmc.num_mem_partitions; ++i) { - if (mem_ranges[i].range.lpfn > - mem_ranges[i - 1].range.lpfn) - l = i; - } - - } else { - if (!adev->gmc.num_mem_partitions) { - dev_err(adev->dev, - "Not able to detect NPS mode, fall back to NPS1"); - adev->gmc.num_mem_partitions = 1; - } - /* Fallback to sw based calculation */ - size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT; - size /= adev->gmc.num_mem_partitions; - - for (i = 0; i < adev->gmc.num_mem_partitions; ++i) { - mem_ranges[i].range.fpfn = start_addr; - mem_ranges[i].size = - ((u64)size << AMDGPU_GPU_PAGE_SHIFT); - mem_ranges[i].range.lpfn = start_addr + size - 1; - start_addr += size; - } - - l = adev->gmc.num_mem_partitions - 1; - } - - /* Adjust the last one */ - mem_ranges[l].range.lpfn = - (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1; - mem_ranges[l].size = - adev->gmc.real_vram_size - - ((u64)mem_ranges[l].range.fpfn << AMDGPU_GPU_PAGE_SHIFT); -} - -static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev) -{ - bool valid; - - adev->gmc.mem_partitions = kcalloc(MAX_MEM_RANGES, - sizeof(struct amdgpu_mem_partition_info), - GFP_KERNEL); - if 
(!adev->gmc.mem_partitions) - return -ENOMEM; - - /* TODO : Get the range from PSP/Discovery for dGPU */ - if (adev->gmc.is_app_apu) - gmc_v9_0_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions); - else - gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions); - - if (amdgpu_sriov_vf(adev)) - valid = true; - else - valid = gmc_v9_0_validate_partition_info(adev); - if (!valid) { - /* TODO: handle invalid case */ - dev_WARN(adev->dev, - "Mem ranges not matching with hardware config"); - } - - return 0; -} - static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev) { adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; adev->gmc.vram_width = 128 * 64; + + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E; } static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block) @@ -2089,7 +1856,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block) spin_lock_init(&adev->gmc.invalidate_lock); - if (gmc_v9_0_is_multi_chiplet(adev)) { + if (amdgpu_is_multi_aid(adev)) { gmc_v9_4_3_init_vram_info(adev); } else if (!adev->bios) { if (adev->flags & AMD_IS_APU) { @@ -2239,8 +2006,8 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block) amdgpu_gmc_get_vbios_allocations(adev); - if (gmc_v9_0_is_multi_chiplet(adev)) { - r = gmc_v9_0_init_mem_ranges(adev); + if (amdgpu_is_multi_aid(adev)) { + r = amdgpu_gmc_init_mem_ranges(adev); if (r) return r; } @@ -2268,7 +2035,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block) adev->vm_manager.first_kfd_vmid = (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) || amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) || - gmc_v9_0_is_multi_chiplet(adev)) ? + amdgpu_is_multi_aid(adev)) ? 3 : 8; @@ -2280,7 +2047,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - if (gmc_v9_0_is_multi_chiplet(adev)) + if (amdgpu_is_multi_aid(adev)) amdgpu_gmc_sysfs_init(adev); return 0; @@ -2290,7 +2057,7 @@ static int gmc_v9_0_sw_fini(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; - if (gmc_v9_0_is_multi_chiplet(adev)) + if (amdgpu_is_multi_aid(adev)) amdgpu_gmc_sysfs_fini(adev); amdgpu_gmc_ras_fini(adev); @@ -2364,7 +2131,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) { int r; - if (adev->gmc.xgmi.connected_to_cpu) + if (amdgpu_gmc_is_pdb0_enabled(adev)) amdgpu_gmc_init_pdb0(adev); if (adev->gart.bo == NULL) { @@ -2411,13 +2178,6 @@ static int gmc_v9_0_hw_init(struct amdgpu_ip_block *ip_block) adev->gmc.flush_tlb_needs_extra_type_2 = amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) && adev->gmc.xgmi.num_physical_nodes; - /* - * TODO: This workaround is badly documented and had a buggy - * implementation. We should probably verify what we do here. - */ - adev->gmc.flush_tlb_needs_extra_type_0 = - amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) && - adev->rev_id == 0; /* The sequence of these two function calls matters.*/ gmc_v9_0_init_golden_registers(adev); @@ -2435,7 +2195,7 @@ static int gmc_v9_0_hw_init(struct amdgpu_ip_block *ip_block) adev->hdp.funcs->init_registers(adev); /* After HDP is initialized, flush HDP.*/ - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) value = false; @@ -2529,7 +2289,7 @@ static int gmc_v9_0_resume(struct amdgpu_ip_block *ip_block) * information again. 
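 * (an NPS switch changes how many partitions VRAM is split into, so the ranges computed before the reset are stale)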
*/ if (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS) { - gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions); + amdgpu_gmc_init_sw_mem_ranges(adev, adev->gmc.mem_partitions); adev->gmc.reset_flags &= ~AMDGPU_GMC_INIT_RESET_NPS; } diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c index f1dc13b3ab38..e6c0d86d3486 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c @@ -36,17 +36,6 @@ #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L #define mmHDP_MEM_POWER_CTRL_BASE_IDX 0 -static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev, - struct amdgpu_ring *ring) -{ - if (!ring || !ring->funcs->emit_wreg) { - WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); - } else { - amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - } -} - static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { @@ -180,7 +169,7 @@ struct amdgpu_hdp_ras hdp_v4_0_ras = { }; const struct amdgpu_hdp_funcs hdp_v4_0_funcs = { - .flush_hdp = hdp_v4_0_flush_hdp, + .flush_hdp = amdgpu_hdp_generic_flush, .invalidate_hdp = hdp_v4_0_invalidate_hdp, .update_clock_gating = hdp_v4_0_update_clock_gating, .get_clock_gating_state = hdp_v4_0_get_clockgating_state, diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c index 43195c079748..8bc001dc9f63 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c @@ -27,17 +27,6 @@ #include "hdp/hdp_5_0_0_sh_mask.h" #include <uapi/linux/kfd_ioctl.h> -static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev, - struct amdgpu_ring *ring) -{ - if (!ring || !ring->funcs->emit_wreg) { - WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); - } else { - amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - } -} - static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { @@ -217,7 +206,7 @@ static void hdp_v5_0_init_registers(struct amdgpu_device *adev) } const struct amdgpu_hdp_funcs hdp_v5_0_funcs = { - .flush_hdp = hdp_v5_0_flush_hdp, + .flush_hdp = amdgpu_hdp_generic_flush, .invalidate_hdp = hdp_v5_0_invalidate_hdp, .update_clock_gating = hdp_v5_0_update_clock_gating, .get_clock_gating_state = hdp_v5_0_get_clockgating_state, diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c index fcb8dd2876bc..40940b4ab400 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c @@ -33,7 +33,17 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev, if (!ring || !ring->funcs->emit_wreg) { WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + if (amdgpu_sriov_vf(adev)) { + /* this is fine because SR_IOV doesn't remap the register */ + RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + } else { + /* We just need to read back a register to post the write. + * Reading back the remapped register causes problems on + * some platforms so just read back the memory size register. 
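+ * (reading back any register posts the write; the memsize register is assumed here to be a side-effect-free choice)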
+ */ + if (adev->nbio.funcs->get_memsize) + adev->nbio.funcs->get_memsize(adev); + } } else { amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c index a88d25a06c29..ec20daf4272c 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c @@ -30,17 +30,6 @@ #define regHDP_CLK_CNTL_V6_1 0xd5 #define regHDP_CLK_CNTL_V6_1_BASE_IDX 0 -static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev, - struct amdgpu_ring *ring) -{ - if (!ring || !ring->funcs->emit_wreg) { - WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); - } else { - amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - } -} - static void hdp_v6_0_update_clock_gating(struct amdgpu_device *adev, bool enable) { @@ -149,7 +138,7 @@ static void hdp_v6_0_get_clockgating_state(struct amdgpu_device *adev, } const struct amdgpu_hdp_funcs hdp_v6_0_funcs = { - .flush_hdp = hdp_v6_0_flush_hdp, + .flush_hdp = amdgpu_hdp_generic_flush, .update_clock_gating = hdp_v6_0_update_clock_gating, .get_clock_gating_state = hdp_v6_0_get_clockgating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c index 49f7eb4fbd11..ed1debc03507 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c @@ -27,17 +27,6 @@ #include "hdp/hdp_7_0_0_sh_mask.h" #include <uapi/linux/kfd_ioctl.h> -static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev, - struct amdgpu_ring *ring) -{ - if (!ring || !ring->funcs->emit_wreg) { - WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); - } else { - amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - } -} - static void hdp_v7_0_update_clock_gating(struct amdgpu_device *adev, bool enable) { @@ -137,7 +126,7 @@ static void hdp_v7_0_get_clockgating_state(struct amdgpu_device *adev, } const struct amdgpu_hdp_funcs hdp_v7_0_funcs = { - .flush_hdp = hdp_v7_0_flush_hdp, + .flush_hdp = amdgpu_hdp_generic_flush, .update_clock_gating = hdp_v7_0_update_clock_gating, .get_clock_gating_state = hdp_v7_0_get_clockgating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c index eb4185dcbd1d..5900b560b7de 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c @@ -349,6 +349,7 @@ static int ih_v6_0_irq_init(struct amdgpu_device *adev) if (ret) return ret; } + ih[i]->overflow = false; } /* update doorbell range for ih ring 0 */ @@ -446,7 +447,10 @@ static u32 ih_v6_0_get_wptr(struct amdgpu_device *adev, wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr); if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) goto out; - wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); + if (!amdgpu_sriov_vf(adev)) + wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); + else + ih->overflow = true; /* When a ring buffer overflow happen start parsing interrupt * from the last not overwritten vector (wptr + 32). 
Hopefully diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c index cfa91d709d49..cc626036ed9c 100644 --- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c @@ -32,6 +32,7 @@ #include "gc/gc_11_0_0_sh_mask.h" MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu_kicker.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_1_imu.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_2_imu.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_3_imu.bin"); @@ -51,8 +52,12 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev) DRM_DEBUG("\n"); amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); - err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED, - "amdgpu/%s_imu.bin", ucode_prefix); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_imu_kicker.bin", ucode_prefix); + else + err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_imu.bin", ucode_prefix); if (err) goto out; diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c index df898dbb746e..58cd87db8061 100644 --- a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c @@ -34,12 +34,13 @@ MODULE_FIRMWARE("amdgpu/gc_12_0_0_imu.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_imu.bin"); +MODULE_FIRMWARE("amdgpu/gc_12_0_1_imu_kicker.bin"); #define TRANSFER_RAM_MASK 0x001c0000 static int imu_v12_0_init_microcode(struct amdgpu_device *adev) { - char ucode_prefix[15]; + char ucode_prefix[30]; int err; const struct imu_firmware_header_v1_0 *imu_hdr; struct amdgpu_firmware_info *info = NULL; @@ -47,8 +48,12 @@ static int imu_v12_0_init_microcode(struct amdgpu_device *adev) DRM_DEBUG("\n"); amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); - err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED, - "amdgpu/%s_imu.bin", ucode_prefix); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_imu_kicker.bin", ucode_prefix); + else + err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_imu.bin", ucode_prefix); if (err) goto out; @@ -362,7 +367,7 @@ static void program_imu_rlc_ram(struct amdgpu_device *adev, static void imu_v12_0_program_rlc_ram(struct amdgpu_device *adev) { u32 reg_data, size = 0; - const u32 *data; + const u32 *data = NULL; int r = -EINVAL; WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_INDEX, 0x2); diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c index 69dd92f6e86d..a887df520414 100644 --- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c +++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c @@ -25,9 +25,16 @@ * */ +#include <linux/gpio/machine.h> #include "amdgpu.h" #include "isp_v4_1_1.h" +#define ISP_PERFORMANCE_STATE_LOW 0 +#define ISP_PERFORMANCE_STATE_HIGH 1 + +#define ISP_HIGH_PERFORMANC_XCLK 788 +#define ISP_HIGH_PERFORMANC_ICLK 788 + static const unsigned int isp_4_1_1_int_srcid[MAX_ISP411_INT_SRC] = { ISP_4_1__SRCID__ISP_RINGBUFFER_WPT9, ISP_4_1__SRCID__ISP_RINGBUFFER_WPT10, @@ -39,22 +46,182 @@ static const unsigned int isp_4_1_1_int_srcid[MAX_ISP411_INT_SRC] = { ISP_4_1__SRCID__ISP_RINGBUFFER_WPT16 }; +static struct gpiod_lookup_table isp_gpio_table = { + .dev_id = "amd_isp_capture", + .table = { + GPIO_LOOKUP("AMDI0030:00", 85, "enable_isp", GPIO_ACTIVE_HIGH), + 
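+ /* line 85 of the "AMDI0030:00" SoC GPIO controller exposes the ISP enable pin, per the "enable_isp" con_id above */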
{ } + }, +}; + +static struct gpiod_lookup_table isp_sensor_gpio_table = { + .dev_id = "i2c-ov05c10", + .table = { + GPIO_LOOKUP("amdisp-pinctrl", 0, "enable", GPIO_ACTIVE_HIGH), + { } + }, +}; + +static int isp_poweroff(struct generic_pm_domain *genpd) +{ + struct amdgpu_isp *isp = container_of(genpd, struct amdgpu_isp, ispgpd); + struct amdgpu_device *adev = isp->adev; + + return amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ISP, true, 0); +} + +static int isp_poweron(struct generic_pm_domain *genpd) +{ + struct amdgpu_isp *isp = container_of(genpd, struct amdgpu_isp, ispgpd); + struct amdgpu_device *adev = isp->adev; + + return amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ISP, false, 0); +} + +static int isp_set_performance_state(struct generic_pm_domain *genpd, + unsigned int state) +{ + struct amdgpu_isp *isp = container_of(genpd, struct amdgpu_isp, ispgpd); + struct amdgpu_device *adev = isp->adev; + u32 iclk, xclk; + int ret; + + switch (state) { + case ISP_PERFORMANCE_STATE_HIGH: + xclk = ISP_HIGH_PERFORMANC_XCLK; + iclk = ISP_HIGH_PERFORMANC_ICLK; + break; + case ISP_PERFORMANCE_STATE_LOW: + /* isp runs at default lowest clock-rate on power-on, do nothing */ + return 0; + default: + return -EINVAL; + } + + ret = amdgpu_dpm_set_soft_freq_range(adev, PP_ISPXCLK, xclk, 0); + if (ret) { + drm_err(&adev->ddev, "failed to set xclk %u to %u: %d\n", + xclk, state, ret); + return ret; + } + + ret = amdgpu_dpm_set_soft_freq_range(adev, PP_ISPICLK, iclk, 0); + if (ret) { + drm_err(&adev->ddev, "failed to set iclk %u to %u: %d\n", + iclk, state, ret); + return ret; + } + + return 0; +} + +static int isp_genpd_add_device(struct device *dev, void *data) +{ + struct generic_pm_domain *gpd = data; + struct platform_device *pdev = container_of(dev, struct platform_device, dev); + struct amdgpu_isp *isp = container_of(gpd, struct amdgpu_isp, ispgpd); + struct amdgpu_device *adev = isp->adev; + int ret; + + if (!pdev) + return -EINVAL; + + if (!dev->type->name) { + drm_dbg(&adev->ddev, "Invalid device type to add\n"); + goto exit; + } + + if (strcmp(dev->type->name, "mfd_device")) { + drm_dbg(&adev->ddev, "Invalid isp mfd device %s to add\n", pdev->mfd_cell->name); + goto exit; + } + + ret = pm_genpd_add_device(gpd, dev); + if (ret) { + drm_err(&adev->ddev, "Failed to add dev %s to genpd %d\n", + pdev->mfd_cell->name, ret); + return -ENODEV; + } + +exit: + /* Continue to add */ + return 0; +} + +static int isp_genpd_remove_device(struct device *dev, void *data) +{ + struct generic_pm_domain *gpd = data; + struct platform_device *pdev = container_of(dev, struct platform_device, dev); + struct amdgpu_isp *isp = container_of(gpd, struct amdgpu_isp, ispgpd); + struct amdgpu_device *adev = isp->adev; + int ret; + + if (!pdev) + return -EINVAL; + + if (!dev->type->name) { + drm_dbg(&adev->ddev, "Invalid device type to remove\n"); + goto exit; + } + + if (strcmp(dev->type->name, "mfd_device")) { + drm_dbg(&adev->ddev, "Invalid isp mfd device %s to remove\n", + pdev->mfd_cell->name); + goto exit; + } + + ret = pm_genpd_remove_device(dev); + if (ret) { + drm_err(&adev->ddev, "Failed to remove dev from genpd %d\n", ret); + return -ENODEV; + } + +exit: + /* Continue to remove */ + return 0; +} + static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp) { + const struct software_node *amd_camera_node, *isp4_node; struct amdgpu_device *adev = isp->adev; + struct acpi_device *acpi_dev; int idx, int_idx, num_res, r; u64 isp_base; if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289) return 
-EINVAL; + r = amdgpu_acpi_get_isp4_dev(&acpi_dev); + if (r) { + drm_dbg(&adev->ddev, "Invalid isp platform detected (%d)", r); + /* allow GPU init to progress */ + return 0; + } + + /* add GPIO resources required for OMNI5C10 sensor */ + if (!strcmp("OMNI5C10", acpi_device_hid(acpi_dev))) { + gpiod_add_lookup_table(&isp_gpio_table); + gpiod_add_lookup_table(&isp_sensor_gpio_table); + } + isp_base = adev->rmmio_base; + isp->ispgpd.name = "ISP_v_4_1_1"; + isp->ispgpd.power_off = isp_poweroff; + isp->ispgpd.power_on = isp_poweron; + isp->ispgpd.set_performance_state = isp_set_performance_state; + + r = pm_genpd_init(&isp->ispgpd, NULL, true); + if (r) { + drm_err(&adev->ddev, "failed to initialize genpd (%d)\n", r); + return -EINVAL; + } + isp->isp_cell = kcalloc(3, sizeof(struct mfd_cell), GFP_KERNEL); if (!isp->isp_cell) { r = -ENOMEM; - drm_err(&adev->ddev, - "%s: isp mfd cell alloc failed\n", __func__); + drm_err(&adev->ddev, "isp mfd cell alloc failed (%d)\n", r); goto failure; } @@ -64,19 +231,20 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp) GFP_KERNEL); if (!isp->isp_res) { r = -ENOMEM; - drm_err(&adev->ddev, - "%s: isp mfd res alloc failed\n", __func__); + drm_err(&adev->ddev, "isp mfd resource alloc failed (%d)\n", r); goto failure; } isp->isp_pdata = kzalloc(sizeof(*isp->isp_pdata), GFP_KERNEL); if (!isp->isp_pdata) { r = -ENOMEM; - drm_err(&adev->ddev, - "%s: isp platform data alloc failed\n", __func__); + drm_err(&adev->ddev, "isp platform data alloc failed (%d)\n", r); goto failure; } + amd_camera_node = (const struct software_node *)acpi_dev->driver_data; + isp4_node = software_node_find_by_name(amd_camera_node, "isp4"); + /* initialize isp platform data */ isp->isp_pdata->adev = (void *)adev; isp->isp_pdata->asic_type = adev->asic_type; @@ -105,14 +273,14 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp) isp->isp_cell[0].num_resources = num_res; isp->isp_cell[0].resources = &isp->isp_res[0]; isp->isp_cell[0].platform_data = isp->isp_pdata; + isp->isp_cell[0].swnode = isp4_node; isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data); /* initialize isp i2c platform data */ isp->isp_i2c_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL); if (!isp->isp_i2c_res) { r = -ENOMEM; - drm_err(&adev->ddev, - "%s: isp mfd res alloc failed\n", __func__); + drm_err(&adev->ddev, "isp mfd res alloc failed (%d)\n", r); goto failure; } @@ -131,8 +299,7 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp) isp->isp_gpio_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL); if (!isp->isp_gpio_res) { r = -ENOMEM; - drm_err(&adev->ddev, - "%s: isp gpio res alloc failed\n", __func__); + drm_err(&adev->ddev, "isp gpio resource alloc failed (%d)\n", r); goto failure; } @@ -148,10 +315,23 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp) isp->isp_cell[2].platform_data = isp->isp_pdata; isp->isp_cell[2].pdata_size = sizeof(struct isp_platform_data); - r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 3); + /* add only amd_isp_capture and amd_isp_i2c_designware to genpd */ + r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 2); if (r) { - drm_err(&adev->ddev, - "%s: add mfd hotplug device failed\n", __func__); + drm_err(&adev->ddev, "add mfd hotplug device failed (%d)\n", r); + goto failure; + } + + r = device_for_each_child(isp->parent, &isp->ispgpd, + isp_genpd_add_device); + if (r) { + drm_err(&adev->ddev, "failed to add devices to genpd (%d)\n", r); + goto failure; + } + + r = mfd_add_hotplug_devices(isp->parent, &isp->isp_cell[2], 1); + if (r) { + 
drm_err(&adev->ddev, "add pinctl hotplug device failed (%d)\n", r); goto failure; } @@ -170,6 +350,9 @@ failure: static int isp_v4_1_1_hw_fini(struct amdgpu_isp *isp) { + device_for_each_child(isp->parent, NULL, + isp_genpd_remove_device); + mfd_remove_devices(isp->parent); kfree(isp->isp_res); diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index 4cde8a8bcc83..58239c405fda 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -118,7 +118,10 @@ static int jpeg_v2_0_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE; + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec); + if (!amdgpu_sriov_vf(adev)) + adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_jpeg_sysfs_reset_mask_init(adev); return r; @@ -764,11 +767,20 @@ static int jpeg_v2_0_process_interrupt(struct amdgpu_device *adev, return 0; } -static int jpeg_v2_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int jpeg_v2_0_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { - jpeg_v2_0_stop(ring->adev); - jpeg_v2_0_start(ring->adev); - return amdgpu_ring_test_helper(ring); + int r; + + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + r = jpeg_v2_0_stop(ring->adev); + if (r) + return r; + r = jpeg_v2_0_start(ring->adev); + if (r) + return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index 8b39e114f3be..3e2c389242db 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -167,7 +167,10 @@ static int jpeg_v2_5_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE; + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec); + if (!amdgpu_sriov_vf(adev)) + adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_jpeg_sysfs_reset_mask_init(adev); return r; @@ -643,11 +646,14 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev, return 0; } -static int jpeg_v2_5_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int jpeg_v2_5_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { + amdgpu_ring_reset_helper_begin(ring, timedout_fence); jpeg_v2_5_stop_inst(ring->adev, ring->me); jpeg_v2_5_start_inst(ring->adev, ring->me); - return amdgpu_ring_test_helper(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c index 2f8510c2986b..a44eb2667664 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -132,7 +132,10 @@ static int jpeg_v3_0_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE; + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec); + if (!amdgpu_sriov_vf(adev)) + adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_jpeg_sysfs_reset_mask_init(adev); return r; @@ -555,11 +558,20 @@ static int jpeg_v3_0_process_interrupt(struct amdgpu_device 
*adev, return 0; } -static int jpeg_v3_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int jpeg_v3_0_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { - jpeg_v3_0_stop(ring->adev); - jpeg_v3_0_start(ring->adev); - return amdgpu_ring_test_helper(ring); + int r; + + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + r = jpeg_v3_0_stop(ring->adev); + if (r) + return r; + r = jpeg_v3_0_start(ring->adev); + if (r) + return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index f17ec5414fd6..da3ee69f1a3b 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -143,7 +143,10 @@ static int jpeg_v4_0_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE; + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec); + if (!amdgpu_sriov_vf(adev)) + adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_jpeg_sysfs_reset_mask_init(adev); return r; @@ -720,14 +723,20 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev, return 0; } -static int jpeg_v4_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int jpeg_v4_0_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { - if (amdgpu_sriov_vf(ring->adev)) - return -EINVAL; + int r; - jpeg_v4_0_stop(ring->adev); - jpeg_v4_0_start(ring->adev); - return amdgpu_ring_test_helper(ring); + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + r = jpeg_v4_0_stop(ring->adev); + if (r) + return r; + r = jpeg_v4_0_start(ring->adev); + if (r) + return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index 5598a35f72af..b86288a69e7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -149,6 +149,18 @@ static int jpeg_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block) return r; } + /* JPEG DJPEG POISON EVENT */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, + VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq); + if (r) + return r; + + /* JPEG EJPEG POISON EVENT */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, + VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq); + if (r) + return r; + r = amdgpu_jpeg_sw_init(adev); if (r) return r; @@ -204,12 +216,11 @@ static int jpeg_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - if (!amdgpu_sriov_vf(adev)) { - adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE; - r = amdgpu_jpeg_sysfs_reset_mask_init(adev); - if (r) - return r; - } + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec); + if (!amdgpu_sriov_vf(adev)) + adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + r = amdgpu_jpeg_sysfs_reset_mask_init(adev); return 0; } @@ -230,8 +241,7 @@ static int jpeg_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block) if (r) return r; - if (!amdgpu_sriov_vf(adev)) - amdgpu_jpeg_sysfs_reset_mask_fini(adev); + amdgpu_jpeg_sysfs_reset_mask_fini(adev); r = amdgpu_jpeg_sw_fini(adev); @@ -434,6 +444,9 @@ static int jpeg_v4_0_3_hw_fini(struct amdgpu_ip_block 
*ip_block) ret = jpeg_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE); } + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) + amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0); + return ret; } @@ -1041,6 +1054,14 @@ static int jpeg_v4_0_3_set_interrupt_state(struct amdgpu_device *adev, return 0; } +static int jpeg_v4_0_3_set_ras_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + static int jpeg_v4_0_3_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -1120,14 +1141,17 @@ static void jpeg_v4_0_3_core_stall_reset(struct amdgpu_ring *ring) WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00); } -static int jpeg_v4_0_3_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int jpeg_v4_0_3_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { if (amdgpu_sriov_vf(ring->adev)) return -EOPNOTSUPP; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); jpeg_v4_0_3_core_stall_reset(ring); jpeg_v4_0_3_start_jrbc(ring); - return amdgpu_ring_test_helper(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = { @@ -1200,6 +1224,11 @@ static const struct amdgpu_irq_src_funcs jpeg_v4_0_3_irq_funcs = { .process = jpeg_v4_0_3_process_interrupt, }; +static const struct amdgpu_irq_src_funcs jpeg_v4_0_3_ras_irq_funcs = { + .set = jpeg_v4_0_3_set_ras_interrupt_state, + .process = amdgpu_jpeg_process_poison_irq, +}; + static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev) { int i; @@ -1208,6 +1237,9 @@ static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev) adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings; } adev->jpeg.inst->irq.funcs = &jpeg_v4_0_3_irq_funcs; + + adev->jpeg.inst->ras_poison_irq.num_types = 1; + adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v4_0_3_ras_irq_funcs; } const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block = { @@ -1304,9 +1336,47 @@ static void jpeg_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev) jpeg_v4_0_3_inst_reset_ras_error_count(adev, i); } +static uint32_t jpeg_v4_0_3_query_poison_by_instance(struct amdgpu_device *adev, + uint32_t instance, uint32_t sub_block) +{ + uint32_t poison_stat = 0, reg_value = 0; + + switch (sub_block) { + case AMDGPU_JPEG_V4_0_3_JPEG0: + reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG0_STATUS); + poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF); + break; + case AMDGPU_JPEG_V4_0_3_JPEG1: + reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG1_STATUS); + poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF); + break; + default: + break; + } + + if (poison_stat) + dev_info(adev->dev, "Poison detected in JPEG%d sub_block%d\n", + instance, sub_block); + + return poison_stat; +} + +static bool jpeg_v4_0_3_query_ras_poison_status(struct amdgpu_device *adev) +{ + uint32_t inst = 0, sub = 0, poison_stat = 0; + + for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++) + for (sub = 0; sub < AMDGPU_JPEG_V4_0_3_MAX_SUB_BLOCK; sub++) + poison_stat += + jpeg_v4_0_3_query_poison_by_instance(adev, inst, sub); + + return !!poison_stat; +} + static const struct amdgpu_ras_block_hw_ops jpeg_v4_0_3_ras_hw_ops = { .query_ras_error_count = jpeg_v4_0_3_query_ras_error_count, .reset_ras_error_count = jpeg_v4_0_3_reset_ras_error_count, + .query_poison_status 
= jpeg_v4_0_3_query_ras_poison_status, }; static int jpeg_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank, @@ -1328,7 +1398,7 @@ static int jpeg_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_ban 1ULL); break; case ACA_SMU_TYPE_CE: - bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank); + bank->aca_err_type = ACA_ERROR_TYPE_CE; ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, ACA_REG__MISC0__ERRCNT(misc0)); break; @@ -1383,6 +1453,13 @@ static int jpeg_v4_0_3_ras_late_init(struct amdgpu_device *adev, struct ras_comm if (r) return r; + if (amdgpu_ras_is_supported(adev, ras_block->block) && + adev->jpeg.inst->ras_poison_irq.funcs) { + r = amdgpu_irq_get(adev, &adev->jpeg.inst->ras_poison_irq, 0); + if (r) + goto late_fini; + } + r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG, &jpeg_v4_0_3_aca_info, NULL); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h index a90bf370a002..2e110d04af84 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h @@ -46,6 +46,13 @@ #define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000 +enum amdgpu_jpeg_v4_0_3_sub_block { + AMDGPU_JPEG_V4_0_3_JPEG0 = 0, + AMDGPU_JPEG_V4_0_3_JPEG1, + + AMDGPU_JPEG_V4_0_3_MAX_SUB_BLOCK, +}; + extern const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block; void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c index 974030a5c03c..481d1a2dbe5a 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c @@ -174,9 +174,10 @@ static int jpeg_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - /* TODO: Add queue reset mask when FW fully supports it */ adev->jpeg.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]); + if (!amdgpu_sriov_vf(adev)) + adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_jpeg_sysfs_reset_mask_init(adev); if (r) return r; @@ -767,6 +768,22 @@ static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev, return 0; } +static int jpeg_v4_0_5_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) +{ + int r; + + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + r = jpeg_v4_0_5_stop(ring->adev); + if (r) + return r; + r = jpeg_v4_0_5_start(ring->adev); + if (r) + return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); +} + static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = { .name = "jpeg_v4_0_5", .early_init = jpeg_v4_0_5_early_init, @@ -812,6 +829,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = { .emit_wreg = jpeg_v2_0_dec_ring_emit_wreg, .emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, + .reset = jpeg_v4_0_5_ring_reset, }; static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c index 31d213ccbe0a..e0a71909252b 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c @@ -120,13 +120,13 @@ static int jpeg_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - /* TODO: Add queue reset mask when FW fully supports it */ adev->jpeg.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]); + if 
(!amdgpu_sriov_vf(adev)) + adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_jpeg_sysfs_reset_mask_init(adev); - if (r) - return r; - return 0; + + return r; } /** @@ -644,6 +644,22 @@ static int jpeg_v5_0_0_process_interrupt(struct amdgpu_device *adev, return 0; } +static int jpeg_v5_0_0_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) +{ + int r; + + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + r = jpeg_v5_0_0_stop(ring->adev); + if (r) + return r; + r = jpeg_v5_0_0_start(ring->adev); + if (r) + return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); +} + static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = { .name = "jpeg_v5_0_0", .early_init = jpeg_v5_0_0_early_init, @@ -689,6 +705,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = { .emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg, .emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, + .reset = jpeg_v5_0_0_ring_reset, }; static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c index 218e16b68f1d..54523dc1f702 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c @@ -28,15 +28,18 @@ #include "soc15d.h" #include "jpeg_v4_0_3.h" #include "jpeg_v5_0_1.h" +#include "mmsch_v5_0.h" #include "vcn/vcn_5_0_0_offset.h" #include "vcn/vcn_5_0_0_sh_mask.h" #include "ivsrcid/vcn/irqsrcs_vcn_5_0.h" +static int jpeg_v5_0_1_start_sriov(struct amdgpu_device *adev); static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev); static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev); static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block, enum amd_powergating_state state); +static void jpeg_v5_0_1_set_ras_funcs(struct amdgpu_device *adev); static void jpeg_v5_0_1_dec_ring_set_wptr(struct amdgpu_ring *ring); static int amdgpu_ih_srcid_jpeg[] = { @@ -118,6 +121,7 @@ static int jpeg_v5_0_1_early_init(struct amdgpu_ip_block *ip_block) adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS; jpeg_v5_0_1_set_dec_ring_funcs(adev); jpeg_v5_0_1_set_irq_funcs(adev); + jpeg_v5_0_1_set_ras_funcs(adev); return 0; } @@ -142,6 +146,17 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; } + /* JPEG DJPEG POISON EVENT */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, + VCN_5_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq); + if (r) + return r; + + /* JPEG EJPEG POISON EVENT */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, + VCN_5_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq); + if (r) + return r; r = amdgpu_jpeg_sw_init(adev); if (r) @@ -156,21 +171,16 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block) for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { ring = &adev->jpeg.inst[i].ring_dec[j]; - ring->use_doorbell = false; + ring->use_doorbell = true; ring->vm_hub = AMDGPU_MMHUB0(adev->jpeg.inst[i].aid_id); if (!amdgpu_sriov_vf(adev)) { ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + j + 11 * jpeg_inst; } else { - if (j < 4) - ring->doorbell_index = - (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + - 4 + j + 32 * jpeg_inst; - else - ring->doorbell_index = - (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + - 8 + j + 32 * jpeg_inst; + ring->doorbell_index = + (adev->doorbell_index.vcn.vcn_ring0_1 << 
1) + + 2 + j + 32 * jpeg_inst; } sprintf(ring->name, "jpeg_dec_%d.%d", adev->jpeg.inst[i].aid_id, j); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, @@ -190,14 +200,13 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - if (!amdgpu_sriov_vf(adev)) { - adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE; - r = amdgpu_jpeg_sysfs_reset_mask_init(adev); - if (r) - return r; - } + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]); + if (!amdgpu_sriov_vf(adev)) + adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + r = amdgpu_jpeg_sysfs_reset_mask_init(adev); - return 0; + return r; } /** @@ -216,8 +225,7 @@ static int jpeg_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block) if (r) return r; - if (!amdgpu_sriov_vf(adev)) - amdgpu_jpeg_sysfs_reset_mask_fini(adev); + amdgpu_jpeg_sysfs_reset_mask_fini(adev); r = amdgpu_jpeg_sw_fini(adev); @@ -237,7 +245,10 @@ static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block) int i, j, r, jpeg_inst; if (amdgpu_sriov_vf(adev)) { - /* jpeg_v5_0_1_start_sriov(adev); */ + r = jpeg_v5_0_1_start_sriov(adev); + if (r) + return r; + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { ring = &adev->jpeg.inst[i].ring_dec[j]; @@ -264,7 +275,7 @@ static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block) ring = &adev->jpeg.inst[i].ring_dec[j]; if (ring->use_doorbell) WREG32_SOC15_OFFSET(VCN, GET_INST(VCN, i), regVCN_JPEG_DB_CTRL, - (ring->pipe ? (ring->pipe - 0x15) : 0), + ring->pipe, ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT | VCN_JPEG_DB_CTRL__EN_MASK); @@ -291,8 +302,13 @@ static int jpeg_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block) cancel_delayed_work_sync(&adev->jpeg.idle_work); - if (adev->jpeg.cur_state != AMD_PG_STATE_GATE) - ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE); + if (!amdgpu_sriov_vf(adev)) { + if (adev->jpeg.cur_state != AMD_PG_STATE_GATE) + ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE); + } + + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) + amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0); return ret; } @@ -422,6 +438,119 @@ static void jpeg_v5_0_1_init_jrbc(struct amdgpu_ring *ring) reg_offset); } +static int jpeg_v5_0_1_start_sriov(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + uint64_t ctx_addr; + uint32_t param, resp, expected; + uint32_t tmp, timeout; + + struct amdgpu_mm_table *table = &adev->virt.mm_table; + uint32_t *table_loc; + uint32_t table_size; + uint32_t size, size_dw, item_offset; + uint32_t init_status; + int i, j, jpeg_inst; + + struct mmsch_v5_0_cmd_direct_write + direct_wt = { {0} }; + struct mmsch_v5_0_cmd_end end = { {0} }; + struct mmsch_v5_0_init_header header; + + direct_wt.cmd_header.command_type = + MMSCH_COMMAND__DIRECT_REG_WRITE; + end.cmd_header.command_type = + MMSCH_COMMAND__END; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) { + jpeg_inst = GET_INST(JPEG, i); + + memset(&header, 0, sizeof(struct mmsch_v5_0_init_header)); + header.version = MMSCH_VERSION; + header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2; + + table_loc = (uint32_t *)table->cpu_addr; + table_loc += header.total_size; + + item_offset = header.total_size; + + for (j = 0; j < adev->jpeg.num_jpeg_rings; j++) { + ring = &adev->jpeg.inst[i].ring_dec[j]; + table_size = 0; + + tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW); + MMSCH_V5_0_INSERT_DIRECT_WT(tmp, 
lower_32_bits(ring->gpu_addr)); + tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH); + MMSCH_V5_0_INSERT_DIRECT_WT(tmp, upper_32_bits(ring->gpu_addr)); + tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_JRBC_RB_SIZE); + MMSCH_V5_0_INSERT_DIRECT_WT(tmp, ring->ring_size / 4); + + if (j < 5) { + header.mjpegdec0[j].table_offset = item_offset; + header.mjpegdec0[j].init_status = 0; + header.mjpegdec0[j].table_size = table_size; + } else { + header.mjpegdec1[j - 5].table_offset = item_offset; + header.mjpegdec1[j - 5].init_status = 0; + header.mjpegdec1[j - 5].table_size = table_size; + } + header.total_size += table_size; + item_offset += table_size; + } + + MMSCH_V5_0_INSERT_END(); + + /* send init table to MMSCH */ + size = sizeof(struct mmsch_v5_0_init_header); + table_loc = (uint32_t *)table->cpu_addr; + memcpy((void *)table_loc, &header, size); + + ctx_addr = table->gpu_addr; + WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr)); + WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr)); + + tmp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID); + tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK; + tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); + WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID, tmp); + + size = header.total_size; + WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_SIZE, size); + + WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP, 0); + + param = 0x00000001; + WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_HOST, param); + tmp = 0; + timeout = 1000; + resp = 0; + expected = MMSCH_VF_MAILBOX_RESP__OK; + init_status = + ((struct mmsch_v5_0_init_header *)(table_loc))->mjpegdec0[i].init_status; + while (resp != expected) { + resp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP); + + if (resp != 0) + break; + udelay(10); + tmp = tmp + 10; + if (tmp >= timeout) { + DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\ + " waiting for regMMSCH_VF_MAILBOX_RESP "\ + "(expected=0x%08x, readback=0x%08x)\n", + tmp, expected, resp); + return -EBUSY; + } + } + if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE && + init_status != MMSCH_VF_ENGINE_STATUS__PASS) + DRM_ERROR("MMSCH init status is incorrect! 
readback=0x%08x, header init status for jpeg: %x\n", + resp, init_status); + + } + return 0; +} + /** * jpeg_v5_0_1_start - start JPEG block * @@ -581,6 +710,11 @@ static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block, struct amdgpu_device *adev = ip_block->adev; int ret; + if (amdgpu_sriov_vf(adev)) { + adev->jpeg.cur_state = AMD_PG_STATE_UNGATE; + return 0; + } + if (state == adev->jpeg.cur_state) return 0; @@ -603,6 +737,16 @@ static int jpeg_v5_0_1_set_interrupt_state(struct amdgpu_device *adev, return 0; } +static int jpeg_v5_0_1_set_ras_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + + + static int jpeg_v5_0_1_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -688,14 +832,14 @@ static void jpeg_v5_0_1_core_stall_reset(struct amdgpu_ring *ring) WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00); } -static int jpeg_v5_0_1_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int jpeg_v5_0_1_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { - if (amdgpu_sriov_vf(ring->adev)) - return -EOPNOTSUPP; - + amdgpu_ring_reset_helper_begin(ring, timedout_fence); jpeg_v5_0_1_core_stall_reset(ring); jpeg_v5_0_1_init_jrbc(ring); - return amdgpu_ring_test_helper(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static const struct amd_ip_funcs jpeg_v5_0_1_ip_funcs = { @@ -772,6 +916,11 @@ static const struct amdgpu_irq_src_funcs jpeg_v5_0_1_irq_funcs = { .process = jpeg_v5_0_1_process_interrupt, }; +static const struct amdgpu_irq_src_funcs jpeg_v5_0_1_ras_irq_funcs = { + .set = jpeg_v5_0_1_set_ras_interrupt_state, + .process = amdgpu_jpeg_process_poison_irq, +}; + static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev) { int i; @@ -780,6 +929,10 @@ static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev) adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings; adev->jpeg.inst->irq.funcs = &jpeg_v5_0_1_irq_funcs; + + adev->jpeg.inst->ras_poison_irq.num_types = 1; + adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v5_0_1_ras_irq_funcs; + } const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block = { @@ -789,3 +942,150 @@ const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block = { .rev = 1, .funcs = &jpeg_v5_0_1_ip_funcs, }; + +static uint32_t jpeg_v5_0_1_query_poison_by_instance(struct amdgpu_device *adev, + uint32_t instance, uint32_t sub_block) +{ + uint32_t poison_stat = 0, reg_value = 0; + + switch (sub_block) { + case AMDGPU_JPEG_V5_0_1_JPEG0: + reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG0_STATUS); + poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF); + break; + case AMDGPU_JPEG_V5_0_1_JPEG1: + reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG1_STATUS); + poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF); + break; + default: + break; + } + + if (poison_stat) + dev_info(adev->dev, "Poison detected in JPEG%d sub_block%d\n", + instance, sub_block); + + return poison_stat; +} + +static bool jpeg_v5_0_1_query_ras_poison_status(struct amdgpu_device *adev) +{ + uint32_t inst = 0, sub = 0, poison_stat = 0; + + for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++) + for (sub = 0; sub < AMDGPU_JPEG_V5_0_1_MAX_SUB_BLOCK; sub++) + poison_stat += + jpeg_v5_0_1_query_poison_by_instance(adev, inst, sub); + + return !!poison_stat; +} + +static 
const struct amdgpu_ras_block_hw_ops jpeg_v5_0_1_ras_hw_ops = { + .query_poison_status = jpeg_v5_0_1_query_ras_poison_status, +}; + +static int jpeg_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank, + enum aca_smu_type type, void *data) +{ + struct aca_bank_info info; + u64 misc0; + int ret; + + ret = aca_bank_info_decode(bank, &info); + if (ret) + return ret; + + misc0 = bank->regs[ACA_REG_IDX_MISC0]; + switch (type) { + case ACA_SMU_TYPE_UE: + bank->aca_err_type = ACA_ERROR_TYPE_UE; + ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE, + 1ULL); + break; + case ACA_SMU_TYPE_CE: + bank->aca_err_type = ACA_ERROR_TYPE_CE; + ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, + ACA_REG__MISC0__ERRCNT(misc0)); + break; + default: + return -EINVAL; + } + + return ret; +} + +/* reference to smu driver if header file */ +static int jpeg_v5_0_1_err_codes[] = { + 16, 17, 18, 19, 20, 21, 22, 23, /* JPEG[0-7][S|D] */ + 24, 25, 26, 27, 28, 29, 30, 31 +}; + +static bool jpeg_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, + enum aca_smu_type type, void *data) +{ + u32 instlo; + + instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]); + instlo &= GENMASK(31, 1); + + if (instlo != mmSMNAID_AID0_MCA_SMU) + return false; + + if (aca_bank_check_error_codes(handle->adev, bank, + jpeg_v5_0_1_err_codes, + ARRAY_SIZE(jpeg_v5_0_1_err_codes))) + return false; + + return true; +} + +static const struct aca_bank_ops jpeg_v5_0_1_aca_bank_ops = { + .aca_bank_parser = jpeg_v5_0_1_aca_bank_parser, + .aca_bank_is_valid = jpeg_v5_0_1_aca_bank_is_valid, +}; + +static const struct aca_info jpeg_v5_0_1_aca_info = { + .hwip = ACA_HWIP_TYPE_SMU, + .mask = ACA_ERROR_UE_MASK, + .bank_ops = &jpeg_v5_0_1_aca_bank_ops, +}; + +static int jpeg_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) +{ + int r; + + r = amdgpu_ras_block_late_init(adev, ras_block); + if (r) + return r; + + if (amdgpu_ras_is_supported(adev, ras_block->block) && + adev->jpeg.inst->ras_poison_irq.funcs) { + r = amdgpu_irq_get(adev, &adev->jpeg.inst->ras_poison_irq, 0); + if (r) + goto late_fini; + } + + r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG, + &jpeg_v5_0_1_aca_info, NULL); + if (r) + goto late_fini; + + return 0; + +late_fini: + amdgpu_ras_block_late_fini(adev, ras_block); + + return r; +} + +static struct amdgpu_jpeg_ras jpeg_v5_0_1_ras = { + .ras_block = { + .hw_ops = &jpeg_v5_0_1_ras_hw_ops, + .ras_late_init = jpeg_v5_0_1_ras_late_init, + }, +}; + +static void jpeg_v5_0_1_set_ras_funcs(struct amdgpu_device *adev) +{ + adev->jpeg.ras = &jpeg_v5_0_1_ras; +} diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h index efdab57324e4..a7e58d5fb246 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h @@ -26,6 +26,9 @@ extern const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block; +#define regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET 0x4094 +#define regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET 0x1bffe + #define regUVD_JRBC0_UVD_JRBC_RB_WPTR 0x0640 #define regUVD_JRBC0_UVD_JRBC_RB_WPTR_BASE_IDX 1 #define regUVD_JRBC0_UVD_JRBC_STATUS 0x0649 @@ -98,4 +101,11 @@ extern const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block; #define regVCN_RRMT_CNTL 0x0940 #define regVCN_RRMT_CNTL_BASE_IDX 1 +enum amdgpu_jpeg_v5_0_1_sub_block { + AMDGPU_JPEG_V5_0_1_JPEG0 = 0, + AMDGPU_JPEG_V5_0_1_JPEG1, + + AMDGPU_JPEG_V5_0_1_MAX_SUB_BLOCK, +}; 
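+/* each sub-block above has a matching UVD_RAS_JPEGn_STATUS register that the poison query polls for POISONED_PF */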
+ #endif /* __JPEG_V5_0_1_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c new file mode 100644 index 000000000000..d6f50b13e2ba --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c @@ -0,0 +1,355 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "amdgpu_gfx.h" +#include "mes_userqueue.h" +#include "amdgpu_userq_fence.h" + +#define AMDGPU_USERQ_PROC_CTX_SZ PAGE_SIZE +#define AMDGPU_USERQ_GANG_CTX_SZ PAGE_SIZE + +static int +mes_userq_map_gtt_bo_to_gart(struct amdgpu_bo *bo) +{ + int ret; + + ret = amdgpu_bo_reserve(bo, true); + if (ret) { + DRM_ERROR("Failed to reserve bo. ret %d\n", ret); + goto err_reserve_bo_failed; + } + + ret = amdgpu_ttm_alloc_gart(&bo->tbo); + if (ret) { + DRM_ERROR("Failed to bind bo to GART. 
ret %d\n", ret); + goto err_map_bo_gart_failed; + } + + amdgpu_bo_unreserve(bo); + bo = amdgpu_bo_ref(bo); + + return 0; + +err_map_bo_gart_failed: + amdgpu_bo_unreserve(bo); +err_reserve_bo_failed: + return ret; +} + +static int +mes_userq_create_wptr_mapping(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_usermode_queue *queue, + uint64_t wptr) +{ + struct amdgpu_bo_va_mapping *wptr_mapping; + struct amdgpu_vm *wptr_vm; + struct amdgpu_userq_obj *wptr_obj = &queue->wptr_obj; + int ret; + + wptr_vm = queue->vm; + ret = amdgpu_bo_reserve(wptr_vm->root.bo, false); + if (ret) + return ret; + + wptr &= AMDGPU_GMC_HOLE_MASK; + wptr_mapping = amdgpu_vm_bo_lookup_mapping(wptr_vm, wptr >> PAGE_SHIFT); + amdgpu_bo_unreserve(wptr_vm->root.bo); + if (!wptr_mapping) { + DRM_ERROR("Failed to lookup wptr bo\n"); + return -EINVAL; + } + + wptr_obj->obj = wptr_mapping->bo_va->base.bo; + if (wptr_obj->obj->tbo.base.size > PAGE_SIZE) { + DRM_ERROR("Requested GART mapping for wptr bo larger than one page\n"); + return -EINVAL; + } + + ret = mes_userq_map_gtt_bo_to_gart(wptr_obj->obj); + if (ret) { + DRM_ERROR("Failed to map wptr bo to GART\n"); + return ret; + } + + queue->wptr_obj.gpu_addr = amdgpu_bo_gpu_offset_no_check(wptr_obj->obj); + return 0; +} + +static int convert_to_mes_priority(int priority) +{ + switch (priority) { + case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW: + default: + return AMDGPU_MES_PRIORITY_LEVEL_NORMAL; + case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_LOW: + return AMDGPU_MES_PRIORITY_LEVEL_LOW; + case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_HIGH: + return AMDGPU_MES_PRIORITY_LEVEL_MEDIUM; + case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH: + return AMDGPU_MES_PRIORITY_LEVEL_HIGH; + } +} + +static int mes_userq_map(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_usermode_queue *queue) +{ + struct amdgpu_device *adev = uq_mgr->adev; + struct amdgpu_userq_obj *ctx = &queue->fw_obj; + struct amdgpu_mqd_prop *userq_props = queue->userq_prop; + struct mes_add_queue_input queue_input; + int r; + + memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input)); + + queue_input.process_va_start = 0; + queue_input.process_va_end = adev->vm_manager.max_pfn - 1; + + /* set process quantum to 10 ms and gang quantum to 1 ms as default */ + queue_input.process_quantum = 100000; + queue_input.gang_quantum = 10000; + queue_input.paging = false; + + queue_input.process_context_addr = ctx->gpu_addr; + queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ; + queue_input.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL; + queue_input.gang_global_priority_level = convert_to_mes_priority(queue->priority); + + queue_input.process_id = queue->vm->pasid; + queue_input.queue_type = queue->queue_type; + queue_input.mqd_addr = queue->mqd.gpu_addr; + queue_input.wptr_addr = userq_props->wptr_gpu_addr; + queue_input.queue_size = userq_props->queue_size >> 2; + queue_input.doorbell_offset = userq_props->doorbell_index; + queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(queue->vm->root.bo); + queue_input.wptr_mc_addr = queue->wptr_obj.gpu_addr; + + amdgpu_mes_lock(&adev->mes); + r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input); + amdgpu_mes_unlock(&adev->mes); + if (r) { + DRM_ERROR("Failed to map queue in HW, err (%d)\n", r); + return r; + } + + DRM_DEBUG_DRIVER("Queue (doorbell:%d) mapped successfully\n", userq_props->doorbell_index); + return 0; +} + +static int mes_userq_unmap(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_usermode_queue 
*queue) +{ + struct amdgpu_device *adev = uq_mgr->adev; + struct mes_remove_queue_input queue_input; + struct amdgpu_userq_obj *ctx = &queue->fw_obj; + int r; + + memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input)); + queue_input.doorbell_offset = queue->doorbell_index; + queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ; + + amdgpu_mes_lock(&adev->mes); + r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input); + amdgpu_mes_unlock(&adev->mes); + if (r) + DRM_ERROR("Failed to unmap queue in HW, err (%d)\n", r); + return r; +} + +static int mes_userq_create_ctx_space(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_usermode_queue *queue, + struct drm_amdgpu_userq_in *mqd_user) +{ + struct amdgpu_userq_obj *ctx = &queue->fw_obj; + int r, size; + + /* + * The FW expects at least one page space allocated for + * process ctx and gang ctx each. Create an object + * for the same. + */ + size = AMDGPU_USERQ_PROC_CTX_SZ + AMDGPU_USERQ_GANG_CTX_SZ; + r = amdgpu_userq_create_object(uq_mgr, ctx, size); + if (r) { + DRM_ERROR("Failed to allocate ctx space bo for userqueue, err:%d\n", r); + return r; + } + + return 0; +} + +static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, + struct drm_amdgpu_userq_in *args_in, + struct amdgpu_usermode_queue *queue) +{ + struct amdgpu_device *adev = uq_mgr->adev; + struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type]; + struct drm_amdgpu_userq_in *mqd_user = args_in; + struct amdgpu_mqd_prop *userq_props; + int r; + + /* Structure to initialize MQD for userqueue using generic MQD init function */ + userq_props = kzalloc(sizeof(struct amdgpu_mqd_prop), GFP_KERNEL); + if (!userq_props) { + DRM_ERROR("Failed to allocate memory for userq_props\n"); + return -ENOMEM; + } + + if (!mqd_user->wptr_va || !mqd_user->rptr_va || + !mqd_user->queue_va || mqd_user->queue_size == 0) { + DRM_ERROR("Invalid MQD parameters for userqueue\n"); + r = -EINVAL; + goto free_props; + } + + r = amdgpu_userq_create_object(uq_mgr, &queue->mqd, mqd_hw_default->mqd_size); + if (r) { + DRM_ERROR("Failed to create MQD object for userqueue\n"); + goto free_props; + } + + /* Initialize the MQD BO with user given values */ + userq_props->wptr_gpu_addr = mqd_user->wptr_va; + userq_props->rptr_gpu_addr = mqd_user->rptr_va; + userq_props->queue_size = mqd_user->queue_size; + userq_props->hqd_base_gpu_addr = mqd_user->queue_va; + userq_props->mqd_gpu_addr = queue->mqd.gpu_addr; + userq_props->use_doorbell = true; + userq_props->doorbell_index = queue->doorbell_index; + userq_props->fence_address = queue->fence_drv->gpu_addr; + + if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) { + struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd; + + if (mqd_user->mqd_size != sizeof(*compute_mqd)) { + DRM_ERROR("Invalid compute IP MQD size\n"); + r = -EINVAL; + goto free_mqd; + } + + compute_mqd = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size); + if (IS_ERR(compute_mqd)) { + DRM_ERROR("Failed to read user MQD\n"); + r = -ENOMEM; + goto free_mqd; + } + + userq_props->eop_gpu_addr = compute_mqd->eop_va; + userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL; + userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM; + userq_props->hqd_active = false; + userq_props->tmz_queue = + mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE; + kfree(compute_mqd); + } else if (queue->queue_type == AMDGPU_HW_IP_GFX) { + struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11; + + if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || 
!mqd_user->mqd) { + DRM_ERROR("Invalid GFX MQD\n"); + r = -EINVAL; + goto free_mqd; + } + + mqd_gfx_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size); + if (IS_ERR(mqd_gfx_v11)) { + DRM_ERROR("Failed to read user MQD\n"); + r = -ENOMEM; + goto free_mqd; + } + + userq_props->shadow_addr = mqd_gfx_v11->shadow_va; + userq_props->csa_addr = mqd_gfx_v11->csa_va; + userq_props->tmz_queue = + mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE; + kfree(mqd_gfx_v11); + } else if (queue->queue_type == AMDGPU_HW_IP_DMA) { + struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11; + + if (mqd_user->mqd_size != sizeof(*mqd_sdma_v11) || !mqd_user->mqd) { + DRM_ERROR("Invalid SDMA MQD\n"); + r = -EINVAL; + goto free_mqd; + } + + mqd_sdma_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size); + if (IS_ERR(mqd_sdma_v11)) { + DRM_ERROR("Failed to read sdma user MQD\n"); + r = -ENOMEM; + goto free_mqd; + } + + userq_props->csa_addr = mqd_sdma_v11->csa_va; + kfree(mqd_sdma_v11); + } + + queue->userq_prop = userq_props; + + r = mqd_hw_default->init_mqd(adev, (void *)queue->mqd.cpu_ptr, userq_props); + if (r) { + DRM_ERROR("Failed to initialize MQD for userqueue\n"); + goto free_mqd; + } + + /* Create BO for FW operations */ + r = mes_userq_create_ctx_space(uq_mgr, queue, mqd_user); + if (r) { + DRM_ERROR("Failed to allocate BO for userqueue (%d)", r); + goto free_mqd; + } + + /* FW expects WPTR BOs to be mapped into GART */ + r = mes_userq_create_wptr_mapping(uq_mgr, queue, userq_props->wptr_gpu_addr); + if (r) { + DRM_ERROR("Failed to create WPTR mapping\n"); + goto free_ctx; + } + + return 0; + +free_ctx: + amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj); + +free_mqd: + amdgpu_userq_destroy_object(uq_mgr, &queue->mqd); + +free_props: + kfree(userq_props); + + return r; +} + +static void +mes_userq_mqd_destroy(struct amdgpu_userq_mgr *uq_mgr, + struct amdgpu_usermode_queue *queue) +{ + amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj); + kfree(queue->userq_prop); + amdgpu_userq_destroy_object(uq_mgr, &queue->mqd); +} + +const struct amdgpu_userq_funcs userq_mes_funcs = { + .mqd_create = mes_userq_mqd_create, + .mqd_destroy = mes_userq_mqd_destroy, + .unmap = mes_userq_unmap, + .map = mes_userq_map, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.h b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.h new file mode 100644 index 000000000000..090ae8897770 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef MES_USERQ_H
+#define MES_USERQ_H
+#include "amdgpu_userq.h"
+
+extern const struct amdgpu_userq_funcs userq_mes_funcs;
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index e65916ada23b..28eb846280dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -287,6 +287,23 @@ static int convert_to_mes_queue_type(int queue_type)
 	return -1;
 }
 
+static int convert_to_mes_priority_level(int priority_level)
+{
+	switch (priority_level) {
+	case AMDGPU_MES_PRIORITY_LEVEL_LOW:
+		return AMD_PRIORITY_LEVEL_LOW;
+	case AMDGPU_MES_PRIORITY_LEVEL_NORMAL:
+	default:
+		return AMD_PRIORITY_LEVEL_NORMAL;
+	case AMDGPU_MES_PRIORITY_LEVEL_MEDIUM:
+		return AMD_PRIORITY_LEVEL_MEDIUM;
+	case AMDGPU_MES_PRIORITY_LEVEL_HIGH:
+		return AMD_PRIORITY_LEVEL_HIGH;
+	case AMDGPU_MES_PRIORITY_LEVEL_REALTIME:
+		return AMD_PRIORITY_LEVEL_REALTIME;
+	}
+}
+
 static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
 				  struct mes_add_queue_input *input)
 {
@@ -310,9 +327,9 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
 	mes_add_queue_pkt.gang_quantum = input->gang_quantum;
 	mes_add_queue_pkt.gang_context_addr = input->gang_context_addr;
 	mes_add_queue_pkt.inprocess_gang_priority =
-		input->inprocess_gang_priority;
+		convert_to_mes_priority_level(input->inprocess_gang_priority);
 	mes_add_queue_pkt.gang_global_priority_level =
-		input->gang_global_priority_level;
+		convert_to_mes_priority_level(input->gang_global_priority_level);
 	mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
 	mes_add_queue_pkt.mqd_addr = input->mqd_addr;
@@ -458,31 +475,6 @@ static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_typ
 	return r;
 }
 
-static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes,
-				    struct mes_reset_queue_input *input)
-{
-	if (input->use_mmio)
-		return mes_v11_0_reset_queue_mmio(mes, input->queue_type,
-						  input->me_id, input->pipe_id,
-						  input->queue_id, input->vmid);
-
-	union MESAPI__RESET mes_reset_queue_pkt;
-
-	memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
-
-	mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
-	mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
-	mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
-
-	mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
-	mes_reset_queue_pkt.gang_context_addr = input->gang_context_addr;
-	/*mes_reset_queue_pkt.reset_queue_only = 1;*/
-
-	return mes_v11_0_submit_pkt_and_poll_completion(mes,
-			&mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
-			offsetof(union MESAPI__REMOVE_QUEUE, api_status));
-}
-
 static int mes_v11_0_map_legacy_queue(struct amdgpu_mes *mes,
 				      struct mes_map_legacy_queue_input *input)
 {
@@ -649,7 +641,7 @@ static int mes_v11_0_misc_op(struct amdgpu_mes *mes,
 		break;
 	case MES_MISC_OP_CHANGE_CONFIG:
 		if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) < 0x63) {
-			dev_err(mes->adev->dev, "MES FW versoin must be larger than 0x63 to support limit single process feature.\n");
+			dev_err(mes->adev->dev, "MES FW version must be 0x63 or newer to support the limit single process feature.\n");
 			return -EINVAL;
 		}
 		misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG;
@@ -694,7 +686,8 @@ static int
mes_v11_0_set_hw_resources(struct amdgpu_mes *mes) mes->compute_hqd_mask[i]; for (i = 0; i < MAX_GFX_PIPES; i++) - mes_set_hw_res_pkt.gfx_hqd_mask[i] = mes->gfx_hqd_mask[i]; + mes_set_hw_res_pkt.gfx_hqd_mask[i] = + mes->gfx_hqd_mask[i]; for (i = 0; i < MAX_SDMA_PIPES; i++) mes_set_hw_res_pkt.sdma_hqd_mask[i] = mes->sdma_hqd_mask[i]; @@ -723,7 +716,7 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes) mes->event_log_gpu_addr; } - if (enforce_isolation) + if (adev->enforce_isolation[0] == AMDGPU_ENFORCE_ISOLATION_ENABLE) mes_set_hw_res_pkt.limit_single_process = 1; return mes_v11_0_submit_pkt_and_poll_completion(mes, @@ -753,8 +746,8 @@ static int mes_v11_0_set_hw_resources_1(struct amdgpu_mes *mes) offsetof(union MESAPI_SET_HW_RESOURCES_1, api_status)); } -static int mes_v11_0_reset_legacy_queue(struct amdgpu_mes *mes, - struct mes_reset_legacy_queue_input *input) +static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes, + struct mes_reset_queue_input *input) { union MESAPI__RESET mes_reset_queue_pkt; @@ -772,7 +765,7 @@ static int mes_v11_0_reset_legacy_queue(struct amdgpu_mes *mes, mes_reset_queue_pkt.queue_type = convert_to_mes_queue_type(input->queue_type); - if (mes_reset_queue_pkt.queue_type == MES_QUEUE_TYPE_GFX) { + if (input->legacy_gfx) { mes_reset_queue_pkt.reset_legacy_gfx = 1; mes_reset_queue_pkt.pipe_id_lp = input->pipe_id; mes_reset_queue_pkt.queue_id_lp = input->queue_id; @@ -798,7 +791,6 @@ static const struct amdgpu_mes_funcs mes_v11_0_funcs = { .suspend_gang = mes_v11_0_suspend_gang, .resume_gang = mes_v11_0_resume_gang, .misc_op = mes_v11_0_misc_op, - .reset_legacy_queue = mes_v11_0_reset_legacy_queue, .reset_hw_queue = mes_v11_0_reset_hw_queue, }; @@ -894,6 +886,10 @@ static void mes_v11_0_get_fw_version(struct amdgpu_device *adev) { int pipe; + /* return early if we have already fetched these */ + if (adev->mes.sched_version && adev->mes.kiq_version) + return; + /* get MES scheduler/KIQ versions */ mutex_lock(&adev->srbm_mutex); @@ -1634,10 +1630,12 @@ static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block) if (r) goto failure; - r = mes_v11_0_set_hw_resources_1(&adev->mes); - if (r) { - DRM_ERROR("failed mes_v11_0_set_hw_resources_1, r=%d\n", r); - goto failure; + if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x50) { + r = mes_v11_0_set_hw_resources_1(&adev->mes); + if (r) { + DRM_ERROR("failed mes_v11_0_set_hw_resources_1, r=%d\n", r); + goto failure; + } } r = mes_v11_0_query_sched_status(&adev->mes); @@ -1697,22 +1695,10 @@ static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block) return 0; } -static int mes_v11_0_late_init(struct amdgpu_ip_block *ip_block) -{ - struct amdgpu_device *adev = ip_block->adev; - - /* it's only intended for use in mes_self_test case, not for s0ix and reset */ - if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend && - (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(11, 0, 3))) - amdgpu_mes_self_test(adev); - - return 0; -} - static const struct amd_ip_funcs mes_v11_0_ip_funcs = { .name = "mes_v11_0", .early_init = mes_v11_0_early_init, - .late_init = mes_v11_0_late_init, + .late_init = NULL, .sw_init = mes_v11_0_sw_init, .sw_fini = mes_v11_0_sw_fini, .hw_init = mes_v11_0_hw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c index 183dd3346da5..6b222630f3fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c @@ -274,6 +274,23 @@ static int convert_to_mes_queue_type(int queue_type) return 
-1; } +static int convert_to_mes_priority_level(int priority_level) +{ + switch (priority_level) { + case AMDGPU_MES_PRIORITY_LEVEL_LOW: + return AMD_PRIORITY_LEVEL_LOW; + case AMDGPU_MES_PRIORITY_LEVEL_NORMAL: + default: + return AMD_PRIORITY_LEVEL_NORMAL; + case AMDGPU_MES_PRIORITY_LEVEL_MEDIUM: + return AMD_PRIORITY_LEVEL_MEDIUM; + case AMDGPU_MES_PRIORITY_LEVEL_HIGH: + return AMD_PRIORITY_LEVEL_HIGH; + case AMDGPU_MES_PRIORITY_LEVEL_REALTIME: + return AMD_PRIORITY_LEVEL_REALTIME; + } +} + static int mes_v12_0_add_hw_queue(struct amdgpu_mes *mes, struct mes_add_queue_input *input) { @@ -297,9 +314,9 @@ static int mes_v12_0_add_hw_queue(struct amdgpu_mes *mes, mes_add_queue_pkt.gang_quantum = input->gang_quantum; mes_add_queue_pkt.gang_context_addr = input->gang_context_addr; mes_add_queue_pkt.inprocess_gang_priority = - input->inprocess_gang_priority; + convert_to_mes_priority_level(input->inprocess_gang_priority); mes_add_queue_pkt.gang_global_priority_level = - input->gang_global_priority_level; + convert_to_mes_priority_level(input->gang_global_priority_level); mes_add_queue_pkt.doorbell_offset = input->doorbell_offset; mes_add_queue_pkt.mqd_addr = input->mqd_addr; @@ -477,32 +494,6 @@ static int mes_v12_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_typ return r; } -static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes, - struct mes_reset_queue_input *input) -{ - union MESAPI__RESET mes_reset_queue_pkt; - int pipe; - - memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt)); - - mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER; - mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET; - mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; - - mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset; - mes_reset_queue_pkt.gang_context_addr = input->gang_context_addr; - /*mes_reset_queue_pkt.reset_queue_only = 1;*/ - - if (mes->adev->enable_uni_mes) - pipe = AMDGPU_MES_KIQ_PIPE; - else - pipe = AMDGPU_MES_SCHED_PIPE; - - return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe, - &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt), - offsetof(union MESAPI__REMOVE_QUEUE, api_status)); -} - static int mes_v12_0_map_legacy_queue(struct amdgpu_mes *mes, struct mes_map_legacy_queue_input *input) { @@ -762,7 +753,7 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe) pipe * (AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE); } - if (enforce_isolation) + if (adev->enforce_isolation[0] == AMDGPU_ENFORCE_ISOLATION_ENABLE) mes_set_hw_res_pkt.limit_single_process = 1; return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe, @@ -845,8 +836,8 @@ static void mes_v12_0_enable_unmapped_doorbell_handling( WREG32_SOC15(GC, 0, regCP_UNMAPPED_DOORBELL, data); } -static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes, - struct mes_reset_legacy_queue_input *input) +static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes, + struct mes_reset_queue_input *input) { union MESAPI__RESET mes_reset_queue_pkt; int pipe; @@ -865,7 +856,7 @@ static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes, mes_reset_queue_pkt.queue_type = convert_to_mes_queue_type(input->queue_type); - if (mes_reset_queue_pkt.queue_type == MES_QUEUE_TYPE_GFX) { + if (input->legacy_gfx) { mes_reset_queue_pkt.reset_legacy_gfx = 1; mes_reset_queue_pkt.pipe_id_lp = input->pipe_id; mes_reset_queue_pkt.queue_id_lp = input->queue_id; @@ -878,7 +869,7 @@ static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes, mes_reset_queue_pkt.doorbell_offset = 
input->doorbell_offset; } - if (mes->adev->enable_uni_mes) + if (input->is_kq) pipe = AMDGPU_MES_KIQ_PIPE; else pipe = AMDGPU_MES_SCHED_PIPE; @@ -896,7 +887,6 @@ static const struct amdgpu_mes_funcs mes_v12_0_funcs = { .suspend_gang = mes_v12_0_suspend_gang, .resume_gang = mes_v12_0_resume_gang, .misc_op = mes_v12_0_misc_op, - .reset_legacy_queue = mes_v12_0_reset_legacy_queue, .reset_hw_queue = mes_v12_0_reset_hw_queue, }; @@ -1392,17 +1382,20 @@ static int mes_v12_0_queue_init(struct amdgpu_device *adev, mes_v12_0_queue_init_register(ring); } - /* get MES scheduler/KIQ versions */ - mutex_lock(&adev->srbm_mutex); - soc21_grbm_select(adev, 3, pipe, 0, 0); + if (((pipe == AMDGPU_MES_SCHED_PIPE) && !adev->mes.sched_version) || + ((pipe == AMDGPU_MES_KIQ_PIPE) && !adev->mes.kiq_version)) { + /* get MES scheduler/KIQ versions */ + mutex_lock(&adev->srbm_mutex); + soc21_grbm_select(adev, 3, pipe, 0, 0); - if (pipe == AMDGPU_MES_SCHED_PIPE) - adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO); - else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq) - adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO); + if (pipe == AMDGPU_MES_SCHED_PIPE) + adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO); + else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq) + adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO); - soc21_grbm_select(adev, 0, 0, 0, 0); - mutex_unlock(&adev->srbm_mutex); + soc21_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + } return 0; } @@ -1749,7 +1742,8 @@ static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block) if (r) goto failure; - mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_SCHED_PIPE); + if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x4b) + mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_SCHED_PIPE); mes_v12_0_init_aggregated_doorbell(&adev->mes); @@ -1808,21 +1802,10 @@ static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block) return 0; } -static int mes_v12_0_late_init(struct amdgpu_ip_block *ip_block) -{ - struct amdgpu_device *adev = ip_block->adev; - - /* it's only intended for use in mes_self_test case, not for s0ix and reset */ - if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend) - amdgpu_mes_self_test(adev); - - return 0; -} - static const struct amd_ip_funcs mes_v12_0_ip_funcs = { .name = "mes_v12_0", .early_init = mes_v12_0_early_init, - .late_init = mes_v12_0_late_init, + .late_init = NULL, .sw_init = mes_v12_0_sw_init, .sw_fini = mes_v12_0_sw_fini, .hw_init = mes_v12_0_hw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index a54e7b929295..cc688ae79e84 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -30,6 +30,7 @@ #include "soc15_common.h" #include "soc15.h" #include "amdgpu_ras.h" +#include "amdgpu_psp.h" #define regVM_L2_CNTL3_DEFAULT 0x80100007 #define regVM_L2_CNTL4_DEFAULT 0x000000c1 @@ -75,6 +76,8 @@ static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmi static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev) { + uint64_t gart_start = amdgpu_virt_xgmi_migrate_enabled(adev) ? 
+ adev->gmc.vram_start : adev->gmc.fb_start; uint64_t pt_base; u32 inst_mask; int i; @@ -94,10 +97,10 @@ static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev) if (adev->gmc.pdb0_bo) { WREG32_SOC15(MMHUB, i, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, - (u32)(adev->gmc.fb_start >> 12)); + (u32)(gart_start >> 12)); WREG32_SOC15(MMHUB, i, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, - (u32)(adev->gmc.fb_start >> 44)); + (u32)(gart_start >> 44)); WREG32_SOC15(MMHUB, i, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, @@ -192,10 +195,8 @@ static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev) uint32_t tmp, inst_mask; int i; - /* Setup TLB control */ - inst_mask = adev->aid_mask; - for_each_inst(i, inst_mask) { - tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL); + if (amdgpu_sriov_reg_indirect_l1_tlb_cntl(adev)) { + tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1); @@ -209,7 +210,26 @@ static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev) MTYPE, MTYPE_UC);/* XXX for emulation. */ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); - WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp); + psp_reg_program_no_ring(&adev->psp, tmp, PSP_REG_MMHUB_L1_TLB_CNTL); + } else { + inst_mask = adev->aid_mask; + for_each_inst(i, inst_mask) { + tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL); + + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, + 1); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + SYSTEM_ACCESS_MODE, 3); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + ENABLE_ADVANCED_DRIVER_MODEL, 1); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + MTYPE, MTYPE_UC);/* XXX for emulation. 
*/ + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); + + WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp); + } } } @@ -221,6 +241,9 @@ static void mmhub_v1_8_init_snoop_override_regs(struct amdgpu_device *adev) uint32_t distance = regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE - regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE; + if (amdgpu_sriov_vf(adev)) + return; + inst_mask = adev->aid_mask; for_each_inst(i, inst_mask) { for (j = 0; j < 5; j++) { /* DAGB instances */ @@ -454,6 +477,30 @@ static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev) return 0; } +static void mmhub_v1_8_disable_l1_tlb(struct amdgpu_device *adev) +{ + u32 tmp; + u32 i, inst_mask; + + if (amdgpu_sriov_reg_indirect_l1_tlb_cntl(adev)) { + tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + ENABLE_ADVANCED_DRIVER_MODEL, 0); + psp_reg_program_no_ring(&adev->psp, tmp, PSP_REG_MMHUB_L1_TLB_CNTL); + } else { + inst_mask = adev->aid_mask; + for_each_inst(i, inst_mask) { + tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, + 0); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + ENABLE_ADVANCED_DRIVER_MODEL, 0); + WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp); + } + } +} + static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub; @@ -467,15 +514,6 @@ static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev) for (i = 0; i < 16; i++) WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT0_CNTL, i * hub->ctx_distance, 0); - - /* Setup TLB control */ - tmp = RREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, - 0); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, - ENABLE_ADVANCED_DRIVER_MODEL, 0); - WREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL, tmp); - if (!amdgpu_sriov_vf(adev)) { /* Setup L2 cache */ tmp = RREG32_SOC15(MMHUB, j, regVM_L2_CNTL); @@ -485,6 +523,8 @@ static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev) WREG32_SOC15(MMHUB, j, regVM_L2_CNTL3, 0); } } + + mmhub_v1_8_disable_l1_tlb(adev); } /** @@ -751,7 +791,7 @@ static int mmhub_v1_8_aca_bank_parser(struct aca_handle *handle, struct aca_bank 1ULL); break; case ACA_SMU_TYPE_CE: - bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank); + bank->aca_err_type = ACA_ERROR_TYPE_CE; ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, ACA_REG__MISC0__ERRCNT(misc0)); break; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c index f2ab5001b492..951998454b25 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c @@ -37,39 +37,31 @@ static const char *mmhub_client_ids_v4_1_0[][2] = { [0][0] = "VMC", [4][0] = "DCEDMC", - [5][0] = "DCEVGA", [6][0] = "MP0", [7][0] = "MP1", [8][0] = "MPIO", - [16][0] = "HDP", - [17][0] = "LSDMA", - [18][0] = "JPEG", - [19][0] = "VCNU0", - [21][0] = "VSCH", - [22][0] = "VCNU1", - [23][0] = "VCN1", - [32+20][0] = "VCN0", - [2][1] = "DBGUNBIO", + [16][0] = "LSDMA", + [17][0] = "JPEG", + [19][0] = "VCNU", + [22][0] = "VSCH", + [23][0] = "HDP", + [32+23][0] = "VCNRD", [3][1] = "DCEDWB", [4][1] = "DCEDMC", - [5][1] = "DCEVGA", [6][1] = "MP0", [7][1] = "MP1", [8][1] = "MPIO", [10][1] = "DBGU0", [11][1] = "DBGU1", - [12][1] = "DBGU2", - [13][1] = "DBGU3", + [12][1] = "DBGUNBIO", [14][1] = "XDP", [15][1] = "OSSSYS", - [16][1] = "HDP", - [17][1] = 
"LSDMA", - [18][1] = "JPEG", - [19][1] = "VCNU0", - [20][1] = "VCN0", - [21][1] = "VSCH", - [22][1] = "VCNU1", - [23][1] = "VCN1", + [16][1] = "LSDMA", + [17][1] = "JPEG", + [18][1] = "VCNWR", + [19][1] = "VCNU", + [22][1] = "VSCH", + [23][1] = "HDP", }; static uint32_t mmhub_v4_1_0_get_invalidate_req(unsigned int vmid, diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h new file mode 100644 index 000000000000..6f749814929f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h @@ -0,0 +1,144 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __MMSCH_V5_0_H__ +#define __MMSCH_V5_0_H__ + +#include "amdgpu_vcn.h" + +#define MMSCH_VERSION_MAJOR 5 +#define MMSCH_VERSION_MINOR 0 +#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR) + +#define RB_ENABLED (1 << 0) +#define RB4_ENABLED (1 << 1) + +#define MMSCH_VF_ENGINE_STATUS__PASS 0x1 + +#define MMSCH_VF_MAILBOX_RESP__OK 0x1 +#define MMSCH_VF_MAILBOX_RESP__INCOMPLETE 0x2 +#define MMSCH_VF_MAILBOX_RESP__FAILED 0x3 +#define MMSCH_VF_MAILBOX_RESP__FAILED_SMALL_CTX_SIZE 0x4 +#define MMSCH_VF_MAILBOX_RESP__UNKNOWN_CMD 0x5 + +enum mmsch_v5_0_command_type { + MMSCH_COMMAND__DIRECT_REG_WRITE = 0, + MMSCH_COMMAND__DIRECT_REG_POLLING = 2, + MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE = 3, + MMSCH_COMMAND__INDIRECT_REG_WRITE = 8, + MMSCH_COMMAND__END = 0xf +}; + +struct mmsch_v5_0_table_info { + uint32_t init_status; + uint32_t table_offset; + uint32_t table_size; +}; + +struct mmsch_v5_0_init_header { + uint32_t version; + uint32_t total_size; + struct mmsch_v5_0_table_info vcn0; + struct mmsch_v5_0_table_info mjpegdec0[5]; + struct mmsch_v5_0_table_info mjpegdec1[5]; +}; + +struct mmsch_v5_0_cmd_direct_reg_header { + uint32_t reg_offset : 28; + uint32_t command_type : 4; +}; + +struct mmsch_v5_0_cmd_indirect_reg_header { + uint32_t reg_offset : 20; + uint32_t reg_idx_space : 8; + uint32_t command_type : 4; +}; + +struct mmsch_v5_0_cmd_direct_write { + struct mmsch_v5_0_cmd_direct_reg_header cmd_header; + uint32_t reg_value; +}; + +struct mmsch_v5_0_cmd_direct_read_modify_write { + struct mmsch_v5_0_cmd_direct_reg_header cmd_header; + uint32_t write_data; + uint32_t mask_value; +}; + +struct mmsch_v5_0_cmd_direct_polling { + struct mmsch_v5_0_cmd_direct_reg_header cmd_header; + uint32_t mask_value; + uint32_t wait_value; +}; + +struct mmsch_v5_0_cmd_end { + struct 
mmsch_v5_0_cmd_direct_reg_header cmd_header; +}; + +struct mmsch_v5_0_cmd_indirect_write { + struct mmsch_v5_0_cmd_indirect_reg_header cmd_header; + uint32_t reg_value; +}; + +#define MMSCH_V5_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \ + size = sizeof(struct mmsch_v5_0_cmd_direct_read_modify_write); \ + size_dw = size / 4; \ + direct_rd_mod_wt.cmd_header.reg_offset = reg; \ + direct_rd_mod_wt.mask_value = mask; \ + direct_rd_mod_wt.write_data = data; \ + memcpy((void *)table_loc, &direct_rd_mod_wt, size); \ + table_loc += size_dw; \ + table_size += size_dw; \ +} + +#define MMSCH_V5_0_INSERT_DIRECT_WT(reg, value) { \ + size = sizeof(struct mmsch_v5_0_cmd_direct_write); \ + size_dw = size / 4; \ + direct_wt.cmd_header.reg_offset = reg; \ + direct_wt.reg_value = value; \ + memcpy((void *)table_loc, &direct_wt, size); \ + table_loc += size_dw; \ + table_size += size_dw; \ +} + +#define MMSCH_V5_0_INSERT_DIRECT_POLL(reg, mask, wait) { \ + size = sizeof(struct mmsch_v5_0_cmd_direct_polling); \ + size_dw = size / 4; \ + direct_poll.cmd_header.reg_offset = reg; \ + direct_poll.mask_value = mask; \ + direct_poll.wait_value = wait; \ + memcpy((void *)table_loc, &direct_poll, size); \ + table_loc += size_dw; \ + table_size += size_dw; \ +} + +#define MMSCH_V5_0_INSERT_END() { \ + size = sizeof(struct mmsch_v5_0_cmd_end); \ + size_dw = size / 4; \ + memcpy((void *)table_loc, &end, size); \ + table_loc += size_dw; \ + table_size += size_dw; \ +} + +#endif + diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index f5411b798e11..48101a34e049 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -274,6 +274,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) { struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work); struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt); + struct amdgpu_reset_context reset_context = { 0 }; amdgpu_virt_fini_data_exchange(adev); @@ -281,8 +282,6 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) if (amdgpu_device_should_recover_gpu(adev) && (!amdgpu_device_has_job_running(adev) || adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)) { - struct amdgpu_reset_context reset_context; - memset(&reset_context, 0, sizeof(reset_context)); reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; @@ -293,6 +292,19 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) } } +static void xgpu_ai_mailbox_bad_pages_work(struct work_struct *work) +{ + struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work); + struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt); + + if (down_read_trylock(&adev->reset_domain->sem)) { + amdgpu_virt_fini_data_exchange(adev); + amdgpu_virt_request_bad_pages(adev); + amdgpu_virt_init_data_exchange(adev); + up_read(&adev->reset_domain->sem); + } +} + static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, @@ -312,26 +324,42 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { enum idh_event event = xgpu_ai_mailbox_peek_msg(adev); + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); switch (event) { - case IDH_FLR_NOTIFICATION: + case IDH_RAS_BAD_PAGES_NOTIFICATION: + xgpu_ai_mailbox_send_ack(adev); + if (amdgpu_sriov_runtime(adev)) + schedule_work(&adev->virt.bad_pages_work); + break; + case IDH_UNRECOV_ERR_NOTIFICATION: + 
xgpu_ai_mailbox_send_ack(adev);
+		ras->is_rma = true;
+		dev_err(adev->dev, "VF is in an unrecoverable state. Runtime Services are halted.\n");
 		if (amdgpu_sriov_runtime(adev))
 			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
-				  &adev->virt.flr_work),
-				  "Failed to queue work! at %s",
-				  __func__);
+								&adev->virt.flr_work),
+				  "Failed to queue work! at %s",
+				  __func__);
 		break;
-	case IDH_QUERY_ALIVE:
-		xgpu_ai_mailbox_send_ack(adev);
-		break;
-	/* READY_TO_ACCESS_GPU is fetched by kernel polling, IRQ can ignore
-	 * it byfar since that polling thread will handle it,
-	 * other msg like flr complete is not handled here.
-	 */
-	case IDH_CLR_MSG_BUF:
-	case IDH_FLR_NOTIFICATION_CMPL:
-	case IDH_READY_TO_ACCESS_GPU:
-	default:
+	case IDH_FLR_NOTIFICATION:
+		if (amdgpu_sriov_runtime(adev))
+			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+								&adev->virt.flr_work),
+				  "Failed to queue work! at %s",
+				  __func__);
+		break;
+	case IDH_QUERY_ALIVE:
+		xgpu_ai_mailbox_send_ack(adev);
+		break;
+	/* READY_TO_ACCESS_GPU is fetched by kernel polling; the IRQ can
+	 * ignore it here since that polling thread will handle it. Other
+	 * messages, such as FLR complete, are not handled here.
+	 */
+	case IDH_CLR_MSG_BUF:
+	case IDH_FLR_NOTIFICATION_CMPL:
+	case IDH_READY_TO_ACCESS_GPU:
+	default:
 		break;
 	}
 
@@ -387,6 +415,7 @@ int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
 	}
 
 	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);
+	INIT_WORK(&adev->virt.bad_pages_work, xgpu_ai_mailbox_bad_pages_work);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index ed57cbc150af..874b9f8f9804 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -40,6 +40,7 @@ enum idh_request {
 	IDH_LOG_VF_ERROR = 200,
 	IDH_READY_TO_RESET = 201,
 	IDH_RAS_POISON = 202,
+	IDH_REQ_RAS_BAD_PAGES = 205,
 };
 
 enum idh_event {
@@ -54,6 +55,9 @@ enum idh_event {
 	IDH_RAS_POISON_READY,
 	IDH_PF_SOFT_FLR_NOTIFICATION,
 	IDH_RAS_ERROR_DETECTED,
+	IDH_RAS_BAD_PAGES_READY = 15,
+	IDH_RAS_BAD_PAGES_NOTIFICATION = 16,
+	IDH_UNRECOV_ERR_NOTIFICATION = 17,
 	IDH_TEXT_MESSAGE = 255,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 5aadf24cb202..f6d8597452ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -67,6 +67,8 @@ static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
 	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
 	if (reg == IDH_FAIL)
 		r = -EINVAL;
+	if (reg == IDH_UNRECOV_ERR_NOTIFICATION)
+		r = -ENODEV;
 	else if (reg != event)
 		return -ENOENT;
 
@@ -103,6 +105,7 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
 {
 	int r;
 	uint64_t timeout, now;
+	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
 	now = (uint64_t)ktime_to_ms(ktime_get());
 	timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;
@@ -110,8 +113,16 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
 	do {
 		r = xgpu_nv_mailbox_rcv_msg(adev, event);
 		if (!r) {
-			dev_dbg(adev->dev, "rcv_msg 0x%x after %llu ms\n", event, NV_MAILBOX_POLL_MSG_TIMEDOUT - timeout + now);
+			dev_dbg(adev->dev, "rcv_msg 0x%x after %llu ms\n",
+				event, NV_MAILBOX_POLL_MSG_TIMEDOUT - timeout + now);
 			return 0;
+		} else if (r == -ENODEV) {
+			if (!amdgpu_ras_is_rma(adev)) {
+				ras->is_rma = true;
+				dev_err(adev->dev, "VF is in an unrecoverable state.
" + "Runtime Services are halted.\n"); + } + return r; } msleep(10); @@ -166,6 +177,10 @@ static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev, enum idh_event event = -1; send_request: + + if (amdgpu_ras_is_rma(adev)) + return -ENODEV; + xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3); switch (req) { @@ -187,6 +202,9 @@ send_request: case IDH_REQ_RAS_CPER_DUMP: event = IDH_RAS_CPER_DUMP_READY; break; + case IDH_REQ_RAS_BAD_PAGES: + event = IDH_RAS_BAD_PAGES_READY; + break; default: break; } @@ -320,6 +338,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) { struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work); struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt); + struct amdgpu_reset_context reset_context = { 0 }; amdgpu_virt_fini_data_exchange(adev); @@ -330,8 +349,6 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT || adev->compute_timeout == MAX_SCHEDULE_TIMEOUT || adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) { - struct amdgpu_reset_context reset_context; - memset(&reset_context, 0, sizeof(reset_context)); reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; @@ -342,6 +359,19 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) } } +static void xgpu_nv_mailbox_bad_pages_work(struct work_struct *work) +{ + struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work); + struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt); + + if (down_read_trylock(&adev->reset_domain->sem)) { + amdgpu_virt_fini_data_exchange(adev); + amdgpu_virt_request_bad_pages(adev); + amdgpu_virt_init_data_exchange(adev); + up_read(&adev->reset_domain->sem); + } +} + static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, @@ -364,8 +394,27 @@ static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { enum idh_event event = xgpu_nv_mailbox_peek_msg(adev); + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); switch (event) { + case IDH_RAS_BAD_PAGES_NOTIFICATION: + xgpu_nv_mailbox_send_ack(adev); + if (amdgpu_sriov_runtime(adev)) + schedule_work(&adev->virt.bad_pages_work); + break; + case IDH_UNRECOV_ERR_NOTIFICATION: + xgpu_nv_mailbox_send_ack(adev); + if (!amdgpu_ras_is_rma(adev)) { + ras->is_rma = true; + dev_err(adev->dev, "VF is in an unrecoverable state. Runtime Services are halted.\n"); + } + + if (amdgpu_sriov_runtime(adev)) + WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain, + &adev->virt.flr_work), + "Failed to queue work! 
at %s", + __func__); + break; case IDH_FLR_NOTIFICATION: if (amdgpu_sriov_runtime(adev)) WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain, @@ -436,6 +485,7 @@ int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev) } INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work); + INIT_WORK(&adev->virt.bad_pages_work, xgpu_nv_mailbox_bad_pages_work); return 0; } @@ -480,6 +530,11 @@ static int xgpu_nv_req_ras_cper_dump(struct amdgpu_device *adev, u64 vf_rptr) adev, IDH_REQ_RAS_CPER_DUMP, vf_rptr_hi, vf_rptr_lo, 0); } +static int xgpu_nv_req_ras_bad_pages(struct amdgpu_device *adev) +{ + return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_BAD_PAGES); +} + const struct amdgpu_virt_ops xgpu_nv_virt_ops = { .req_full_gpu = xgpu_nv_request_full_gpu_access, .rel_full_gpu = xgpu_nv_release_full_gpu_access, @@ -492,4 +547,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = { .rcvd_ras_intr = xgpu_nv_rcvd_ras_intr, .req_ras_err_count = xgpu_nv_req_ras_err_count, .req_ras_cper_dump = xgpu_nv_req_ras_cper_dump, + .req_bad_pages = xgpu_nv_req_ras_bad_pages, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h index 72c9fceb9d79..5808689562cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h @@ -42,6 +42,7 @@ enum idh_request { IDH_RAS_POISON = 202, IDH_REQ_RAS_ERROR_COUNT = 203, IDH_REQ_RAS_CPER_DUMP = 204, + IDH_REQ_RAS_BAD_PAGES = 205, }; enum idh_event { @@ -58,6 +59,9 @@ enum idh_event { IDH_RAS_ERROR_DETECTED, IDH_RAS_ERROR_COUNT_READY = 11, IDH_RAS_CPER_DUMP_READY = 14, + IDH_RAS_BAD_PAGES_READY = 15, + IDH_RAS_BAD_PAGES_NOTIFICATION = 16, + IDH_UNRECOV_ERR_NOTIFICATION = 17, IDH_TEXT_MESSAGE = 255, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c index 2ece3ae75ec1..bed5ef4d8788 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c @@ -360,7 +360,7 @@ static void nbio_v7_11_get_clockgating_state(struct amdgpu_device *adev, *flags |= AMD_CG_SUPPORT_BIF_LS; } -#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE) +#define MMIO_REG_HOLE_OFFSET 0x44000 static void nbio_v7_11_set_reg_remap(struct amdgpu_device *adev) { diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index f23cb79110d6..1c22bc11c1f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -31,9 +31,6 @@ #define NPS_MODE_MASK 0x000000FFL -/* Core 0 Port 0 counter */ -#define smnPCIEP_NAK_COUNTER 0x1A340218 - static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev) { WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL, @@ -177,8 +174,12 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do { u32 doorbell_range = 0, doorbell_ctrl = 0; u32 aid_id = instance; + u32 range_size; if (use_doorbell) { + range_size = (amdgpu_ip_version(adev, GC_HWIP, 0) == + IP_VERSION(9, 5, 0)) ? 
+ 0xb : 0x9; doorbell_range = REG_SET_FIELD(doorbell_range, DOORBELL0_CTRL_ENTRY_0, BIF_DOORBELL0_RANGE_OFFSET_ENTRY, @@ -186,7 +187,7 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do doorbell_range = REG_SET_FIELD(doorbell_range, DOORBELL0_CTRL_ENTRY_0, BIF_DOORBELL0_RANGE_SIZE_ENTRY, - 0x9); + range_size); if (aid_id) doorbell_range = REG_SET_FIELD(doorbell_range, DOORBELL0_CTRL_ENTRY_0, @@ -204,7 +205,7 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do S2A_DOORBELL_PORT1_RANGE_OFFSET, 0x4); doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL, - S2A_DOORBELL_PORT1_RANGE_SIZE, 0x9); + S2A_DOORBELL_PORT1_RANGE_SIZE, range_size); doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL, S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x4); @@ -463,22 +464,6 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev) } } -static u64 nbio_v7_9_get_pcie_replay_count(struct amdgpu_device *adev) -{ - u32 val, nak_r, nak_g; - - if (adev->flags & AMD_IS_APU) - return 0; - - /* Get the number of NAKs received and generated */ - val = RREG32_PCIE(smnPCIEP_NAK_COUNTER); - nak_r = val & 0xFFFF; - nak_g = val >> 16; - - /* Add the total number of NAKs, i.e the number of replays */ - return (nak_r + nak_g); -} - #define MMIO_REG_HOLE_OFFSET 0x1A000 static void nbio_v7_9_set_reg_remap(struct amdgpu_device *adev) @@ -520,7 +505,6 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = { .get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode, .is_nps_switch_requested = nbio_v7_9_is_nps_switch_requested, .init_registers = nbio_v7_9_init_registers, - .get_pcie_replay_count = nbio_v7_9_get_pcie_replay_count, .set_reg_remap = nbio_v7_9_set_reg_remap, }; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h index f4a91b126c73..73f87131a7e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h @@ -106,7 +106,9 @@ enum psp_gfx_cmd_id /*IDs of performance monitoring/profiling*/ GFX_CMD_ID_CONFIG_SQ_PERFMON = 0x00000046, /* Config CGTT_SQ_CLK_CTRL */ /* Dynamic memory partitioninig (NPS mode change)*/ - GFX_CMD_ID_FB_NPS_MODE = 0x00000048, /* Configure memory partitioning mode */ + GFX_CMD_ID_FB_NPS_MODE = 0x00000048, /* Configure memory partitioning mode */ + GFX_CMD_ID_FB_FW_RESERV_ADDR = 0x00000050, /* Query FW reservation addr */ + GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR = 0x00000051, /* Query FW reservation extended addr */ }; /* PSP boot config sub-commands */ @@ -404,11 +406,19 @@ struct psp_gfx_uresp_bootcfg { uint32_t boot_cfg; /* boot config data */ }; +/* Command-specific response for fw reserve info */ +struct psp_gfx_uresp_fw_reserve_info { + uint32_t reserve_base_address_hi; + uint32_t reserve_base_address_lo; + uint32_t reserve_size; +}; + /* Union of command-specific responses for GPCOM ring. */ union psp_gfx_uresp { struct psp_gfx_uresp_reserved reserved; struct psp_gfx_uresp_bootcfg boot_cfg; struct psp_gfx_uresp_fwar_db_info fwar_db_info; + struct psp_gfx_uresp_fw_reserve_info fw_reserve_info; }; /* Structure of GFX Response buffer. 
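The FW-reservation queries added above return the reserved region's base address split into hi/lo 32-bit words plus a 32-bit size. A caller would reassemble the 64-bit base roughly as in this sketch; psp_fw_reserve_base() is a hypothetical helper, and only the struct and its field names come from the header above.

/* Sketch: rebuild the 64-bit reservation base from the split words. */
static inline uint64_t
psp_fw_reserve_base(const struct psp_gfx_uresp_fw_reserve_info *info)
{
	return ((uint64_t)info->reserve_base_address_hi << 32) |
	       info->reserve_base_address_lo;
}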
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index 145186a1e48f..3584b8c18fd9 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -94,7 +94,7 @@ static int psp_v10_0_ring_create(struct psp_context *psp, /* Wait for response flag (bit 31) in C2PMSG_64 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); return ret; } @@ -115,7 +115,7 @@ static int psp_v10_0_ring_stop(struct psp_context *psp, /* Wait for response flag (bit 31) in C2PMSG_64 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index bb5dfc410a66..6cc05d36e359 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -152,11 +152,9 @@ static int psp_v11_0_wait_for_bootloader(struct psp_context *psp) for (retry_loop = 0; retry_loop < 10; retry_loop++) { /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ - ret = psp_wait_for(psp, - SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, - 0x80000000, - false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE); if (ret == 0) return 0; @@ -252,8 +250,8 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp) /* there might be handshake issue with hardware which needs delay */ mdelay(20); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81), - RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), - 0, true); + RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0, + PSP_WAITREG_CHANGED); return ret; } @@ -277,11 +275,13 @@ static int psp_v11_0_ring_stop(struct psp_context *psp, /* Wait for response flag (bit 31) */ if (amdgpu_sriov_vf(adev)) - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); else - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); return ret; } @@ -317,13 +317,15 @@ static int psp_v11_0_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_101 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Wait for sOS ready for ring creation */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0); if (ret) { DRM_ERROR("Failed to wait for sOS ready for ring creation\n"); return ret; @@ -347,8 +349,9 @@ static int psp_v11_0_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_64 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, 
SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; @@ -381,7 +384,8 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp) offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64); - ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for(psp, offset, MBOX_TOS_READY_FLAG, + MBOX_TOS_READY_MASK, 0); if (ret) { DRM_INFO("psp is not working correctly before mode1 reset!\n"); @@ -395,7 +399,8 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp) offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33); - ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false); + ret = psp_wait_for(psp, offset, MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, + 0); if (ret) { DRM_INFO("psp mode 1 reset failed!\n"); @@ -421,8 +426,9 @@ static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg) max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout; for (i = 0; i < max_wait; i++) { - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE); if (ret == 0) break; } @@ -533,7 +539,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) } memcpy_toio(adev->mman.aper_base_kaddr, buf, sz); - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); vfree(buf); drm_dev_exit(idx); } else { @@ -601,7 +607,7 @@ static int psp_v11_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20)); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); if (ret) return ret; @@ -638,7 +644,7 @@ static int psp_v11_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver) WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); if (!ret) *fw_ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36); diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c index 5697760a819b..93787a90d598 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c @@ -41,8 +41,9 @@ static int psp_v11_0_8_ring_stop(struct psp_context *psp, /* there might be handshake issue with hardware which needs delay */ mdelay(20); /* Wait for response flag (bit 31) */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Write the ring destroy command*/ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, @@ -50,8 +51,9 @@ static int psp_v11_0_8_ring_stop(struct psp_context *psp, /* there might be handshake issue with hardware which needs delay */ mdelay(20); /* Wait for response flag (bit 31) */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; @@ -87,13 +89,15 @@ static int psp_v11_0_8_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_101 */ - ret = 
psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Wait for sOS ready for ring creation */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0); if (ret) { DRM_ERROR("Failed to wait for trust OS ready for ring creation\n"); return ret; @@ -117,8 +121,9 @@ static int psp_v11_0_8_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_64 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c index fcd708eae75c..4c6450d62299 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c @@ -34,9 +34,6 @@ #include "sdma0/sdma0_4_0_offset.h" #include "nbio/nbio_7_4_offset.h" -#include "oss/osssys_4_0_offset.h" -#include "oss/osssys_4_0_sh_mask.h" - MODULE_FIRMWARE("amdgpu/renoir_asd.bin"); MODULE_FIRMWARE("amdgpu/renoir_ta.bin"); MODULE_FIRMWARE("amdgpu/green_sardine_asd.bin"); @@ -85,7 +82,7 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp) /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); if (ret) return ret; @@ -99,11 +96,8 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp) WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, psp_gfxdrv_command_reg); - /* there might be handshake issue with hardware which needs delay */ - mdelay(20); - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); return ret; } @@ -124,7 +118,7 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp) /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); if (ret) return ret; @@ -138,46 +132,13 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp) WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, psp_gfxdrv_command_reg); - /* there might be handshake issue with hardware which needs delay */ - mdelay(20); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81), - RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), - 0, true); + RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0, + PSP_WAITREG_CHANGED); return ret; } -static void psp_v12_0_reroute_ih(struct psp_context *psp) -{ - struct amdgpu_device *adev = psp->adev; - uint32_t tmp; - - /* Change IH ring for VMC */ - tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1244b); - tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1); - tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1); - - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 3); - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp); - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET); - - mdelay(20); - 
psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); - - /* Change IH ring for UMC */ - tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b); - tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1); - - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 4); - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp); - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET); - - mdelay(20); - psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); -} - static int psp_v12_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type) { @@ -186,49 +147,23 @@ static int psp_v12_0_ring_create(struct psp_context *psp, struct psp_ring *ring = &psp->km_ring; struct amdgpu_device *adev = psp->adev; - psp_v12_0_reroute_ih(psp); - - if (amdgpu_sriov_vf(psp->adev)) { - /* Write low address of the ring to C2PMSG_102 */ - psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg); - /* Write high address of the ring to C2PMSG_103 */ - psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr); - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_103, psp_ring_reg); - - /* Write the ring initialization command to C2PMSG_101 */ - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, - GFX_CTRL_CMD_ID_INIT_GPCOM_RING); - - /* there might be handshake issue with hardware which needs delay */ - mdelay(20); - - /* Wait for response flag (bit 31) in C2PMSG_101 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x8000FFFF, false); - - } else { - /* Write low address of the ring to C2PMSG_69 */ - psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg); - /* Write high address of the ring to C2PMSG_70 */ - psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr); - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg); - /* Write size of ring to C2PMSG_71 */ - psp_ring_reg = ring->ring_size; - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg); - /* Write the ring initialization command to C2PMSG_64 */ - psp_ring_reg = ring_type; - psp_ring_reg = psp_ring_reg << 16; - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg); - - /* there might be handshake issue with hardware which needs delay */ - mdelay(20); - - /* Wait for response flag (bit 31) in C2PMSG_64 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); - } + /* Write low address of the ring to C2PMSG_69 */ + psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg); + /* Write high address of the ring to C2PMSG_70 */ + psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg); + /* Write size of ring to C2PMSG_71 */ + psp_ring_reg = ring->ring_size; + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg); + /* Write the ring initialization command to C2PMSG_64 */ + psp_ring_reg = ring_type; + psp_ring_reg = psp_ring_reg << 16; + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg); + + /* Wait for response flag (bit 31) in C2PMSG_64 */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); return ret; } @@ -247,16 +182,15 @@ static int psp_v12_0_ring_stop(struct psp_context *psp, WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_DESTROY_RINGS); - /* there might be handshake issue with 
hardware which needs delay */ - mdelay(20); - /* Wait for response flag (bit 31) */ if (amdgpu_sriov_vf(adev)) - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); else - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); return ret; } @@ -287,7 +221,8 @@ static int psp_v12_0_mode1_reset(struct psp_context *psp) offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64); - ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for(psp, offset, MBOX_TOS_READY_FLAG, + MBOX_TOS_READY_MASK, 0); if (ret) { DRM_INFO("psp is not working correctly before mode1 reset!\n"); @@ -301,7 +236,8 @@ static int psp_v12_0_mode1_reset(struct psp_context *psp) offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33); - ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false); + ret = psp_wait_for(psp, offset, MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, + 0); if (ret) { DRM_INFO("psp mode 1 reset failed!\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index cc621064610f..af4a7d7c4abd 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -42,7 +42,9 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_5_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_8_toc.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_8_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos.bin"); +MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos_kicker.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta.bin"); +MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta_kicker.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_7_sos.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_7_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin"); @@ -71,20 +73,13 @@ MODULE_FIRMWARE("amdgpu/psp_14_0_4_ta.bin"); /* Retry times for vmbx ready wait */ #define PSP_VMBX_POLLING_LIMIT 3000 -/* VBIOS gfl defines */ -#define MBOX_READY_MASK 0x80000000 -#define MBOX_STATUS_MASK 0x0000FFFF -#define MBOX_COMMAND_MASK 0x00FF0000 -#define MBOX_READY_FLAG 0x80000000 -#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO 0x2 -#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI 0x3 -#define C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE 0x4 - /* memory training timeout define */ #define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000 #define regMP1_PUB_SCRATCH0 0x3b10090 +#define PSP13_BL_STATUS_SIZE 100 + static int psp_v13_0_init_microcode(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -151,6 +146,32 @@ static bool psp_v13_0_is_sos_alive(struct psp_context *psp) return sol_reg != 0x0; } +static void psp_v13_0_bootloader_print_status(struct psp_context *psp, + const char *msg) +{ + struct amdgpu_device *adev = psp->adev; + u32 bl_status_reg; + char bl_status_msg[PSP13_BL_STATUS_SIZE]; + int i, at; + + if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) || + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) { + at = 0; + for_each_inst(i, adev->aid_mask) { + bl_status_reg = + (SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_92) + << 2) + + adev->asic_funcs->encode_ext_smn_addressing(i); + at += snprintf(bl_status_msg + at, + PSP13_BL_STATUS_SIZE - at, + " status(%02i): 0x%08x", i, + RREG32_PCIE_EXT(bl_status_reg)); + } + 
dev_info(adev->dev, "%s - %s", msg, bl_status_msg); + } +} + static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -161,7 +182,7 @@ static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp) ready having bit 31 of C2PMSG_33 set to 1 */ ret = psp_wait_for( psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_33), - 0x80000000, 0xffffffff, false); + 0x80000000, 0xffffffff, PSP_WAITREG_NOVERBOSE); if (ret == 0) break; @@ -192,10 +213,13 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp) for (retry_loop = 0; retry_loop < retry_cnt; retry_loop++) { ret = psp_wait_for( psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), - 0x80000000, 0xffffffff, false); + 0x80000000, 0xffffffff, PSP_WAITREG_NOVERBOSE); if (ret == 0) return 0; + if (retry_loop && !(retry_loop % 10)) + psp_v13_0_bootloader_print_status( + psp, "Waiting for bootloader completion"); } return ret; @@ -338,8 +362,8 @@ static int psp_v13_0_bootloader_load_sos(struct psp_context *psp) /* there might be handshake issue with hardware which needs delay */ mdelay(20); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_81), - RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81), - 0, true); + RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81), 0, + PSP_WAITREG_CHANGED); if (!ret) psp_v13_0_init_sos_version(psp); @@ -360,8 +384,9 @@ static int psp_v13_0_ring_stop(struct psp_context *psp, /* there might be handshake issue with hardware which needs delay */ mdelay(20); /* Wait for response flag (bit 31) */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Write the ring destroy command*/ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64, @@ -369,8 +394,9 @@ static int psp_v13_0_ring_stop(struct psp_context *psp, /* there might be handshake issue with hardware which needs delay */ mdelay(20); /* Wait for response flag (bit 31) */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; @@ -406,13 +432,15 @@ static int psp_v13_0_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_101 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Wait for sOS ready for ring creation */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), + MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0); if (ret) { DRM_ERROR("Failed to wait for trust OS ready for ring creation\n"); return ret; @@ -436,8 +464,9 @@ static int psp_v13_0_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_64 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; @@ -500,8 +529,9 @@ static int psp_v13_0_memory_training_send_msg(struct psp_context *psp, int msg) 
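psp_v13_0_bootloader_print_status() above gives the bootloader wait loop something useful to log: every tenth retry it samples C2PMSG_92 on each AID and prints the collected values. The address arithmetic is the interesting part, shown here in isolation. SOC15_REG_OFFSET() yields a dword offset while the extended SMN window takes byte addresses, hence the shift, and each AID contributes its own base. Variable names in this fragment are illustrative:

	int aid;

	for_each_inst(aid, adev->aid_mask) {
		u64 byte_addr = ((u64)SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_92) << 2) +
				adev->asic_funcs->encode_ext_smn_addressing(aid);
		u32 bl_status = RREG32_PCIE_EXT(byte_addr);
		/* the real helper snprintf()s " status(%02i): 0x%08x" per AID */
	}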
max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout; for (i = 0; i < max_wait; i++) { - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE); if (ret == 0) break; } @@ -610,7 +640,7 @@ static int psp_v13_0_memory_training(struct psp_context *psp, uint32_t ops) } memcpy_toio(adev->mman.aper_base_kaddr, buf, sz); - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); vfree(buf); drm_dev_exit(idx); } else { @@ -653,7 +683,7 @@ static int psp_v13_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20)); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); if (ret) return ret; @@ -690,7 +720,7 @@ static int psp_v13_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver) WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); if (!ret) *fw_ver = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36); @@ -710,12 +740,14 @@ static int psp_v13_0_exec_spi_cmd(struct psp_context *psp, int cmd) /* Ring the doorbell */ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_73, 1); - if (cmd == C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE) + if (cmd == C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE || + cmd == C2PMSG_CMD_SPI_GET_FLASH_IMAGE) ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115), MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT); else - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115), - MBOX_READY_FLAG, MBOX_READY_MASK, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115), + MBOX_READY_FLAG, MBOX_READY_MASK, 0); if (ret) { dev_err(adev->dev, "SPI cmd %x timed out, ret = %d", cmd, ret); return ret; @@ -739,7 +771,7 @@ static int psp_v13_0_update_spirom(struct psp_context *psp, /* Confirm PSP is ready to start */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115), - MBOX_READY_FLAG, MBOX_READY_MASK, false); + MBOX_READY_FLAG, MBOX_READY_MASK, 0); if (ret) { dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret); return ret; @@ -766,6 +798,37 @@ static int psp_v13_0_update_spirom(struct psp_context *psp, return 0; } +static int psp_v13_0_dump_spirom(struct psp_context *psp, + uint64_t fw_pri_mc_addr) +{ + struct amdgpu_device *adev = psp->adev; + int ret; + + /* Confirm PSP is ready to start */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115), + MBOX_READY_FLAG, MBOX_READY_MASK, 0); + if (ret) { + dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret); + return ret; + } + + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_116, lower_32_bits(fw_pri_mc_addr)); + + ret = psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_LO); + if (ret) + return ret; + + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_116, upper_32_bits(fw_pri_mc_addr)); + + ret = psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_HI); + if (ret) + return ret; + + ret = psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_FLASH_IMAGE); + + return ret; +} + static int psp_v13_0_vbflash_status(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -858,6 +921,26 @@ static 
bool psp_v13_0_is_reload_needed(struct psp_context *psp) return false; } +static int psp_v13_0_reg_program_no_ring(struct psp_context *psp, uint32_t val, + enum psp_reg_prog_id id) +{ + struct amdgpu_device *adev = psp->adev; + int ret = -EOPNOTSUPP; + + /* PSP will broadcast the value to all instances */ + if (amdgpu_sriov_vf(adev)) { + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_GBR_IH_SET); + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102, id); + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_103, val); + + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), + 0x80000000, 0x80000000, 0); + } + + return ret; +} + static const struct psp_funcs psp_v13_0_funcs = { .init_microcode = psp_v13_0_init_microcode, .wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state, @@ -879,11 +962,13 @@ static const struct psp_funcs psp_v13_0_funcs = { .load_usbc_pd_fw = psp_v13_0_load_usbc_pd_fw, .read_usbc_pd_fw = psp_v13_0_read_usbc_pd_fw, .update_spirom = psp_v13_0_update_spirom, + .dump_spirom = psp_v13_0_dump_spirom, .vbflash_stat = psp_v13_0_vbflash_status, .fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk, .get_ras_capability = psp_v13_0_get_ras_capability, .is_aux_sos_load_required = psp_v13_0_is_aux_sos_load_required, .is_reload_needed = psp_v13_0_is_reload_needed, + .reg_program_no_ring = psp_v13_0_reg_program_no_ring, }; void psp_v13_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c index eaa5512a21da..5f39a2edcc95 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c @@ -76,11 +76,9 @@ static int psp_v13_0_4_wait_for_bootloader(struct psp_context *psp) for (retry_loop = 0; retry_loop < 10; retry_loop++) { /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ - ret = psp_wait_for(psp, - SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), - 0x80000000, - 0x80000000, - false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE); if (ret == 0) return 0; @@ -185,8 +183,8 @@ static int psp_v13_0_4_bootloader_load_sos(struct psp_context *psp) /* there might be handshake issue with hardware which needs delay */ mdelay(20); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_81), - RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81), - 0, true); + RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81), 0, + PSP_WAITREG_CHANGED); return ret; } @@ -204,8 +202,9 @@ static int psp_v13_0_4_ring_stop(struct psp_context *psp, /* there might be handshake issue with hardware which needs delay */ mdelay(20); /* Wait for response flag (bit 31) */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Write the ring destroy command*/ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64, @@ -213,8 +212,9 @@ static int psp_v13_0_4_ring_stop(struct psp_context *psp, /* there might be handshake issue with hardware which needs delay */ mdelay(20); /* Wait for response flag (bit 31) */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; @@ -250,13 +250,15 @@ static int 
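psp_v13_0_dump_spirom(), wired into psp_funcs here next to update_spirom, runs the same doorbell handshake in the read direction: the destination address is programmed 32 bits at a time through C2PMSG_116, then GET_FLASH_IMAGE kicks the copy, which uses the long SPI-ROM timeout because the whole image is transferred. A hypothetical caller, assuming a PSP-visible buffer already sits at mc_addr and leaving out the debugfs plumbing that would normally drive this:

	/* usage sketch; buffer allocation and error reporting elided */
	static int dump_rom_image(struct psp_context *psp, uint64_t mc_addr)
	{
		if (!psp->funcs || !psp->funcs->dump_spirom)
			return -EOPNOTSUPP;

		/* ADDR_LO, ADDR_HI, then GET_FLASH_IMAGE happen inside */
		return psp->funcs->dump_spirom(psp, mc_addr);
	}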
psp_v13_0_4_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_101 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Wait for sOS ready for ring creation */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), + MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0); if (ret) { DRM_ERROR("Failed to wait for trust OS ready for ring creation\n"); return ret; @@ -280,8 +282,9 @@ static int psp_v13_0_4_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_64 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c index 7c49c3f3c388..38dfc5c19f2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c @@ -34,7 +34,9 @@ MODULE_FIRMWARE("amdgpu/psp_14_0_2_sos.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_2_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos.bin"); +MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos_kicker.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta.bin"); +MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta_kicker.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_5_toc.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_5_ta.bin"); @@ -109,11 +111,9 @@ static int psp_v14_0_wait_for_bootloader(struct psp_context *psp) for (retry_loop = 0; retry_loop < 10; retry_loop++) { /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ - ret = psp_wait_for(psp, - SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), - 0x80000000, - 0x80000000, - false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), + 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE); if (ret == 0) return 0; @@ -228,9 +228,10 @@ static int psp_v14_0_bootloader_load_sos(struct psp_context *psp) /* there might be handshake issue with hardware which needs delay */ mdelay(20); - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_81), - RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81), - 0, true); + ret = psp_wait_for(psp, + SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_81), + RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81), 0, + PSP_WAITREG_CHANGED); return ret; } @@ -248,8 +249,9 @@ static int psp_v14_0_ring_stop(struct psp_context *psp, /* there might be handshake issue with hardware which needs delay */ mdelay(20); /* Wait for response flag (bit 31) */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Write the ring destroy command*/ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_64, @@ -257,8 +259,9 @@ static int psp_v14_0_ring_stop(struct psp_context *psp, /* there might be handshake issue with hardware which needs delay */ mdelay(20); /* Wait for response flag (bit 31) */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, 
SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; @@ -294,13 +297,15 @@ static int psp_v14_0_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_101 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Wait for sOS ready for ring creation */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64), + MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0); if (ret) { DRM_ERROR("Failed to wait for trust OS ready for ring creation\n"); return ret; @@ -324,8 +329,9 @@ static int psp_v14_0_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_64 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; @@ -388,8 +394,9 @@ static int psp_v14_0_memory_training_send_msg(struct psp_context *psp, int msg) max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout; for (i = 0; i < max_wait; i++) { - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), + 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE); if (ret == 0) break; } @@ -498,7 +505,7 @@ static int psp_v14_0_memory_training(struct psp_context *psp, uint32_t ops) } memcpy_toio(adev->mman.aper_base_kaddr, buf, sz); - adev->hdp.funcs->flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); vfree(buf); drm_dev_exit(idx); } else { @@ -540,8 +547,9 @@ static int psp_v14_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc */ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20)); - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + ret = psp_wait_for(psp, + SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), + 0x80000000, 0x80000000, 0); if (ret) return ret; @@ -577,8 +585,9 @@ static int psp_v14_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver) WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER); - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + ret = psp_wait_for(psp, + SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), + 0x80000000, 0x80000000, 0); if (!ret) *fw_ver = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36); @@ -602,11 +611,13 @@ static int psp_v14_0_exec_spi_cmd(struct psp_context *psp, int cmd) ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115), MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT); else - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115), - MBOX_READY_FLAG, MBOX_READY_MASK, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115), + MBOX_READY_FLAG, MBOX_READY_MASK, 0); - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115), - MBOX_READY_FLAG, MBOX_READY_MASK, false); + ret = psp_wait_for(psp, + SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115), + 
MBOX_READY_FLAG, MBOX_READY_MASK, 0); if (ret) { dev_err(adev->dev, "SPI cmd %x timed out, ret = %d", cmd, ret); return ret; @@ -629,8 +640,9 @@ static int psp_v14_0_update_spirom(struct psp_context *psp, int ret; /* Confirm PSP is ready to start */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115), - MBOX_READY_FLAG, MBOX_READY_MASK, false); + ret = psp_wait_for(psp, + SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115), + MBOX_READY_FLAG, MBOX_READY_MASK, 0); if (ret) { dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret); return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index f6b75e3e47ff..833830bc3e2e 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -91,7 +91,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp) /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); if (ret) return ret; @@ -109,7 +109,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp) mdelay(20); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); return ret; } @@ -130,7 +130,7 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp) /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); if (ret) return ret; @@ -147,8 +147,8 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp) /* there might be handshake issue with hardware which needs delay */ mdelay(20); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81), - RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), - 0, true); + RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0, + PSP_WAITREG_CHANGED); return ret; } @@ -168,7 +168,7 @@ static void psp_v3_1_reroute_ih(struct psp_context *psp) mdelay(20); psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + 0x80000000, 0x8000FFFF, 0); /* Change IH ring for UMC */ tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b); @@ -180,7 +180,7 @@ static void psp_v3_1_reroute_ih(struct psp_context *psp) mdelay(20); psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + 0x80000000, 0x8000FFFF, 0); } static int psp_v3_1_ring_create(struct psp_context *psp, @@ -217,9 +217,9 @@ static int psp_v3_1_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_101 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, - mmMP0_SMN_C2PMSG_101), 0x80000000, - 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + 0x80000000, 0x8000FFFF, 0); } else { /* Write low address of the ring to C2PMSG_69 */ @@ -240,10 +240,9 @@ static int psp_v3_1_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_64 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, - mmMP0_SMN_C2PMSG_64), 0x80000000, - 0x8000FFFF, false); - + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x8000FFFF, 0); } return ret; } @@ -267,11 +266,13 @@ static int psp_v3_1_ring_stop(struct psp_context *psp, 
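Note the macro conversion is deliberately uneven: psp_v3_1 below (and the IH reroute paths) keeps the raw 0x80000000/0x8000FFFF pairs with a flags value of 0, while the newer PSP files adopt the MBOX_TOS_* names. The definitions themselves are not part of this diff; judging only by the constants they replace, a plausible reading is the following, which should be treated as inference rather than the actual header:

	#define MBOX_TOS_READY_FLAG	0x80000000	/* bit 31: TOS has responded */
	#define MBOX_TOS_READY_MASK	0x8000FFFF	/* response bit + 16-bit status */
	#define MBOX_TOS_RESP_FLAG	0x80000000
	#define MBOX_TOS_RESP_MASK	0x8000FFFF

The old call sites checked against both 0x80000000 and 0x8000FFFF for the same logical wait, so the rename also appears to standardize on one mask per wait type.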
/* Wait for response flag (bit 31) */ if (amdgpu_sriov_vf(adev)) - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + 0x80000000, 0x80000000, 0); else - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x80000000, 0); return ret; } @@ -311,7 +312,7 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp) offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64); - ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, 0); if (ret) { DRM_INFO("psp is not working correctly before mode1 reset!\n"); @@ -325,7 +326,7 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp) offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33); - ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false); + ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, 0); if (ret) { DRM_INFO("psp mode 1 reset failed!\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 33ed2b158fcd..f38004e6064e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -2187,7 +2187,7 @@ static int sdma_v4_0_print_iv_entry(struct amdgpu_device *adev, dev_dbg_ratelimited(adev->dev, " for process %s pid %d thread %s pid %d\n", task_info->process_name, task_info->tgid, - task_info->task_name, task_info->pid); + task_info->task.comm, task_info->task.pid); amdgpu_vm_put_task_info(task_info); } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index dc94d58d33a6..36b1ca73c2ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -31,7 +31,6 @@ #include "amdgpu_ucode.h" #include "amdgpu_trace.h" #include "amdgpu_reset.h" -#include "gc/gc_9_0_sh_mask.h" #include "sdma/sdma_4_4_2_offset.h" #include "sdma/sdma_4_4_2_sh_mask.h" @@ -46,6 +45,7 @@ #include "amdgpu_ras.h" MODULE_FIRMWARE("amdgpu/sdma_4_4_2.bin"); +MODULE_FIRMWARE("amdgpu/sdma_4_4_4.bin"); MODULE_FIRMWARE("amdgpu/sdma_4_4_5.bin"); static const struct amdgpu_hwip_reg_entry sdma_reg_list_4_4_2[] = { @@ -107,8 +107,11 @@ static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev); static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev); static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev); static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev); -static void sdma_v4_4_2_set_engine_reset_funcs(struct amdgpu_device *adev); static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev); +static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring); +static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring); +static int sdma_v4_4_2_soft_reset_engine(struct amdgpu_device *adev, + u32 instance_id); static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 offset) @@ -490,7 +493,7 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev, { struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES]; u32 doorbell_offset, doorbell; - u32 rb_cntl, ib_cntl; + u32 rb_cntl, ib_cntl, sdma_cntl; int i; for_each_inst(i, inst_mask) { @@ -502,6 +505,9 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev, ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL); ib_cntl = 
REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 0); WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl); + sdma_cntl = RREG32_SDMA(i, regSDMA_CNTL); + sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, UTC_L1_ENABLE, 0); + WREG32_SDMA(i, regSDMA_CNTL, sdma_cntl); if (sdma[i]->use_doorbell) { doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL); @@ -995,6 +1001,7 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev, /* set utc l1 enable flag always to 1 */ temp = RREG32_SDMA(i, regSDMA_CNTL); temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1); + WREG32_SDMA(i, regSDMA_CNTL, temp); if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) < IP_VERSION(4, 4, 5)) { /* enable context empty interrupt during initialization */ @@ -1291,71 +1298,21 @@ static void sdma_v4_4_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring) seq, 0xffffffff, 4); } -/* - * sdma_v4_4_2_get_invalidate_req - Construct the VM_INVALIDATE_ENG0_REQ register value - * @vmid: The VMID to invalidate - * @flush_type: The type of flush (0 = legacy, 1 = lightweight, 2 = heavyweight) - * - * This function constructs the VM_INVALIDATE_ENG0_REQ register value for the specified VMID - * and flush type. It ensures that all relevant page table cache levels (L1 PTEs, L2 PTEs, and - * L2 PDEs) are invalidated. - */ -static uint32_t sdma_v4_4_2_get_invalidate_req(unsigned int vmid, - uint32_t flush_type) -{ - u32 req = 0; - - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, - PER_VMID_INVALIDATE_REQ, 1 << vmid); - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type); - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1); - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1); - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1); - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1); - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1); - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, - CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0); - return req; -} - -/* - * sdma_v4_4_2_ring_emit_vm_flush - Emit VM flush commands for SDMA - * @ring: The SDMA ring - * @vmid: The VMID to flush - * @pd_addr: The page directory address +/** + * sdma_v4_4_2_ring_emit_vm_flush - vm flush using sDMA + * + * @ring: amdgpu_ring pointer + * @vmid: vmid number to use + * @pd_addr: address * - * This function emits the necessary register writes and waits to perform a VM flush for the - * specified VMID. It updates the PTB address registers and issues a VM invalidation request - * using the specified VM invalidation engine. + * Update the page table base and flush the VM TLB + * using sDMA. 
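The sdma_v4_4_2.c change introduced just below is the largest functional one in this file: the hand-built VM_INVALIDATION packet and its sdma_v4_4_2_get_invalidate_req() helper are dropped in favor of the common GMC flush, and the ring frame-size budget later switches from the magic "4 + 2 * 3" to the shared SOC15_FLUSH_GPU_TLB_NUM_WREG/REG_WAIT terms. Conceptually the generic helper emits the same front half the removed code did; a simplified expansion, with the invalidation request and ack wait left as a comment since the exact sequence comes from the GMC IP's callbacks:

	static void vm_flush_concept(struct amdgpu_ring *ring, unsigned int vmid,
				     uint64_t pd_addr)
	{
		struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];

		/* program the per-VMID page table base, low then high */
		amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
				      hub->ctx_addr_distance * vmid,
				      lower_32_bits(pd_addr));
		amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
				      hub->ctx_addr_distance * vmid,
				      upper_32_bits(pd_addr));

		/* then write the invalidate request for this VMID and wait on
		 * the ack bit of the hub's invalidation engine */
	}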
*/ static void sdma_v4_4_2_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned int vmid, uint64_t pd_addr) + unsigned vmid, uint64_t pd_addr) { - struct amdgpu_device *adev = ring->adev; - uint32_t req = sdma_v4_4_2_get_invalidate_req(vmid, 0); - unsigned int eng = ring->vm_inv_eng; - struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub]; - - amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + - (hub->ctx_addr_distance * vmid), - lower_32_bits(pd_addr)); - - amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + - (hub->ctx_addr_distance * vmid), - upper_32_bits(pd_addr)); - /* - * Construct and emit the VM invalidation packet - */ - amdgpu_ring_write(ring, - SDMA_PKT_VM_INVALIDATION_HEADER_OP(SDMA_OP_VM_INVALIDATE) | - SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(SDMA_SUBOP_VM_INVALIDATE) | - SDMA_PKT_VM_INVALIDATION_HEADER_XCC0_ENG_ID(0x1f) | - SDMA_PKT_VM_INVALIDATION_HEADER_XCC1_ENG_ID(0x1f) | - SDMA_PKT_VM_INVALIDATION_HEADER_MMHUB_ENG_ID(eng)); - amdgpu_ring_write(ring, SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_INVALIDATEREQ(req)); - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(BIT(vmid))); + amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); } static void sdma_v4_4_2_ring_emit_wreg(struct amdgpu_ring *ring, @@ -1384,6 +1341,12 @@ static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev) } } +static const struct amdgpu_sdma_funcs sdma_v4_4_2_sdma_funcs = { + .stop_kernel_queue = &sdma_v4_4_2_stop_queue, + .start_kernel_queue = &sdma_v4_4_2_restore_queue, + .soft_reset_kernel_queue = &sdma_v4_4_2_soft_reset_engine, +}; + static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; @@ -1402,8 +1365,6 @@ static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block) sdma_v4_4_2_set_vm_pte_funcs(adev); sdma_v4_4_2_set_irq_funcs(adev); sdma_v4_4_2_set_ras_funcs(adev); - sdma_v4_4_2_set_engine_reset_funcs(adev); - return 0; } @@ -1498,6 +1459,7 @@ static int sdma_v4_4_2_sw_init(struct amdgpu_ip_block *ip_block) /* Initialize guilty flags for GFX and PAGE queues */ adev->sdma.instance[i].gfx_guilty = false; adev->sdma.instance[i].page_guilty = false; + adev->sdma.instance[i].funcs = &sdma_v4_4_2_sdma_funcs; ring = &adev->sdma.instance[i].ring; ring->ring_obj = NULL; @@ -1694,46 +1656,26 @@ static bool sdma_v4_4_2_is_queue_selected(struct amdgpu_device *adev, uint32_t i return (context_status & SDMA_GFX_CONTEXT_STATUS__SELECTED_MASK) != 0; } -static bool sdma_v4_4_2_ring_is_guilty(struct amdgpu_ring *ring) +static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; - uint32_t instance_id = ring->me; - - return sdma_v4_4_2_is_queue_selected(adev, instance_id, false); -} - -static bool sdma_v4_4_2_page_ring_is_guilty(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t instance_id = ring->me; - - if (!adev->sdma.has_page_queue) - return false; - - return sdma_v4_4_2_is_queue_selected(adev, instance_id, true); -} - -static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) -{ - struct amdgpu_device *adev = ring->adev; - u32 id = GET_INST(SDMA0, ring->me); + u32 id = ring->me; int r; - if (!(adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)) - return -EOPNOTSUPP; - - amdgpu_amdkfd_suspend(adev, false); - r = amdgpu_sdma_reset_engine(adev, id); - amdgpu_amdkfd_resume(adev, false); - + 
amdgpu_amdkfd_suspend(adev, true); + r = amdgpu_sdma_reset_engine(adev, id, false); + amdgpu_amdkfd_resume(adev, true); return r; } -static int sdma_v4_4_2_stop_queue(struct amdgpu_device *adev, uint32_t instance_id) +static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring) { + struct amdgpu_device *adev = ring->adev; + u32 instance_id = ring->me; u32 inst_mask; uint64_t rptr; - struct amdgpu_ring *ring = &adev->sdma.instance[instance_id].ring; if (amdgpu_sriov_vf(adev)) return -EINVAL; @@ -1766,11 +1708,11 @@ static int sdma_v4_4_2_stop_queue(struct amdgpu_device *adev, uint32_t instance_ return 0; } -static int sdma_v4_4_2_restore_queue(struct amdgpu_device *adev, uint32_t instance_id) +static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring) { - int i; + struct amdgpu_device *adev = ring->adev; u32 inst_mask; - struct amdgpu_ring *ring = &adev->sdma.instance[instance_id].ring; + int i, r; inst_mask = 1 << ring->me; udelay(50); @@ -1787,17 +1729,18 @@ static int sdma_v4_4_2_restore_queue(struct amdgpu_device *adev, uint32_t instan return -ETIMEDOUT; } - return sdma_v4_4_2_inst_start(adev, inst_mask, true); -} + r = sdma_v4_4_2_inst_start(adev, inst_mask, true); -static struct sdma_on_reset_funcs sdma_v4_4_2_engine_reset_funcs = { - .pre_reset = sdma_v4_4_2_stop_queue, - .post_reset = sdma_v4_4_2_restore_queue, -}; + return r; +} -static void sdma_v4_4_2_set_engine_reset_funcs(struct amdgpu_device *adev) +static int sdma_v4_4_2_soft_reset_engine(struct amdgpu_device *adev, + u32 instance_id) { - amdgpu_sdma_register_on_reset_callbacks(adev, &sdma_v4_4_2_engine_reset_funcs); + /* For SDMA 4.x, use the existing DPM interface for backward compatibility + * we need to convert the logical instance ID to physical instance ID before reset. + */ + return amdgpu_dpm_reset_sdma(adev, 1 << GET_INST(SDMA0, instance_id)); } static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev, @@ -1939,7 +1882,7 @@ static int sdma_v4_4_2_print_iv_entry(struct amdgpu_device *adev, if (task_info) { dev_dbg_ratelimited(adev->dev, " for process %s pid %d thread %s pid %d\n", task_info->process_name, task_info->tgid, - task_info->task_name, task_info->pid); + task_info->task.comm, task_info->task.pid); amdgpu_vm_put_task_info(task_info); } @@ -2177,7 +2120,8 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = { 3 + /* hdp invalidate */ 6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */ /* sdma_v4_4_2_ring_emit_vm_flush */ - 4 + 2 * 3 + + SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 + 10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */ .emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */ .emit_ib = sdma_v4_4_2_ring_emit_ib, @@ -2193,7 +2137,6 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = { .emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, .reset = sdma_v4_4_2_reset_queue, - .is_guilty = sdma_v4_4_2_ring_is_guilty, }; static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = { @@ -2209,7 +2152,8 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = { 3 + /* hdp invalidate */ 6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */ /* sdma_v4_4_2_ring_emit_vm_flush */ - 4 + 2 * 3 + + SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 + 10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */ .emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */ .emit_ib = sdma_v4_4_2_ring_emit_ib, @@ -2225,7 
+2169,6 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = { .emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, .reset = sdma_v4_4_2_reset_queue, - .is_guilty = sdma_v4_4_2_page_ring_is_guilty, }; static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev) @@ -2422,7 +2365,9 @@ static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; case IP_VERSION(9, 5, 0): - /*TODO: enable the queue reset flag until fw supported */ + if ((adev->gfx.mec_fw_version >= 0xf) && amdgpu_dpm_reset_sdma_is_supported(adev)) + adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + break; default: break; } @@ -2595,7 +2540,7 @@ static int sdma_v4_4_2_aca_bank_parser(struct aca_handle *handle, struct aca_ban 1ULL); break; case ACA_SMU_TYPE_CE: - bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank); + bank->aca_err_type = ACA_ERROR_TYPE_CE; ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, ACA_REG__MISC0__ERRCNT(misc0)); break; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 0dce59f4f6e2..7dc67a22a7a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -112,6 +112,8 @@ static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev); static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev); static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev); static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev); +static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring); +static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring); static const struct soc15_reg_golden golden_settings_sdma_5[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107), @@ -369,67 +371,36 @@ static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring) static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - uint32_t *wptr_saved; - uint32_t *is_queue_unmap; - uint64_t aggregated_db_index; - uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_DMA].mqd_size; DRM_DEBUG("Setting write pointer\n"); - if (ring->is_mes_queue) { - wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size); - is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size + - sizeof(uint32_t)); - aggregated_db_index = - amdgpu_mes_get_aggregated_doorbell_index(adev, - AMDGPU_MES_PRIORITY_LEVEL_NORMAL); - + if (ring->use_doorbell) { + DRM_DEBUG("Using doorbell -- " + "wptr_offs == 0x%08x " + "lower_32_bits(ring->wptr) << 2 == 0x%08x " + "upper_32_bits(ring->wptr) << 2 == 0x%08x\n", + ring->wptr_offs, + lower_32_bits(ring->wptr << 2), + upper_32_bits(ring->wptr << 2)); + /* XXX check if swapping is necessary on BE */ atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr << 2); - *wptr_saved = ring->wptr << 2; - if (*is_queue_unmap) { - WDOORBELL64(aggregated_db_index, ring->wptr << 2); - DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n", - ring->doorbell_index, ring->wptr << 2); - WDOORBELL64(ring->doorbell_index, ring->wptr << 2); - } else { - DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n", - ring->doorbell_index, ring->wptr << 2); - WDOORBELL64(ring->doorbell_index, ring->wptr << 2); - - if (*is_queue_unmap) - WDOORBELL64(aggregated_db_index, - ring->wptr << 2); - } + DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n", + ring->doorbell_index, ring->wptr << 2); + 
WDOORBELL64(ring->doorbell_index, ring->wptr << 2); } else { - if (ring->use_doorbell) { - DRM_DEBUG("Using doorbell -- " - "wptr_offs == 0x%08x " - "lower_32_bits(ring->wptr) << 2 == 0x%08x " - "upper_32_bits(ring->wptr) << 2 == 0x%08x\n", - ring->wptr_offs, - lower_32_bits(ring->wptr << 2), - upper_32_bits(ring->wptr << 2)); - /* XXX check if swapping is necessary on BE */ - atomic64_set((atomic64_t *)ring->wptr_cpu_addr, - ring->wptr << 2); - DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n", - ring->doorbell_index, ring->wptr << 2); - WDOORBELL64(ring->doorbell_index, ring->wptr << 2); - } else { - DRM_DEBUG("Not using doorbell -- " - "mmSDMA%i_GFX_RB_WPTR == 0x%08x " - "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n", - ring->me, - lower_32_bits(ring->wptr << 2), - ring->me, - upper_32_bits(ring->wptr << 2)); - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, - ring->me, mmSDMA0_GFX_RB_WPTR), - lower_32_bits(ring->wptr << 2)); - WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, - ring->me, mmSDMA0_GFX_RB_WPTR_HI), - upper_32_bits(ring->wptr << 2)); - } + DRM_DEBUG("Not using doorbell -- " + "mmSDMA%i_GFX_RB_WPTR == 0x%08x " + "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n", + ring->me, + lower_32_bits(ring->wptr << 2), + ring->me, + upper_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, + ring->me, mmSDMA0_GFX_RB_WPTR), + lower_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, + ring->me, mmSDMA0_GFX_RB_WPTR_HI), + upper_32_bits(ring->wptr << 2)); } } @@ -575,11 +546,9 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se } if (flags & AMDGPU_FENCE_FLAG_INT) { - uint32_t ctx = ring->is_mes_queue ? - (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0; /* generate an interrupt */ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP)); - amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx)); + amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0)); } } @@ -588,15 +557,15 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se * sdma_v5_0_gfx_stop - stop the gfx async dma engines * * @adev: amdgpu_device pointer - * + * @inst_mask: mask of dma engine instances to be disabled * Stop the gfx async dma ring buffers (NAVI10). 
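sdma_v5_0_gfx_stop() below gains the same inst_mask parameter the v5.2 variant receives later in this series, so queue reset can stop a single engine while full teardown covers them all. The idiom, condensed (GENMASK and for_each_inst are the existing kernel helpers; the loop body is summarized):

	uint32_t inst_mask = GENMASK(adev->sdma.num_instances - 1, 0); /* all engines */
	int i;

	for_each_inst(i, inst_mask) {
		/* clear RB_ENABLE and IB_ENABLE for instance i only */
	}

	/* stopping one engine, as the stop_queue path does: */
	sdma_v5_0_gfx_stop(adev, 1 << ring->me);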
*/ -static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev) +static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev, uint32_t inst_mask) { u32 rb_cntl, ib_cntl; int i; - for (i = 0; i < adev->sdma.num_instances; i++) { + for_each_inst(i, inst_mask) { rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); @@ -688,9 +657,11 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable) { u32 f32_cntl; int i; + uint32_t inst_mask; + inst_mask = GENMASK(adev->sdma.num_instances - 1, 0); if (!enable) { - sdma_v5_0_gfx_stop(adev); + sdma_v5_0_gfx_stop(adev, inst_mask); sdma_v5_0_rlc_stop(adev); } @@ -1046,33 +1017,22 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring) int r; u32 tmp; u64 gpu_addr; - volatile uint32_t *cpu_ptr = NULL; tmp = 0xCAFEDEAD; - if (ring->is_mes_queue) { - uint32_t offset = 0; - offset = amdgpu_mes_ctx_get_offs(ring, - AMDGPU_MES_CTX_PADDING_OFFS); - gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); - cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); - *cpu_ptr = tmp; - } else { - r = amdgpu_device_wb_get(adev, &index); - if (r) { - dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); - return r; - } - - gpu_addr = adev->wb.gpu_addr + (index * 4); - adev->wb.wb[index] = cpu_to_le32(tmp); + r = amdgpu_device_wb_get(adev, &index); + if (r) { + dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + return r; } + gpu_addr = adev->wb.gpu_addr + (index * 4); + adev->wb.wb[index] = cpu_to_le32(tmp); + r = amdgpu_ring_alloc(ring, 20); if (r) { DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); - if (!ring->is_mes_queue) - amdgpu_device_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } @@ -1085,10 +1045,7 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring) amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { - if (ring->is_mes_queue) - tmp = le32_to_cpu(*cpu_ptr); - else - tmp = le32_to_cpu(adev->wb.wb[index]); + tmp = le32_to_cpu(adev->wb.wb[index]); if (tmp == 0xDEADBEEF) break; if (amdgpu_emu_mode == 1) @@ -1100,8 +1057,7 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring) if (i >= adev->usec_timeout) r = -ETIMEDOUT; - if (!ring->is_mes_queue) - amdgpu_device_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } @@ -1124,38 +1080,24 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) long r; u32 tmp = 0; u64 gpu_addr; - volatile uint32_t *cpu_ptr = NULL; tmp = 0xCAFEDEAD; memset(&ib, 0, sizeof(ib)); - if (ring->is_mes_queue) { - uint32_t offset = 0; - offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS); - ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); - ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); - - offset = amdgpu_mes_ctx_get_offs(ring, - AMDGPU_MES_CTX_PADDING_OFFS); - gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); - cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); - *cpu_ptr = tmp; - } else { - r = amdgpu_device_wb_get(adev, &index); - if (r) { - dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); - return r; - } + r = amdgpu_device_wb_get(adev, &index); + if (r) { + dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); + return r; + } - gpu_addr = adev->wb.gpu_addr + (index * 4); - adev->wb.wb[index] = cpu_to_le32(tmp);
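With the is_mes_queue branches removed, both SDMA ring tests collapse to the plain writeback-slot pattern: seed a CPU-visible dword with 0xCAFEDEAD, have the engine overwrite it with 0xDEADBEEF, and poll. Condensed sketch, with the packet emission and error paths trimmed (see the hunks for the full version):

	u32 index, tmp;
	u64 gpu_addr;
	int i;

	amdgpu_device_wb_get(adev, &index);		/* CPU-visible scratch dword */
	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);

	/* ... emit an SDMA write of 0xDEADBEEF to gpu_addr, commit the ring ... */

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;			/* the engine executed the packet */
		udelay(1);
	}
	amdgpu_device_wb_free(adev, index);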
+ gpu_addr = adev->wb.gpu_addr + (index * 4); + adev->wb.wb[index] = cpu_to_le32(tmp); - r = amdgpu_ib_get(adev, NULL, 256, - AMDGPU_IB_POOL_DIRECT, &ib); - if (r) { - DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); - goto err0; - } + r = amdgpu_ib_get(adev, NULL, 256, + AMDGPU_IB_POOL_DIRECT, &ib); + if (r) { + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); + goto err0; } ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | @@ -1183,10 +1125,7 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) goto err1; } - if (ring->is_mes_queue) - tmp = le32_to_cpu(*cpu_ptr); - else - tmp = le32_to_cpu(adev->wb.wb[index]); + tmp = le32_to_cpu(adev->wb.wb[index]); if (tmp == 0xDEADBEEF) r = 0; @@ -1197,8 +1136,7 @@ err1: amdgpu_ib_free(&ib, NULL); dma_fence_put(f); err0: - if (!ring->is_mes_queue) - amdgpu_device_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } @@ -1389,6 +1327,36 @@ static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask); } +static int sdma_v5_0_soft_reset_engine(struct amdgpu_device *adev, u32 instance_id) +{ + u32 grbm_soft_reset; + u32 tmp; + + grbm_soft_reset = REG_SET_FIELD(0, + GRBM_SOFT_RESET, SOFT_RESET_SDMA0, + 1); + grbm_soft_reset <<= instance_id; + + tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); + tmp |= grbm_soft_reset; + DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); + tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~grbm_soft_reset; + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); + tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); + return 0; +} + +static const struct amdgpu_sdma_funcs sdma_v5_0_sdma_funcs = { + .stop_kernel_queue = &sdma_v5_0_stop_queue, + .start_kernel_queue = &sdma_v5_0_restore_queue, + .soft_reset_kernel_queue = &sdma_v5_0_soft_reset_engine, +}; + static int sdma_v5_0_early_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; @@ -1431,6 +1399,8 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block) return r; for (i = 0; i < adev->sdma.num_instances; i++) { + mutex_init(&adev->sdma.instance[i].engine_reset_mutex); + adev->sdma.instance[i].funcs = &sdma_v5_0_sdma_funcs; ring = &adev->sdma.instance[i].ring; ring->ring_obj = NULL; ring->use_doorbell = true; @@ -1458,7 +1428,8 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(5, 0, 0): case IP_VERSION(5, 0, 2): case IP_VERSION(5, 0, 5): - if (adev->sdma.instance[0].fw_version >= 35) + if ((adev->sdma.instance[0].fw_version >= 35) && + !amdgpu_sriov_vf(adev)) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; default: @@ -1569,35 +1540,43 @@ static int sdma_v5_0_soft_reset(struct amdgpu_ip_block *ip_block) return 0; } -static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) +static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; - int i, j, r; - u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg; + int r; - if (amdgpu_sriov_vf(adev)) + if (ring->me >= adev->sdma.num_instances) { + dev_err(adev->dev, "sdma instance not found\n"); return -EINVAL; - - for (i = 0; i < adev->sdma.num_instances; i++) { - if (ring == &adev->sdma.instance[i].ring) - break; } - if (i == adev->sdma.num_instances) { - DRM_ERROR("sdma instance not found\n"); + amdgpu_ring_reset_helper_begin(ring, 
timedout_fence); + + amdgpu_amdkfd_suspend(adev, true); + r = amdgpu_sdma_reset_engine(adev, ring->me, true); + amdgpu_amdkfd_resume(adev, true); + if (r) + return r; + + return amdgpu_ring_reset_helper_end(ring, timedout_fence); +} + +static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring) +{ + u32 f32_cntl, freeze, cntl, stat1_reg; + struct amdgpu_device *adev = ring->adev; + int i, j, r = 0; + + if (amdgpu_sriov_vf(adev)) return -EINVAL; - } + i = ring->me; amdgpu_gfx_rlc_enter_safe_mode(adev, 0); /* stop queue */ - ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); - - rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + sdma_v5_0_gfx_stop(adev, 1 << i); /* engine stop SDMA1_F32_CNTL.HALT to 1 and SDMAx_FREEZE freeze bit to 1 */ freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE)); @@ -1628,31 +1607,27 @@ static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0); WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl); +err0: + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); + return r; +} - /* soft reset SDMA_GFX_PREEMPT.IB_PREEMPT = 0 mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 = 1 */ - preempt = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT)); - preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt); - - soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); - soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i; - - WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset); - - udelay(50); - - soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i); - WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset); +static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u32 inst_id = ring->me; + u32 freeze; + int r; + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); /* unfreeze*/ - freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE)); + freeze = RREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE)); freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze); - - r = sdma_v5_0_gfx_resume_instance(adev, i, true); + WREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE), freeze); -err0: + r = sdma_v5_0_gfx_resume_instance(adev, inst_id, true); amdgpu_gfx_rlc_exit_safe_mode(adev, 0); + return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index 2b39a03ff0c1..3bd44c24f692 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -113,6 +113,8 @@ static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev); static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev); static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev); static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev); +static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring); +static int sdma_v5_2_restore_queue(struct amdgpu_ring *ring); static u32 sdma_v5_2_get_reg_offset(struct 
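sdma_v5_0's per-queue reset is now assembled from the three callbacks published through amdgpu_sdma_funcs: stop_kernel_queue halts the ring and sets the FREEZE bit, soft_reset_kernel_queue pulses GRBM_SOFT_RESET, and start_kernel_queue clears FREEZE and resumes the instance. The entry point wraps them with the generic ring-reset helpers, as the reset_queue hunk above shows. Restated in one place, assuming amdgpu_sdma_reset_engine() drives the three callbacks in order (its body is outside this diff):

	static int reset_flow_sketch(struct amdgpu_ring *ring,
				     struct amdgpu_fence *timedout_fence)
	{
		struct amdgpu_device *adev = ring->adev;
		int r;

		amdgpu_ring_reset_helper_begin(ring, timedout_fence);

		amdgpu_amdkfd_suspend(adev, true);
		/* stop_kernel_queue -> soft_reset_kernel_queue ->
		 * start_kernel_queue, for instance ring->me */
		r = amdgpu_sdma_reset_engine(adev, ring->me, true);
		amdgpu_amdkfd_resume(adev, true);
		if (r)
			return r;

		return amdgpu_ring_reset_helper_end(ring, timedout_fence);
	}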
amdgpu_device *adev, u32 instance, u32 internal_offset) { @@ -394,11 +396,9 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se } if ((flags & AMDGPU_FENCE_FLAG_INT)) { - uint32_t ctx = ring->is_mes_queue ? - (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0; /* generate an interrupt */ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP)); - amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx)); + amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0)); } } @@ -407,15 +407,15 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se * sdma_v5_2_gfx_stop - stop the gfx async dma engines * * @adev: amdgpu_device pointer - * + * @inst_mask: mask of dma engine instances to be disabled * Stop the gfx async dma ring buffers. */ -static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev) +static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev, uint32_t inst_mask) { u32 rb_cntl, ib_cntl; int i; - for (i = 0; i < adev->sdma.num_instances; i++) { + for_each_inst(i, inst_mask) { rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); @@ -506,9 +506,11 @@ static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable) { u32 f32_cntl; int i; + uint32_t inst_mask; + inst_mask = GENMASK(adev->sdma.num_instances - 1, 0); if (!enable) { - sdma_v5_2_gfx_stop(adev); + sdma_v5_2_gfx_stop(adev, inst_mask); sdma_v5_2_rlc_stop(adev); } @@ -761,37 +763,49 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev) return 0; } -static int sdma_v5_2_soft_reset(struct amdgpu_ip_block *ip_block) +static int sdma_v5_2_soft_reset_engine(struct amdgpu_device *adev, u32 instance_id) { - struct amdgpu_device *adev = ip_block->adev; u32 grbm_soft_reset; u32 tmp; - int i; - for (i = 0; i < adev->sdma.num_instances; i++) { - grbm_soft_reset = REG_SET_FIELD(0, - GRBM_SOFT_RESET, SOFT_RESET_SDMA0, - 1); - grbm_soft_reset <<= i; + grbm_soft_reset = REG_SET_FIELD(0, + GRBM_SOFT_RESET, SOFT_RESET_SDMA0, + 1); + grbm_soft_reset <<= instance_id; - tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); - tmp |= grbm_soft_reset; - DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); - tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); + tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); + tmp |= grbm_soft_reset; + DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); + tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); - udelay(50); + udelay(50); - tmp &= ~grbm_soft_reset; - WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); - tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); + tmp &= ~grbm_soft_reset; + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); + tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); + return 0; +} +static int sdma_v5_2_soft_reset(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int i; + + for (i = 0; i < adev->sdma.num_instances; i++) { + sdma_v5_2_soft_reset_engine(adev, i); udelay(50); } return 0; } +static const struct amdgpu_sdma_funcs sdma_v5_2_sdma_funcs = { + .stop_kernel_queue = &sdma_v5_2_stop_queue, + .start_kernel_queue = &sdma_v5_2_restore_queue, + .soft_reset_kernel_queue = &sdma_v5_2_soft_reset_engine, +}; + /** * sdma_v5_2_start - setup and start the async dma engines * @@ -903,33 +917,22 @@ static int sdma_v5_2_ring_test_ring(struct 
amdgpu_ring *ring) int r; u32 tmp; u64 gpu_addr; - volatile uint32_t *cpu_ptr = NULL; tmp = 0xCAFEDEAD; - if (ring->is_mes_queue) { - uint32_t offset = 0; - offset = amdgpu_mes_ctx_get_offs(ring, - AMDGPU_MES_CTX_PADDING_OFFS); - gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); - cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); - *cpu_ptr = tmp; - } else { - r = amdgpu_device_wb_get(adev, &index); - if (r) { - dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); - return r; - } - - gpu_addr = adev->wb.gpu_addr + (index * 4); - adev->wb.wb[index] = cpu_to_le32(tmp); + r = amdgpu_device_wb_get(adev, &index); + if (r) { + dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + return r; } + gpu_addr = adev->wb.gpu_addr + (index * 4); + adev->wb.wb[index] = cpu_to_le32(tmp); + r = amdgpu_ring_alloc(ring, 20); if (r) { DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); - if (!ring->is_mes_queue) - amdgpu_device_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } @@ -942,10 +945,7 @@ static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring) amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { - if (ring->is_mes_queue) - tmp = le32_to_cpu(*cpu_ptr); - else - tmp = le32_to_cpu(adev->wb.wb[index]); + tmp = le32_to_cpu(adev->wb.wb[index]); if (tmp == 0xDEADBEEF) break; if (amdgpu_emu_mode == 1) @@ -957,8 +957,7 @@ static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring) if (i >= adev->usec_timeout) r = -ETIMEDOUT; - if (!ring->is_mes_queue) - amdgpu_device_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } @@ -981,37 +980,23 @@ static int sdma_v5_2_ring_test_ib(struct amdgpu_ring *ring, long timeout) long r; u32 tmp = 0; u64 gpu_addr; - volatile uint32_t *cpu_ptr = NULL; tmp = 0xCAFEDEAD; memset(&ib, 0, sizeof(ib)); - if (ring->is_mes_queue) { - uint32_t offset = 0; - offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS); - ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); - ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); - - offset = amdgpu_mes_ctx_get_offs(ring, - AMDGPU_MES_CTX_PADDING_OFFS); - gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); - cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); - *cpu_ptr = tmp; - } else { - r = amdgpu_device_wb_get(adev, &index); - if (r) { - dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); - return r; - } + r = amdgpu_device_wb_get(adev, &index); + if (r) { + dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); + return r; + } - gpu_addr = adev->wb.gpu_addr + (index * 4); - adev->wb.wb[index] = cpu_to_le32(tmp); + gpu_addr = adev->wb.gpu_addr + (index * 4); + adev->wb.wb[index] = cpu_to_le32(tmp); - r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib); - if (r) { - DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); - goto err0; - } + r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib); + if (r) { + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); + goto err0; } ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | @@ -1039,10 +1024,7 @@ static int sdma_v5_2_ring_test_ib(struct amdgpu_ring *ring, long timeout) goto err1; } - if (ring->is_mes_queue) - tmp = le32_to_cpu(*cpu_ptr); - else - tmp = le32_to_cpu(adev->wb.wb[index]); + tmp = le32_to_cpu(adev->wb.wb[index]); if (tmp == 0xDEADBEEF) r = 0; @@ -1053,8 +1035,7 @@ err1: amdgpu_ib_free(&ib, NULL); dma_fence_put(f); err0: - if (!ring->is_mes_queue) - amdgpu_device_wb_free(adev, index); + 
amdgpu_device_wb_free(adev, index); return r; } @@ -1337,6 +1318,8 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block) } for (i = 0; i < adev->sdma.num_instances; i++) { + mutex_init(&adev->sdma.instance[i].engine_reset_mutex); + adev->sdma.instance[i].funcs = &sdma_v5_2_sdma_funcs; ring = &adev->sdma.instance[i].ring; ring->ring_obj = NULL; ring->use_doorbell = true; @@ -1364,11 +1347,13 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(5, 2, 2): case IP_VERSION(5, 2, 3): case IP_VERSION(5, 2, 4): - if (adev->sdma.instance[0].fw_version >= 76) + if ((adev->sdma.instance[0].fw_version >= 76) && + !amdgpu_sriov_vf(adev)) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; case IP_VERSION(5, 2, 5): - if (adev->sdma.instance[0].fw_version >= 34) + if ((adev->sdma.instance[0].fw_version >= 34) && + !amdgpu_sriov_vf(adev)) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; default: @@ -1469,35 +1454,43 @@ static int sdma_v5_2_wait_for_idle(struct amdgpu_ip_block *ip_block) return -ETIMEDOUT; } -static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) +static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; - int i, j, r; - u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg; + int r; - if (amdgpu_sriov_vf(adev)) + if (ring->me >= adev->sdma.num_instances) { + dev_err(adev->dev, "sdma instance not found\n"); return -EINVAL; - - for (i = 0; i < adev->sdma.num_instances; i++) { - if (ring == &adev->sdma.instance[i].ring) - break; } - if (i == adev->sdma.num_instances) { - DRM_ERROR("sdma instance not found\n"); + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + + amdgpu_amdkfd_suspend(adev, true); + r = amdgpu_sdma_reset_engine(adev, ring->me, true); + amdgpu_amdkfd_resume(adev, true); + if (r) + return r; + + return amdgpu_ring_reset_helper_end(ring, timedout_fence); +} + +static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring) +{ + u32 f32_cntl, freeze, cntl, stat1_reg; + struct amdgpu_device *adev = ring->adev; + int i, j, r = 0; + + if (amdgpu_sriov_vf(adev)) return -EINVAL; - } + i = ring->me; amdgpu_gfx_rlc_enter_safe_mode(adev, 0); /* stop queue */ - ib_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); - ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); - - rb_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); - rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + sdma_v5_2_gfx_stop(adev, 1 << i); /*engine stop SDMA1_F32_CNTL.HALT to 1 and SDMAx_FREEZE freeze bit to 1 */ freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE)); @@ -1530,32 +1523,28 @@ static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0); WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl); - /* soft reset SDMA_GFX_PREEMPT.IB_PREEMPT = 0 mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 = 1 */ - preempt = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT)); - preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt); - - soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); - soft_reset |= 1 << 
GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i; - - - WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset); - - udelay(50); - - soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i); +err0: + amdgpu_gfx_rlc_exit_safe_mode(adev, 0); + return r; +} - WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset); +static int sdma_v5_2_restore_queue(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u32 inst_id = ring->me; + u32 freeze; + int r; + amdgpu_gfx_rlc_enter_safe_mode(adev, 0); /* unfreeze and unhalt */ - freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE)); + freeze = RREG32(sdma_v5_2_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE)); freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze); + WREG32(sdma_v5_2_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE), freeze); - r = sdma_v5_2_gfx_resume_instance(adev, i, true); + r = sdma_v5_2_gfx_resume_instance(adev, inst_id, true); -err0: amdgpu_gfx_rlc_exit_safe_mode(adev, 0); + return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index c214c3d2149b..e6d8eddda2bf 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -43,6 +43,8 @@ #include "sdma_common.h" #include "sdma_v6_0.h" #include "v11_structs.h" +#include "mes_userqueue.h" +#include "amdgpu_userq_fence.h" MODULE_FIRMWARE("amdgpu/sdma_6_0_0.bin"); MODULE_FIRMWARE("amdgpu/sdma_6_0_1.bin"); @@ -376,11 +378,9 @@ static void sdma_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se } if (flags & AMDGPU_FENCE_FLAG_INT) { - uint32_t ctx = ring->is_mes_queue ? - (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0; /* generate an interrupt */ amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_TRAP)); - amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx)); + amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0)); } } @@ -891,6 +891,12 @@ static int sdma_v6_0_mqd_init(struct amdgpu_device *adev, void *mqd, m->sdmax_rlcx_rb_aql_cntl = regSDMA0_QUEUE0_RB_AQL_CNTL_DEFAULT; m->sdmax_rlcx_dummy_reg = regSDMA0_QUEUE0_DUMMY_REG_DEFAULT; + m->sdmax_rlcx_csa_addr_lo = lower_32_bits(prop->csa_addr); + m->sdmax_rlcx_csa_addr_hi = upper_32_bits(prop->csa_addr); + + m->sdmax_rlcx_f32_dbg0 = lower_32_bits(prop->fence_address); + m->sdmax_rlcx_f32_dbg1 = upper_32_bits(prop->fence_address); + return 0; } @@ -917,33 +923,22 @@ static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring) int r; u32 tmp; u64 gpu_addr; - volatile uint32_t *cpu_ptr = NULL; tmp = 0xCAFEDEAD; - if (ring->is_mes_queue) { - uint32_t offset = 0; - offset = amdgpu_mes_ctx_get_offs(ring, - AMDGPU_MES_CTX_PADDING_OFFS); - gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); - cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); - *cpu_ptr = tmp; - } else { - r = amdgpu_device_wb_get(adev, &index); - if (r) { - dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); - return r; - } - - gpu_addr = adev->wb.gpu_addr + (index * 4); - adev->wb.wb[index] = cpu_to_le32(tmp); + r = amdgpu_device_wb_get(adev, &index); + if (r) { + dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + return r; } + gpu_addr = adev->wb.gpu_addr + (index * 4); + adev->wb.wb[index] = cpu_to_le32(tmp); + r = amdgpu_ring_alloc(ring, 5); if (r) { DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); - if (!ring->is_mes_queue) - amdgpu_device_wb_free(adev, index); + 
amdgpu_device_wb_free(adev, index); return r; } @@ -956,10 +951,7 @@ static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring) amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { - if (ring->is_mes_queue) - tmp = le32_to_cpu(*cpu_ptr); - else - tmp = le32_to_cpu(adev->wb.wb[index]); + tmp = le32_to_cpu(adev->wb.wb[index]); if (tmp == 0xDEADBEEF) break; if (amdgpu_emu_mode == 1) @@ -971,8 +963,7 @@ static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring) if (i >= adev->usec_timeout) r = -ETIMEDOUT; - if (!ring->is_mes_queue) - amdgpu_device_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } @@ -995,37 +986,23 @@ static int sdma_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) long r; u32 tmp = 0; u64 gpu_addr; - volatile uint32_t *cpu_ptr = NULL; tmp = 0xCAFEDEAD; memset(&ib, 0, sizeof(ib)); - if (ring->is_mes_queue) { - uint32_t offset = 0; - offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS); - ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); - ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); - - offset = amdgpu_mes_ctx_get_offs(ring, - AMDGPU_MES_CTX_PADDING_OFFS); - gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); - cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); - *cpu_ptr = tmp; - } else { - r = amdgpu_device_wb_get(adev, &index); - if (r) { - dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); - return r; - } + r = amdgpu_device_wb_get(adev, &index); + if (r) { + dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); + return r; + } - gpu_addr = adev->wb.gpu_addr + (index * 4); - adev->wb.wb[index] = cpu_to_le32(tmp); + gpu_addr = adev->wb.gpu_addr + (index * 4); + adev->wb.wb[index] = cpu_to_le32(tmp); - r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib); - if (r) { - DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); - goto err0; - } + r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib); + if (r) { + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); + goto err0; } ib.ptr[0] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) | @@ -1053,10 +1030,7 @@ static int sdma_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) goto err1; } - if (ring->is_mes_queue) - tmp = le32_to_cpu(*cpu_ptr); - else - tmp = le32_to_cpu(adev->wb.wb[index]); + tmp = le32_to_cpu(adev->wb.wb[index]); if (tmp == 0xDEADBEEF) r = 0; @@ -1067,8 +1041,7 @@ err1: amdgpu_ib_free(&ib, NULL); dma_fence_put(f); err0: - if (!ring->is_mes_queue) - amdgpu_device_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } @@ -1300,6 +1273,23 @@ static int sdma_v6_0_early_init(struct amdgpu_ip_block *ip_block) struct amdgpu_device *adev = ip_block->adev; int r; + switch (amdgpu_user_queue) { + case -1: + case 0: + default: + adev->sdma.no_user_submission = false; + adev->sdma.disable_uq = true; + break; + case 1: + adev->sdma.no_user_submission = false; + adev->sdma.disable_uq = false; + break; + case 2: + adev->sdma.no_user_submission = true; + adev->sdma.disable_uq = false; + break; + } + r = amdgpu_sdma_init_microcode(adev, 0, true); if (r) return r; @@ -1329,11 +1319,19 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; + /* SDMA user fence event */ + r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX, + GFX_11_0_0__SRCID__SDMA_FENCE, + &adev->sdma.fence_irq); + if (r) + return r; + for (i = 0; i < adev->sdma.num_instances; i++) { ring = &adev->sdma.instance[i].ring; ring->ring_obj = NULL; ring->use_doorbell = true; 
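/*
 * Editor's note -- illustration only, not part of this patch. The
 * sdma_v6_0_early_init() switch earlier in this hunk collapses the
 * tri-state amdgpu_user_queue module parameter into two flags
 * (no_user_submission, disable_uq), which this loop then propagates
 * into each ring. A minimal sketch of that mapping, using a
 * hypothetical helper name; the parameter semantics are read directly
 * off the switch above (1 = kernel and user queues, 2 = user queues
 * only, -1/0/other = kernel queues only with user queues disabled):
 *
 *	static void sdma_uq_param_to_flags(int user_queue_param,
 *					   bool *no_user_submission,
 *					   bool *disable_uq)
 *	{
 *		*no_user_submission = (user_queue_param == 2);
 *		*disable_uq = (user_queue_param != 1 &&
 *			       user_queue_param != 2);
 *	}
 */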
ring->me = i; + ring->no_user_submission = adev->sdma.no_user_submission; DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i, ring->use_doorbell?"true":"false"); @@ -1357,7 +1355,8 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(6, 0, 0): case IP_VERSION(6, 0, 2): case IP_VERSION(6, 0, 3): - if (adev->sdma.instance[0].fw_version >= 21) + if ((adev->sdma.instance[0].fw_version >= 21) && + !amdgpu_sriov_vf(adev)) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; default: @@ -1376,6 +1375,43 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block) else DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n"); + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { + case IP_VERSION(6, 0, 0): + if ((adev->sdma.instance[0].fw_version >= 24) && !adev->sdma.disable_uq) + adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + break; + case IP_VERSION(6, 0, 1): + if ((adev->sdma.instance[0].fw_version >= 18) && !adev->sdma.disable_uq) + adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + break; + case IP_VERSION(6, 0, 2): + if ((adev->sdma.instance[0].fw_version >= 21) && !adev->sdma.disable_uq) + adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + break; + case IP_VERSION(6, 0, 3): + if ((adev->sdma.instance[0].fw_version >= 25) && !adev->sdma.disable_uq) + adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + break; + case IP_VERSION(6, 1, 0): + if ((adev->sdma.instance[0].fw_version >= 14) && !adev->sdma.disable_uq) + adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + break; + case IP_VERSION(6, 1, 1): + if ((adev->sdma.instance[0].fw_version >= 17) && !adev->sdma.disable_uq) + adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + break; + case IP_VERSION(6, 1, 2): + if ((adev->sdma.instance[0].fw_version >= 15) && !adev->sdma.disable_uq) + adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + break; + case IP_VERSION(6, 1, 3): + if ((adev->sdma.instance[0].fw_version >= 10) && !adev->sdma.disable_uq) + adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + break; + default: + break; + } + r = amdgpu_sdma_sysfs_reset_mask_init(adev); if (r) return r; @@ -1399,11 +1435,39 @@ static int sdma_v6_0_sw_fini(struct amdgpu_ip_block *ip_block) return 0; } +static int sdma_v6_0_set_userq_trap_interrupts(struct amdgpu_device *adev, + bool enable) +{ + unsigned int irq_type; + int i, r; + + if (adev->userq_funcs[AMDGPU_HW_IP_DMA]) { + for (i = 0; i < adev->sdma.num_instances; i++) { + irq_type = AMDGPU_SDMA_IRQ_INSTANCE0 + i; + if (enable) + r = amdgpu_irq_get(adev, &adev->sdma.trap_irq, + irq_type); + else + r = amdgpu_irq_put(adev, &adev->sdma.trap_irq, + irq_type); + if (r) + return r; + } + } + + return 0; +} + static int sdma_v6_0_hw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; + int r; - return sdma_v6_0_start(adev); + r = sdma_v6_0_start(adev); + if (r) + return r; + + return sdma_v6_0_set_userq_trap_interrupts(adev, true); } static int sdma_v6_0_hw_fini(struct amdgpu_ip_block *ip_block) @@ -1415,6 +1479,7 @@ static int sdma_v6_0_hw_fini(struct amdgpu_ip_block *ip_block) sdma_v6_0_ctxempty_int_enable(adev, false); sdma_v6_0_enable(adev, false); + sdma_v6_0_set_userq_trap_interrupts(adev, false); return 0; } @@ -1506,29 +1571,29 @@ static int sdma_v6_0_ring_preempt_ib(struct amdgpu_ring *ring) return r; } -static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) +static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring, + unsigned int vmid, 
+ struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; - int i, r; + int r; - if (amdgpu_sriov_vf(adev)) + if (ring->me >= adev->sdma.num_instances) { + dev_err(adev->dev, "sdma instance not found\n"); return -EINVAL; - - for (i = 0; i < adev->sdma.num_instances; i++) { - if (ring == &adev->sdma.instance[i].ring) - break; } - if (i == adev->sdma.num_instances) { - DRM_ERROR("sdma instance not found\n"); - return -EINVAL; - } + amdgpu_ring_reset_helper_begin(ring, timedout_fence); r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true); if (r) return r; - return sdma_v6_0_gfx_resume_instance(adev, i, true); + r = sdma_v6_0_gfx_resume_instance(adev, ring->me, true); + if (r) + return r; + + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev, @@ -1555,25 +1620,9 @@ static int sdma_v6_0_process_trap_irq(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { int instances, queue; - uint32_t mes_queue_id = entry->src_data[0]; DRM_DEBUG("IH: SDMA trap\n"); - if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) { - struct amdgpu_mes_queue *queue; - - mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK; - - spin_lock(&adev->mes.queue_id_lock); - queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id); - if (queue) { - DRM_DEBUG("process smda queue id = %d\n", mes_queue_id); - amdgpu_fence_process(queue->ring); - } - spin_unlock(&adev->mes.queue_id_lock); - return 0; - } - queue = entry->ring_id & 0xf; instances = (entry->ring_id & 0xf0) >> 4; if (instances > 1) { @@ -1595,6 +1644,29 @@ static int sdma_v6_0_process_trap_irq(struct amdgpu_device *adev, return 0; } +static int sdma_v6_0_process_fence_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + u32 doorbell_offset = entry->src_data[0]; + + if (adev->enable_mes && doorbell_offset) { + struct amdgpu_userq_fence_driver *fence_drv = NULL; + struct xarray *xa = &adev->userq_xa; + unsigned long flags; + + doorbell_offset >>= SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT; + + xa_lock_irqsave(xa, flags); + fence_drv = xa_load(xa, doorbell_offset); + if (fence_drv) + amdgpu_userq_fence_driver_process(fence_drv); + xa_unlock_irqrestore(xa, flags); + } + + return 0; +} + static int sdma_v6_0_process_illegal_inst_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -1731,6 +1803,10 @@ static const struct amdgpu_irq_src_funcs sdma_v6_0_trap_irq_funcs = { .process = sdma_v6_0_process_trap_irq, }; +static const struct amdgpu_irq_src_funcs sdma_v6_0_fence_irq_funcs = { + .process = sdma_v6_0_process_fence_irq, +}; + static const struct amdgpu_irq_src_funcs sdma_v6_0_illegal_inst_irq_funcs = { .process = sdma_v6_0_process_illegal_inst_irq, }; @@ -1740,6 +1816,7 @@ static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev) adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 + adev->sdma.num_instances; adev->sdma.trap_irq.funcs = &sdma_v6_0_trap_irq_funcs; + adev->sdma.fence_irq.funcs = &sdma_v6_0_fence_irq_funcs; adev->sdma.illegal_inst_irq.funcs = &sdma_v6_0_illegal_inst_irq_funcs; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c index b2706221df99..b8b06d4c5882 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c @@ -33,7 +33,7 @@ #include "gc/gc_12_0_0_offset.h" #include "gc/gc_12_0_0_sh_mask.h" #include "hdp/hdp_6_0_0_offset.h" -#include 
"ivsrcid/gfx/irqsrcs_gfx_11_0_0.h" +#include "ivsrcid/gfx/irqsrcs_gfx_12_0_0.h" #include "soc15_common.h" #include "soc15.h" @@ -42,6 +42,8 @@ #include "sdma_common.h" #include "sdma_v7_0.h" #include "v12_structs.h" +#include "mes_userqueue.h" +#include "amdgpu_userq_fence.h" MODULE_FIRMWARE("amdgpu/sdma_7_0_0.bin"); MODULE_FIRMWARE("amdgpu/sdma_7_0_1.bin"); @@ -204,66 +206,39 @@ static uint64_t sdma_v7_0_ring_get_wptr(struct amdgpu_ring *ring) static void sdma_v7_0_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - uint32_t *wptr_saved; - uint32_t *is_queue_unmap; - uint64_t aggregated_db_index; - uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_DMA].mqd_size; DRM_DEBUG("Setting write pointer\n"); - if (ring->is_mes_queue) { - wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size); - is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size + - sizeof(uint32_t)); - aggregated_db_index = - amdgpu_mes_get_aggregated_doorbell_index(adev, - ring->hw_prio); - + if (ring->use_doorbell) { + DRM_DEBUG("Using doorbell -- " + "wptr_offs == 0x%08x " + "lower_32_bits(ring->wptr) << 2 == 0x%08x " + "upper_32_bits(ring->wptr) << 2 == 0x%08x\n", + ring->wptr_offs, + lower_32_bits(ring->wptr << 2), + upper_32_bits(ring->wptr << 2)); + /* XXX check if swapping is necessary on BE */ atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr << 2); - *wptr_saved = ring->wptr << 2; - if (*is_queue_unmap) { - WDOORBELL64(aggregated_db_index, ring->wptr << 2); - DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n", - ring->doorbell_index, ring->wptr << 2); - WDOORBELL64(ring->doorbell_index, ring->wptr << 2); - } else { - DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n", - ring->doorbell_index, ring->wptr << 2); - WDOORBELL64(ring->doorbell_index, ring->wptr << 2); - } + DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n", + ring->doorbell_index, ring->wptr << 2); + WDOORBELL64(ring->doorbell_index, ring->wptr << 2); } else { - if (ring->use_doorbell) { - DRM_DEBUG("Using doorbell -- " - "wptr_offs == 0x%08x " - "lower_32_bits(ring->wptr) << 2 == 0x%08x " - "upper_32_bits(ring->wptr) << 2 == 0x%08x\n", - ring->wptr_offs, - lower_32_bits(ring->wptr << 2), - upper_32_bits(ring->wptr << 2)); - /* XXX check if swapping is necessary on BE */ - atomic64_set((atomic64_t *)ring->wptr_cpu_addr, - ring->wptr << 2); - DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n", - ring->doorbell_index, ring->wptr << 2); - WDOORBELL64(ring->doorbell_index, ring->wptr << 2); - } else { - DRM_DEBUG("Not using doorbell -- " - "regSDMA%i_GFX_RB_WPTR == 0x%08x " - "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n", - ring->me, - lower_32_bits(ring->wptr << 2), - ring->me, - upper_32_bits(ring->wptr << 2)); - WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, - ring->me, - regSDMA0_QUEUE0_RB_WPTR), - lower_32_bits(ring->wptr << 2)); - WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, - ring->me, - regSDMA0_QUEUE0_RB_WPTR_HI), - upper_32_bits(ring->wptr << 2)); - } + DRM_DEBUG("Not using doorbell -- " + "regSDMA%i_GFX_RB_WPTR == 0x%08x " + "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n", + ring->me, + lower_32_bits(ring->wptr << 2), + ring->me, + upper_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, + ring->me, + regSDMA0_QUEUE0_RB_WPTR), + lower_32_bits(ring->wptr << 2)); + WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, + ring->me, + regSDMA0_QUEUE0_RB_WPTR_HI), + upper_32_bits(ring->wptr << 2)); } } @@ -407,11 +382,9 @@ static void sdma_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 
addr, u64 se } if (flags & AMDGPU_FENCE_FLAG_INT) { - uint32_t ctx = ring->is_mes_queue ? - (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0; /* generate an interrupt */ amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_TRAP)); - amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx)); + amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0)); } } @@ -829,29 +802,29 @@ static bool sdma_v7_0_check_soft_reset(struct amdgpu_ip_block *ip_block) return false; } -static int sdma_v7_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) +static int sdma_v7_0_reset_queue(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; - int i, r; + int r; - if (amdgpu_sriov_vf(adev)) + if (ring->me >= adev->sdma.num_instances) { + dev_err(adev->dev, "sdma instance not found\n"); return -EINVAL; - - for (i = 0; i < adev->sdma.num_instances; i++) { - if (ring == &adev->sdma.instance[i].ring) - break; } - if (i == adev->sdma.num_instances) { - DRM_ERROR("sdma instance not found\n"); - return -EINVAL; - } + amdgpu_ring_reset_helper_begin(ring, timedout_fence); r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true); if (r) return r; - return sdma_v7_0_gfx_resume_instance(adev, i, true); + r = sdma_v7_0_gfx_resume_instance(adev, ring->me, true); + if (r) + return r; + + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } /** @@ -935,6 +908,12 @@ static int sdma_v7_0_mqd_init(struct amdgpu_device *adev, void *mqd, m->sdmax_rlcx_rb_aql_cntl = 0x4000; //regSDMA0_QUEUE0_RB_AQL_CNTL_DEFAULT; m->sdmax_rlcx_dummy_reg = 0xf; //regSDMA0_QUEUE0_DUMMY_REG_DEFAULT; + m->sdmax_rlcx_csa_addr_lo = lower_32_bits(prop->csa_addr); + m->sdmax_rlcx_csa_addr_hi = upper_32_bits(prop->csa_addr); + + m->sdmax_rlcx_mcu_dbg0 = lower_32_bits(prop->fence_address); + m->sdmax_rlcx_mcu_dbg1 = upper_32_bits(prop->fence_address); + return 0; } @@ -961,33 +940,22 @@ static int sdma_v7_0_ring_test_ring(struct amdgpu_ring *ring) int r; u32 tmp; u64 gpu_addr; - volatile uint32_t *cpu_ptr = NULL; tmp = 0xCAFEDEAD; - if (ring->is_mes_queue) { - uint32_t offset = 0; - offset = amdgpu_mes_ctx_get_offs(ring, - AMDGPU_MES_CTX_PADDING_OFFS); - gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); - cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); - *cpu_ptr = tmp; - } else { - r = amdgpu_device_wb_get(adev, &index); - if (r) { - dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); - return r; - } - - gpu_addr = adev->wb.gpu_addr + (index * 4); - adev->wb.wb[index] = cpu_to_le32(tmp); + r = amdgpu_device_wb_get(adev, &index); + if (r) { + dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + return r; } + gpu_addr = adev->wb.gpu_addr + (index * 4); + adev->wb.wb[index] = cpu_to_le32(tmp); + r = amdgpu_ring_alloc(ring, 5); if (r) { DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); - if (!ring->is_mes_queue) - amdgpu_device_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } @@ -1000,10 +968,7 @@ static int sdma_v7_0_ring_test_ring(struct amdgpu_ring *ring) amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { - if (ring->is_mes_queue) - tmp = le32_to_cpu(*cpu_ptr); - else - tmp = le32_to_cpu(adev->wb.wb[index]); + tmp = le32_to_cpu(adev->wb.wb[index]); if (tmp == 0xDEADBEEF) break; if (amdgpu_emu_mode == 1) @@ -1015,8 +980,7 @@ static int sdma_v7_0_ring_test_ring(struct amdgpu_ring *ring) if (i >= adev->usec_timeout) r = -ETIMEDOUT; - if 
(!ring->is_mes_queue) - amdgpu_device_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } @@ -1039,37 +1003,23 @@ static int sdma_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) long r; u32 tmp = 0; u64 gpu_addr; - volatile uint32_t *cpu_ptr = NULL; tmp = 0xCAFEDEAD; memset(&ib, 0, sizeof(ib)); - if (ring->is_mes_queue) { - uint32_t offset = 0; - offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS); - ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); - ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); - - offset = amdgpu_mes_ctx_get_offs(ring, - AMDGPU_MES_CTX_PADDING_OFFS); - gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); - cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); - *cpu_ptr = tmp; - } else { - r = amdgpu_device_wb_get(adev, &index); - if (r) { - dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); - return r; - } + r = amdgpu_device_wb_get(adev, &index); + if (r) { + dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); + return r; + } - gpu_addr = adev->wb.gpu_addr + (index * 4); - adev->wb.wb[index] = cpu_to_le32(tmp); + gpu_addr = adev->wb.gpu_addr + (index * 4); + adev->wb.wb[index] = cpu_to_le32(tmp); - r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib); - if (r) { - DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); - goto err0; - } + r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib); + if (r) { + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); + goto err0; } ib.ptr[0] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) | @@ -1097,10 +1047,7 @@ static int sdma_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) goto err1; } - if (ring->is_mes_queue) - tmp = le32_to_cpu(*cpu_ptr); - else - tmp = le32_to_cpu(adev->wb.wb[index]); + tmp = le32_to_cpu(adev->wb.wb[index]); if (tmp == 0xDEADBEEF) r = 0; @@ -1111,8 +1058,7 @@ err1: amdgpu_ib_free(&ib, NULL); dma_fence_put(f); err0: - if (!ring->is_mes_queue) - amdgpu_device_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } @@ -1312,6 +1258,23 @@ static int sdma_v7_0_early_init(struct amdgpu_ip_block *ip_block) struct amdgpu_device *adev = ip_block->adev; int r; + switch (amdgpu_user_queue) { + case -1: + case 0: + default: + adev->sdma.no_user_submission = false; + adev->sdma.disable_uq = true; + break; + case 1: + adev->sdma.no_user_submission = false; + adev->sdma.disable_uq = false; + break; + case 2: + adev->sdma.no_user_submission = true; + adev->sdma.disable_uq = false; + break; + } + r = amdgpu_sdma_init_microcode(adev, 0, true); if (r) { DRM_ERROR("Failed to init sdma firmware!\n"); @@ -1337,16 +1300,24 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block) /* SDMA trap event */ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX, - GFX_11_0_0__SRCID__SDMA_TRAP, + GFX_12_0_0__SRCID__SDMA_TRAP, &adev->sdma.trap_irq); if (r) return r; + /* SDMA user fence event */ + r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX, + GFX_12_0_0__SRCID__SDMA_FENCE, + &adev->sdma.fence_irq); + if (r) + return r; + for (i = 0; i < adev->sdma.num_instances; i++) { ring = &adev->sdma.instance[i].ring; ring->ring_obj = NULL; ring->use_doorbell = true; ring->me = i; + ring->no_user_submission = adev->sdma.no_user_submission; DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i, ring->use_doorbell?"true":"false"); @@ -1366,7 +1337,8 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block) adev->sdma.supported_reset = 
amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring); - adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + if (!amdgpu_sriov_vf(adev)) + adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_sdma_sysfs_reset_mask_init(adev); if (r) @@ -1378,6 +1350,16 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block) else DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n"); + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { + case IP_VERSION(7, 0, 0): + case IP_VERSION(7, 0, 1): + if ((adev->sdma.instance[0].fw_version >= 7836028) && !adev->sdma.disable_uq) + adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + break; + default: + break; + } + return r; } @@ -1400,11 +1382,39 @@ static int sdma_v7_0_sw_fini(struct amdgpu_ip_block *ip_block) return 0; } +static int sdma_v7_0_set_userq_trap_interrupts(struct amdgpu_device *adev, + bool enable) +{ + unsigned int irq_type; + int i, r; + + if (adev->userq_funcs[AMDGPU_HW_IP_DMA]) { + for (i = 0; i < adev->sdma.num_instances; i++) { + irq_type = AMDGPU_SDMA_IRQ_INSTANCE0 + i; + if (enable) + r = amdgpu_irq_get(adev, &adev->sdma.trap_irq, + irq_type); + else + r = amdgpu_irq_put(adev, &adev->sdma.trap_irq, + irq_type); + if (r) + return r; + } + } + + return 0; +} + static int sdma_v7_0_hw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; + int r; - return sdma_v7_0_start(adev); + r = sdma_v7_0_start(adev); + if (r) + return r; + + return sdma_v7_0_set_userq_trap_interrupts(adev, true); } static int sdma_v7_0_hw_fini(struct amdgpu_ip_block *ip_block) @@ -1416,6 +1426,7 @@ static int sdma_v7_0_hw_fini(struct amdgpu_ip_block *ip_block) sdma_v7_0_ctx_switch_enable(adev, false); sdma_v7_0_enable(adev, false); + sdma_v7_0_set_userq_trap_interrupts(adev, false); return 0; } @@ -1533,25 +1544,9 @@ static int sdma_v7_0_process_trap_irq(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { int instances, queue; - uint32_t mes_queue_id = entry->src_data[0]; DRM_DEBUG("IH: SDMA trap\n"); - if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) { - struct amdgpu_mes_queue *queue; - - mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK; - - spin_lock(&adev->mes.queue_id_lock); - queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id); - if (queue) { - DRM_DEBUG("process smda queue id = %d\n", mes_queue_id); - amdgpu_fence_process(queue->ring); - } - spin_unlock(&adev->mes.queue_id_lock); - return 0; - } - queue = entry->ring_id & 0xf; instances = (entry->ring_id & 0xf0) >> 4; if (instances > 1) { @@ -1573,6 +1568,29 @@ static int sdma_v7_0_process_trap_irq(struct amdgpu_device *adev, return 0; } +static int sdma_v7_0_process_fence_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + u32 doorbell_offset = entry->src_data[0]; + + if (adev->enable_mes && doorbell_offset) { + struct amdgpu_userq_fence_driver *fence_drv = NULL; + struct xarray *xa = &adev->userq_xa; + unsigned long flags; + + doorbell_offset >>= SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT; + + xa_lock_irqsave(xa, flags); + fence_drv = xa_load(xa, doorbell_offset); + if (fence_drv) + amdgpu_userq_fence_driver_process(fence_drv); + xa_unlock_irqrestore(xa, flags); + } + + return 0; +} + static int sdma_v7_0_process_illegal_inst_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -1710,6 +1728,10 @@ static const struct amdgpu_irq_src_funcs sdma_v7_0_trap_irq_funcs = { .process = sdma_v7_0_process_trap_irq, }; 
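/*
 * Editor's sketch -- not part of this patch. The fence IRQ handler
 * added above (sdma_v7_0_process_fence_irq) turns the doorbell offset
 * carried in entry->src_data[0] into an amdgpu_userq_fence_driver by
 * looking it up in adev->userq_xa. For that lookup to succeed, each
 * user queue's fence driver has to be stored in the xarray under its
 * doorbell index when the queue is created. A hedged illustration with
 * a hypothetical helper name; xa_store_irq() takes the xarray lock
 * with interrupts disabled, pairing with the handler's
 * xa_lock_irqsave():
 */
static int sdma_userq_fence_drv_register(struct amdgpu_device *adev,
					 u32 doorbell_index,
					 struct amdgpu_userq_fence_driver *fence_drv)
{
	/* key by the same doorbell index the IRQ handler computes */
	return xa_err(xa_store_irq(&adev->userq_xa, doorbell_index,
				   fence_drv, GFP_KERNEL));
}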
+static const struct amdgpu_irq_src_funcs sdma_v7_0_fence_irq_funcs = { + .process = sdma_v7_0_process_fence_irq, +}; + static const struct amdgpu_irq_src_funcs sdma_v7_0_illegal_inst_irq_funcs = { .process = sdma_v7_0_process_illegal_inst_irq, }; @@ -1719,6 +1741,7 @@ static void sdma_v7_0_set_irq_funcs(struct amdgpu_device *adev) adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 + adev->sdma.num_instances; adev->sdma.trap_irq.funcs = &sdma_v7_0_trap_irq_funcs; + adev->sdma.fence_irq.funcs = &sdma_v7_0_fence_irq_funcs; adev->sdma.illegal_inst_irq.funcs = &sdma_v7_0_illegal_inst_irq_funcs; } diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 2247f6a94858..e0f139de7991 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -35,6 +35,7 @@ #include "amdgpu_vce.h" #include "atom.h" #include "amd_pcie.h" + #include "si_dpm.h" #include "sid.h" #include "si_ih.h" @@ -44,17 +45,30 @@ #include "dce_v6_0.h" #include "si.h" #include "uvd_v3_1.h" -#include "amdgpu_vkms.h" + +#include "uvd/uvd_4_0_d.h" + +#include "smu/smu_6_0_d.h" +#include "smu/smu_6_0_sh_mask.h" + #include "gca/gfx_6_0_d.h" +#include "gca/gfx_6_0_sh_mask.h" + #include "oss/oss_1_0_d.h" #include "oss/oss_1_0_sh_mask.h" + #include "gmc/gmc_6_0_d.h" +#include"gmc/gmc_6_0_sh_mask.h" + #include "dce/dce_6_0_d.h" -#include "uvd/uvd_4_0_d.h" +#include "dce/dce_6_0_sh_mask.h" + #include "bif/bif_3_0_d.h" #include "bif/bif_3_0_sh_mask.h" +#include "si_enums.h" #include "amdgpu_dm.h" +#include "amdgpu_vkms.h" static const u32 tahiti_golden_registers[] = { @@ -1071,8 +1085,8 @@ static u32 si_smc_rreg(struct amdgpu_device *adev, u32 reg) u32 r; spin_lock_irqsave(&adev->smc_idx_lock, flags); - WREG32(SMC_IND_INDEX_0, (reg)); - r = RREG32(SMC_IND_DATA_0); + WREG32(mmSMC_IND_INDEX_0, (reg)); + r = RREG32(mmSMC_IND_DATA_0); spin_unlock_irqrestore(&adev->smc_idx_lock, flags); return r; } @@ -1082,8 +1096,8 @@ static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v) unsigned long flags; spin_lock_irqsave(&adev->smc_idx_lock, flags); - WREG32(SMC_IND_INDEX_0, (reg)); - WREG32(SMC_IND_DATA_0, (v)); + WREG32(mmSMC_IND_INDEX_0, (reg)); + WREG32(mmSMC_IND_DATA_0, (v)); spin_unlock_irqrestore(&adev->smc_idx_lock, flags); } @@ -1110,20 +1124,20 @@ static void si_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v) } static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = { - {GRBM_STATUS}, + {mmGRBM_STATUS}, {mmGRBM_STATUS2}, {mmGRBM_STATUS_SE0}, {mmGRBM_STATUS_SE1}, {mmSRBM_STATUS}, {mmSRBM_STATUS2}, - {DMA_STATUS_REG + DMA0_REGISTER_OFFSET}, - {DMA_STATUS_REG + DMA1_REGISTER_OFFSET}, + {mmDMA_STATUS_REG + DMA0_REGISTER_OFFSET}, + {mmDMA_STATUS_REG + DMA1_REGISTER_OFFSET}, {mmCP_STAT}, {mmCP_STALLED_STAT1}, {mmCP_STALLED_STAT2}, {mmCP_STALLED_STAT3}, - {GB_ADDR_CONFIG}, - {MC_ARB_RAMCFG}, + {mmGB_ADDR_CONFIG}, + {mmMC_ARB_RAMCFG}, {mmGB_TILE_MODE0}, {mmGB_TILE_MODE1}, {mmGB_TILE_MODE2}, @@ -1156,7 +1170,7 @@ static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = { {mmGB_TILE_MODE29}, {mmGB_TILE_MODE30}, {mmGB_TILE_MODE31}, - {CC_RB_BACKEND_DISABLE, true}, + {mmCC_RB_BACKEND_DISABLE, true}, {mmGC_USER_RB_BACKEND_DISABLE, true}, {mmPA_SC_RASTER_CONFIG, true}, }; @@ -1264,37 +1278,37 @@ static bool si_read_disabled_bios(struct amdgpu_device *adev) u32 rom_cntl; bool r; - bus_cntl = RREG32(R600_BUS_CNTL); + bus_cntl = RREG32(mmBUS_CNTL); if (adev->mode_info.num_crtc) { - d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); - d2vga_control = 
RREG32(AVIVO_D2VGA_CONTROL); - vga_render_control = RREG32(VGA_RENDER_CONTROL); + d1vga_control = RREG32(mmD1VGA_CONTROL); + d2vga_control = RREG32(mmD2VGA_CONTROL); + vga_render_control = RREG32(mmVGA_RENDER_CONTROL); } rom_cntl = RREG32(R600_ROM_CNTL); /* enable the rom */ - WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); + WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK)); if (adev->mode_info.num_crtc) { /* Disable VGA mode */ - WREG32(AVIVO_D1VGA_CONTROL, - (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | - AVIVO_DVGA_CONTROL_TIMING_SELECT))); - WREG32(AVIVO_D2VGA_CONTROL, - (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | - AVIVO_DVGA_CONTROL_TIMING_SELECT))); - WREG32(VGA_RENDER_CONTROL, - (vga_render_control & C_000300_VGA_VSTATUS_CNTL)); + WREG32(mmD1VGA_CONTROL, + (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK | + D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK))); + WREG32(mmD2VGA_CONTROL, + (d2vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK | + D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK))); + WREG32(mmVGA_RENDER_CONTROL, + (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK)); } WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE); r = amdgpu_read_bios(adev); /* restore regs */ - WREG32(R600_BUS_CNTL, bus_cntl); + WREG32(mmBUS_CNTL, bus_cntl); if (adev->mode_info.num_crtc) { - WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); - WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); - WREG32(VGA_RENDER_CONTROL, vga_render_control); + WREG32(mmD1VGA_CONTROL, d1vga_control); + WREG32(mmD2VGA_CONTROL, d2vga_control); + WREG32(mmVGA_RENDER_CONTROL, vga_render_control); } WREG32(R600_ROM_CNTL, rom_cntl); return r; @@ -1331,23 +1345,24 @@ static void si_set_clk_bypass_mode(struct amdgpu_device *adev) { u32 tmp, i; - tmp = RREG32(CG_SPLL_FUNC_CNTL); - tmp |= SPLL_BYPASS_EN; - WREG32(CG_SPLL_FUNC_CNTL, tmp); + tmp = RREG32(mmCG_SPLL_FUNC_CNTL); + tmp |= CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK; + WREG32(mmCG_SPLL_FUNC_CNTL, tmp); - tmp = RREG32(CG_SPLL_FUNC_CNTL_2); - tmp |= SPLL_CTLREQ_CHG; - WREG32(CG_SPLL_FUNC_CNTL_2, tmp); + tmp = RREG32(mmCG_SPLL_FUNC_CNTL_2); + tmp |= CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK; + WREG32(mmCG_SPLL_FUNC_CNTL_2, tmp); for (i = 0; i < adev->usec_timeout; i++) { - if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS) + if (RREG32(mmCG_SPLL_STATUS) & CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK) break; udelay(1); } - tmp = RREG32(CG_SPLL_FUNC_CNTL_2); - tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE); - WREG32(CG_SPLL_FUNC_CNTL_2, tmp); + tmp = RREG32(mmCG_SPLL_FUNC_CNTL_2); + tmp &= ~(CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK | + CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE_MASK); + WREG32(mmCG_SPLL_FUNC_CNTL_2, tmp); tmp = RREG32(MPLL_CNTL_MODE); tmp &= ~MPLL_MCLK_SEL; @@ -1358,21 +1373,21 @@ static void si_spll_powerdown(struct amdgpu_device *adev) { u32 tmp; - tmp = RREG32(SPLL_CNTL_MODE); - tmp |= SPLL_SW_DIR_CONTROL; - WREG32(SPLL_CNTL_MODE, tmp); + tmp = RREG32(mmSPLL_CNTL_MODE); + tmp |= SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK; + WREG32(mmSPLL_CNTL_MODE, tmp); - tmp = RREG32(CG_SPLL_FUNC_CNTL); - tmp |= SPLL_RESET; - WREG32(CG_SPLL_FUNC_CNTL, tmp); + tmp = RREG32(mmCG_SPLL_FUNC_CNTL); + tmp |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK; + WREG32(mmCG_SPLL_FUNC_CNTL, tmp); - tmp = RREG32(CG_SPLL_FUNC_CNTL); - tmp |= SPLL_SLEEP; - WREG32(CG_SPLL_FUNC_CNTL, tmp); + tmp = RREG32(mmCG_SPLL_FUNC_CNTL); + tmp |= CG_SPLL_FUNC_CNTL__SPLL_SLEEP_MASK; + WREG32(mmCG_SPLL_FUNC_CNTL, tmp); - tmp = RREG32(SPLL_CNTL_MODE); - tmp &= ~SPLL_SW_DIR_CONTROL; - 
WREG32(SPLL_CNTL_MODE, tmp); + tmp = RREG32(mmSPLL_CNTL_MODE); + tmp &= ~SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK; + WREG32(mmSPLL_CNTL_MODE, tmp); } static int si_gpu_pci_config_reset(struct amdgpu_device *adev) @@ -1454,14 +1469,14 @@ static void si_vga_set_state(struct amdgpu_device *adev, bool state) { uint32_t temp; - temp = RREG32(CONFIG_CNTL); + temp = RREG32(mmCONFIG_CNTL); if (!state) { temp &= ~(1<<0); temp |= (1<<1); } else { temp &= ~(1<<1); } - WREG32(CONFIG_CNTL, temp); + WREG32(mmCONFIG_CNTL, temp); } static u32 si_get_xclk(struct amdgpu_device *adev) @@ -1469,12 +1484,12 @@ static u32 si_get_xclk(struct amdgpu_device *adev) u32 reference_clock = adev->clock.spll.reference_freq; u32 tmp; - tmp = RREG32(CG_CLKPIN_CNTL_2); - if (tmp & MUX_TCLK_TO_XCLK) + tmp = RREG32(mmCG_CLKPIN_CNTL_2); + if (tmp & CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK_MASK) return TCLK; - tmp = RREG32(CG_CLKPIN_CNTL); - if (tmp & XTALIN_DIVIDE) + tmp = RREG32(mmCG_CLKPIN_CNTL); + if (tmp & CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK) return reference_clock / 4; return reference_clock; @@ -1519,9 +1534,9 @@ static int si_get_pcie_lanes(struct amdgpu_device *adev) if (adev->flags & AMD_IS_APU) return 0; - link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL); + link_width_cntl = RREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL); - switch ((link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT) { + switch ((link_width_cntl & PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT) { case LC_LINK_WIDTH_X1: return 1; case LC_LINK_WIDTH_X2: @@ -1568,13 +1583,13 @@ static void si_set_pcie_lanes(struct amdgpu_device *adev, int lanes) return; } - link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL); - link_width_cntl &= ~LC_LINK_WIDTH_MASK; - link_width_cntl |= mask << LC_LINK_WIDTH_SHIFT; - link_width_cntl |= (LC_RECONFIG_NOW | - LC_RECONFIG_ARC_MISSING_ESCAPE); + link_width_cntl = RREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL); + link_width_cntl &= ~PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK; + link_width_cntl |= mask << PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT; + link_width_cntl |= (PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW_MASK | + PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_ARC_MISSING_ESCAPE_MASK); - WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); + WREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); } static void si_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, @@ -2018,7 +2033,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs = static uint32_t si_get_rev_id(struct amdgpu_device *adev) { - return (RREG32(CC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK) + return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK) >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT; } @@ -2239,9 +2254,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3))) return; - speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); - current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >> - LC_CURRENT_DATA_RATE_SHIFT; + speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL); + current_data_rate = (speed_cntl & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) >> + PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) { if (current_data_rate == 2) { DRM_INFO("PCIE gen 3 link speeds already enabled\n"); @@ -2268,17 +2283,17 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) pcie_capability_set_word(root, PCI_EXP_LNKCTL, 
PCI_EXP_LNKCTL_HAWD); pcie_capability_set_word(adev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD); - tmp = RREG32_PCIE(PCIE_LC_STATUS1); - max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT; - current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT; + tmp = RREG32_PCIE(ixPCIE_LC_STATUS1); + max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >> PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH__SHIFT; + current_lw = (tmp & PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH_MASK) >> PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH__SHIFT; if (current_lw < max_lw) { - tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL); - if (tmp & LC_RENEGOTIATION_SUPPORT) { - tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS); - tmp |= (max_lw << LC_LINK_WIDTH_SHIFT); - tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW; - WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp); + tmp = RREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL); + if (tmp & PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATION_SUPPORT_MASK) { + tmp &= ~(PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK | PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_DIS_MASK); + tmp |= (max_lw << PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT); + tmp |= PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_SUPPORT_MASK | PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATE_EN_MASK | PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW_MASK; + WREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL, tmp); } } @@ -2301,13 +2316,13 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) PCI_EXP_LNKCTL2, &gpu_cfg2); - tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4); - tmp |= LC_SET_QUIESCE; - WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp); + tmp = RREG32_PCIE_PORT(ixPCIE_LC_CNTL4); + tmp |= PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK; + WREG32_PCIE_PORT(ixPCIE_LC_CNTL4, tmp); - tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4); - tmp |= LC_REDO_EQ; - WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp); + tmp = RREG32_PCIE_PORT(ixPCIE_LC_CNTL4); + tmp |= PCIE_LC_CNTL4__LC_REDO_EQ_MASK; + WREG32_PCIE_PORT(ixPCIE_LC_CNTL4, tmp); mdelay(100); @@ -2333,16 +2348,16 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) (PCI_EXP_LNKCTL2_ENTER_COMP | PCI_EXP_LNKCTL2_TX_MARGIN)); - tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4); - tmp &= ~LC_SET_QUIESCE; - WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp); + tmp = RREG32_PCIE_PORT(ixPCIE_LC_CNTL4); + tmp &= ~PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK; + WREG32_PCIE_PORT(ixPCIE_LC_CNTL4, tmp); } } } - speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE; - speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE; - WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl); + speed_cntl |= PCIE_LC_SPEED_CNTL__LC_FORCE_EN_SW_SPEED_CHANGE_MASK | PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_HW_SPEED_CHANGE_MASK; + speed_cntl &= ~PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK; + WREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL, speed_cntl); tmp16 = 0; if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) @@ -2354,13 +2369,13 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL2, PCI_EXP_LNKCTL2_TLS, tmp16); - speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); - speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE; - WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl); + speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL); + speed_cntl |= PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK; + WREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL, speed_cntl); for (i = 0; i < adev->usec_timeout; i++) { - speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); - if ((speed_cntl & 
LC_INITIATE_LINK_SPEED_CHANGE) == 0)
+ speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL);
+ if ((speed_cntl & PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK) == 0)
break;
udelay(1);
}
@@ -2418,121 +2433,121 @@ static void si_program_aspm(struct amdgpu_device *adev)
if (!amdgpu_device_should_use_aspm(adev))
return;
- orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
- data &= ~LC_XMIT_N_FTS_MASK;
- data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_N_FTS_CNTL);
+ data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
+ data |= (0x24 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT) | PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_N_FTS_CNTL, data);
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
- data |= LC_GO_TO_RECOVERY;
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_CNTL3);
+ data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL3, data);
- orig = data = RREG32_PCIE(PCIE_P_CNTL);
- data |= P_IGNORE_EDB_ERR;
+ orig = data = RREG32_PCIE(ixPCIE_P_CNTL);
+ data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK;
if (orig != data)
- WREG32_PCIE(PCIE_P_CNTL, data);
+ WREG32_PCIE(ixPCIE_P_CNTL, data);
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
- data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
- data |= LC_PMI_TO_L1_DIS;
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_CNTL);
+ data &= ~(PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK | PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK);
+ data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
if (!disable_l0s)
- data |= LC_L0S_INACTIVITY(7);
+ data |= (7 << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT);
if (!disable_l1) {
- data |= LC_L1_INACTIVITY(7);
- data &= ~LC_PMI_TO_L1_DIS;
+ data |= (7 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT);
+ data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL, data);
if (!disable_plloff_in_l1) {
bool clk_req_support;
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
- data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
- data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_PWRDOWN_0);
+ data &= ~(PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK | PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK);
+ data |= (7 << PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT) | (7 << PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT);
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_0, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_PWRDOWN_0, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_1);
- data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
- data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_PWRDOWN_1);
+ data &= ~(PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK | PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK);
+ data |= (7 << PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT) | (7 << PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT);
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_1, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_PWRDOWN_1, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_0);
- data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
- data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_PWRDOWN_0);
+ data &= ~(PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK | PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK);
+ data |= (7 << PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT) | (7 << PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT);
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_0, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_PWRDOWN_0, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_1);
- data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
- data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_PWRDOWN_1);
+ data &= ~(PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK | PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK);
+ data |= (7 << PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT) | (7 << PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT);
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_PWRDOWN_1, data);
if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) {
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
- data &= ~PLL_RAMP_UP_TIME_0_MASK;
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_PWRDOWN_0);
+ data &= ~PB0_PIF_PWRDOWN_0__PLL_RAMP_UP_TIME_0_MASK;
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_0, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_PWRDOWN_0, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_1);
- data &= ~PLL_RAMP_UP_TIME_1_MASK;
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_PWRDOWN_1);
+ data &= ~PB0_PIF_PWRDOWN_1__PLL_RAMP_UP_TIME_1_MASK;
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_1, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_PWRDOWN_1, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_2);
- data &= ~PLL_RAMP_UP_TIME_2_MASK;
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_PWRDOWN_2);
+ data &= ~PB0_PIF_PWRDOWN_2__PLL_RAMP_UP_TIME_2_MASK;
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_2, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_PWRDOWN_2, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_3);
- data &= ~PLL_RAMP_UP_TIME_3_MASK;
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_PWRDOWN_3);
+ data &= ~PB0_PIF_PWRDOWN_3__PLL_RAMP_UP_TIME_3_MASK;
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_3, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_PWRDOWN_3, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_0);
- data &= ~PLL_RAMP_UP_TIME_0_MASK;
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_PWRDOWN_0);
+ data &= ~PB1_PIF_PWRDOWN_0__PLL_RAMP_UP_TIME_0_MASK;
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_0, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_PWRDOWN_0, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_1);
- data &= ~PLL_RAMP_UP_TIME_1_MASK;
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_PWRDOWN_1);
+ data &= ~PB1_PIF_PWRDOWN_1__PLL_RAMP_UP_TIME_1_MASK;
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_PWRDOWN_1, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_2);
- data &= ~PLL_RAMP_UP_TIME_2_MASK;
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_PWRDOWN_2);
+ data &= ~PB1_PIF_PWRDOWN_2__PLL_RAMP_UP_TIME_2_MASK;
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_2, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_PWRDOWN_2, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_3);
- data &= ~PLL_RAMP_UP_TIME_3_MASK;
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_PWRDOWN_3);
+ data &= ~PB1_PIF_PWRDOWN_3__PLL_RAMP_UP_TIME_3_MASK;
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_3, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_PWRDOWN_3, data);
}
- orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
- data &= ~LC_DYN_LANES_PWR_STATE_MASK;
- data |= LC_DYN_LANES_PWR_STATE(3);
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL);
+ data &= ~PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK;
+ data |= (3 << PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE__SHIFT);
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL);
- data &= ~LS2_EXIT_TIME_MASK;
+ orig = data = si_pif_phy0_rreg(adev,ixPB0_PIF_CNTL);
+ data &= ~PB0_PIF_CNTL__LS2_EXIT_TIME_MASK;
if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
- data |= LS2_EXIT_TIME(5);
+ data |= (5 << PB0_PIF_CNTL__LS2_EXIT_TIME__SHIFT);
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data);
+ si_pif_phy0_wreg(adev,ixPB0_PIF_CNTL, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL);
- data &= ~LS2_EXIT_TIME_MASK;
+ orig = data = si_pif_phy1_rreg(adev,ixPB1_PIF_CNTL);
+ data &= ~PB1_PIF_CNTL__LS2_EXIT_TIME_MASK;
if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
- data |= LS2_EXIT_TIME(5);
+ data |= (5 << PB1_PIF_CNTL__LS2_EXIT_TIME__SHIFT);
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data);
+ si_pif_phy1_wreg(adev,ixPB1_PIF_CNTL, data);
if (!disable_clkreq &&
!pci_is_root_bus(adev->pdev->bus)) {
@@ -2548,64 +2563,64 @@ static void si_program_aspm(struct amdgpu_device *adev)
}
if (clk_req_support) {
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
- data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_CNTL2);
+ data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK | PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL2, data);
- orig = data = RREG32(THM_CLK_CNTL);
- data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
- data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
+ orig = data = RREG32(mmTHM_CLK_CNTL);
+ data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK | THM_CLK_CNTL__TMON_CLK_SEL_MASK);
+ data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) | (1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT);
if (orig != data)
- WREG32(THM_CLK_CNTL, data);
+ WREG32(mmTHM_CLK_CNTL, data);
- orig = data = RREG32(MISC_CLK_CNTL);
- data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
- data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
+ orig = data = RREG32(mmMISC_CLK_CNTL);
+ data &= ~(MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL_MASK | MISC_CLK_CNTL__ZCLK_SEL_MASK);
+ data |= (1 << MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL__SHIFT) | (1 << MISC_CLK_CNTL__ZCLK_SEL__SHIFT);
if (orig != data)
- WREG32(MISC_CLK_CNTL, data);
+ WREG32(mmMISC_CLK_CNTL, data);
- orig = data = RREG32(CG_CLKPIN_CNTL);
- data &= ~BCLK_AS_XCLK;
+ orig = data = RREG32(mmCG_CLKPIN_CNTL);
+ data &= ~CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK;
if (orig != data)
- WREG32(CG_CLKPIN_CNTL, data);
+ WREG32(mmCG_CLKPIN_CNTL, data);
- orig = data = RREG32(CG_CLKPIN_CNTL_2);
- data &= ~FORCE_BIF_REFCLK_EN;
+ orig = data = RREG32(mmCG_CLKPIN_CNTL_2);
+ data &= ~CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK;
if (orig != data)
- WREG32(CG_CLKPIN_CNTL_2, data);
+ WREG32(mmCG_CLKPIN_CNTL_2, data);
- orig = data = RREG32(MPLL_BYPASSCLK_SEL);
- data &= ~MPLL_CLKOUT_SEL_MASK;
- data |= MPLL_CLKOUT_SEL(4);
+ orig = data = RREG32(mmMPLL_BYPASSCLK_SEL);
+ data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK;
+ data |= 4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT;
if (orig != data)
- WREG32(MPLL_BYPASSCLK_SEL, data);
+ WREG32(mmMPLL_BYPASSCLK_SEL, data);
- orig = data = RREG32(SPLL_CNTL_MODE);
- data &= ~SPLL_REFCLK_SEL_MASK;
+ orig = data = RREG32(mmSPLL_CNTL_MODE);
+ data &= ~SPLL_CNTL_MODE__SPLL_REFCLK_SEL_MASK;
if (orig != data)
- WREG32(SPLL_CNTL_MODE, data);
+ WREG32(mmSPLL_CNTL_MODE, data);
}
}
} else {
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL, data);
}
- orig = data = RREG32_PCIE(PCIE_CNTL2);
- data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
+ orig = data = RREG32_PCIE(ixPCIE_CNTL2);
+ data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK | PCIE_CNTL2__MST_MEM_LS_EN_MASK | PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
if (orig != data)
- WREG32_PCIE(PCIE_CNTL2, data);
+ WREG32_PCIE(ixPCIE_CNTL2, data);
if (!disable_l0s) {
- data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
- if((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
- data = RREG32_PCIE(PCIE_LC_STATUS1);
- if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
- data &= ~LC_L0S_INACTIVITY_MASK;
+ data = RREG32_PCIE_PORT(ixPCIE_LC_N_FTS_CNTL);
+ if((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) == PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) {
+ data = RREG32_PCIE(ixPCIE_LC_STATUS1);
+ if ((data & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK) && (data & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK)) {
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL, data);
}
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index e2089c8da71b..7f18e4875287 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -27,6 +27,8 @@
#include "si.h"
#include "sid.h"
+#include "oss/oss_1_0_d.h"
+#include "oss/oss_1_0_sh_mask.h"
const u32 sdma_offsets[SDMA_MAX_INSTANCE] = {
DMA0_REGISTER_OFFSET,
@@ -38,17 +40,31 @@ static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
static void si_dma_set_irq_funcs(struct amdgpu_device *adev);
+/**
+ * si_dma_ring_get_rptr - get the current read pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current rptr from the hardware (SI).
+ */
static uint64_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
{
return *ring->rptr_cpu_addr;
}
+/**
+ * si_dma_ring_get_wptr - get the current write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current wptr from the hardware (SI).
+ */
static uint64_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
+ return (RREG32(mmDMA_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}
static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
@@ -56,7 +72,7 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- WREG32(DMA_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
+ WREG32(mmDMA_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
}
static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
@@ -117,9 +133,9 @@ static void si_dma_stop(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
/* dma0 */
- rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
- rb_cntl &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
+ rb_cntl = RREG32(mmDMA_GFX_RB_CNTL + sdma_offsets[i]);
+ rb_cntl &= ~DMA_GFX_RB_CNTL__RB_ENABLE_MASK;
+ WREG32(mmDMA_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
}
}
@@ -133,44 +149,44 @@ static int si_dma_start(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
- WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
+ WREG32(mmDMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
+ WREG32(mmDMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
- rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+ rb_cntl |= DMA_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK | DMA_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK;
#endif
- WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
+ WREG32(mmDMA_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
/* Initialize the ring buffer's read and write pointers */
- WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
- WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);
+ WREG32(mmDMA_GFX_RB_RPTR + sdma_offsets[i], 0);
+ WREG32(mmDMA_GFX_RB_WPTR + sdma_offsets[i], 0);
rptr_addr = ring->rptr_gpu_addr;
- WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
- WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);
+ WREG32(mmDMA_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
+ WREG32(mmDMA_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);
- rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+ rb_cntl |= DMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;
- WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
+ WREG32(mmDMA_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
/* enable DMA IBs */
- ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+ ib_cntl = DMA_GFX_IB_CNTL__IB_ENABLE_MASK | DMA_GFX_IB_CNTL__CMD_VMID_FORCE_MASK;
#ifdef __BIG_ENDIAN
- ib_cntl |= DMA_IB_SWAP_ENABLE;
+ ib_cntl |= DMA_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK;
#endif
- WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);
+ WREG32(mmDMA_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
- dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
- dma_cntl &= ~CTXEMPTY_INT_ENABLE;
- WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);
+ dma_cntl = RREG32(mmDMA_CNTL + sdma_offsets[i]);
+ dma_cntl &= ~DMA_CNTL__CTXEMPTY_INT_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + sdma_offsets[i], dma_cntl);
ring->wptr = 0;
- WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
- WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
+ WREG32(mmDMA_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
+ WREG32(mmDMA_GFX_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_GFX_RB_CNTL__RB_ENABLE_MASK);
r = amdgpu_ring_test_helper(ring);
if (r)
@@ -461,7 +477,7 @@ static int si_dma_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- adev->sdma.num_instances = 2;
+ adev->sdma.num_instances = SDMA_MAX_INSTANCE;
si_dma_set_ring_funcs(adev);
si_dma_set_buffer_funcs(adev);
@@ -545,9 +561,9 @@ static bool si_dma_is_idle(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- u32 tmp = RREG32(SRBM_STATUS2);
+ u32 tmp = RREG32(mmSRBM_STATUS2);
- if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
+ if (tmp & (SRBM_STATUS2__DMA_BUSY_MASK | SRBM_STATUS2__DMA1_BUSY_MASK))
return false;
return true;
@@ -583,14 +599,14 @@ static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
- sdma_cntl &= ~TRAP_ENABLE;
- WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
+ sdma_cntl = RREG32(mmDMA_CNTL + DMA0_REGISTER_OFFSET);
+ sdma_cntl &= ~DMA_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
- sdma_cntl |= TRAP_ENABLE;
- WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
+ sdma_cntl = RREG32(mmDMA_CNTL + DMA0_REGISTER_OFFSET);
+ sdma_cntl |= DMA_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
break;
default:
break;
@@ -599,14 +615,14 @@ static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
- sdma_cntl &= ~TRAP_ENABLE;
- WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
+ sdma_cntl = RREG32(mmDMA_CNTL + DMA1_REGISTER_OFFSET);
+ sdma_cntl &= ~DMA_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
- sdma_cntl |= TRAP_ENABLE;
- WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
+ sdma_cntl = RREG32(mmDMA_CNTL + DMA1_REGISTER_OFFSET);
+ sdma_cntl |= DMA_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
break;
default:
break;
@@ -645,11 +661,11 @@ static int si_dma_set_clockgating_state(struct amdgpu_ip_block *ip_block,
offset = DMA0_REGISTER_OFFSET;
else
offset = DMA1_REGISTER_OFFSET;
- orig = data = RREG32(DMA_POWER_CNTL + offset);
- data &= ~MEM_POWER_OVERRIDE;
+ orig = data = RREG32(mmDMA_POWER_CNTL + offset);
+ data &= ~DMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (data != orig)
- WREG32(DMA_POWER_CNTL + offset, data);
- WREG32(DMA_CLK_CTRL + offset, 0x00000100);
+ WREG32(mmDMA_POWER_CNTL + offset, data);
+ WREG32(mmDMA_CLK_CTRL + offset, 0x00000100);
}
} else {
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -657,15 +673,15 @@ static int si_dma_set_clockgating_state(struct amdgpu_ip_block *ip_block,
offset = DMA0_REGISTER_OFFSET;
else
offset = DMA1_REGISTER_OFFSET;
- orig = data = RREG32(DMA_POWER_CNTL + offset);
- data |= MEM_POWER_OVERRIDE;
+ orig = data = RREG32(mmDMA_POWER_CNTL + offset);
+ data |= DMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (data != orig)
- WREG32(DMA_POWER_CNTL + offset, data);
+ WREG32(mmDMA_POWER_CNTL + offset, data);
- orig = data = RREG32(DMA_CLK_CTRL + offset);
+ orig = data = RREG32(mmDMA_CLK_CTRL + offset);
data = 0xff000000;
if (data != orig)
- WREG32(DMA_CLK_CTRL + offset, data);
+ WREG32(mmDMA_CLK_CTRL + offset, data);
}
}
@@ -679,11 +695,11 @@ static int si_dma_set_powergating_state(struct amdgpu_ip_block *ip_block,
struct amdgpu_device *adev = ip_block->adev;
- WREG32(DMA_PGFSM_WRITE, 0x00002000);
- WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
+ WREG32(mmDMA_PGFSM_WRITE, 0x00002000);
+ WREG32(mmDMA_PGFSM_CONFIG, 0x100010ff);
for (tmp = 0; tmp < 5; tmp++)
- WREG32(DMA_PGFSM_WRITE, 0);
+ WREG32(mmDMA_PGFSM_WRITE, 0);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_enums.h b/drivers/gpu/drm/amd/amdgpu/si_enums.h
index d656ef1fa6e1..6da65778292b 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_enums.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_enums.h
@@ -23,115 +23,15 @@
#ifndef SI_ENUMS_H
#define SI_ENUMS_H
-#define VBLANK_INT_MASK (1 << 0)
-#define DC_HPDx_INT_EN (1 << 16)
-#define VBLANK_ACK (1 << 4)
-#define VLINE_ACK (1 << 4)
-
-#define CURSOR_WIDTH 64
-#define CURSOR_HEIGHT 64
-
-#define VGA_VSTATUS_CNTL 0xFFFCFFFF
#define PRIORITY_MARK_MASK 0x7fff
#define PRIORITY_OFF (1 << 16)
#define PRIORITY_ALWAYS_ON (1 << 20)
-#define INTERLEAVE_EN (1 << 0)
-
-#define LATENCY_WATERMARK_MASK(x) ((x) << 16)
-#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
-#define ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8)
-
-#define GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
-#define GRPH_ENDIAN_NONE 0
-#define GRPH_ENDIAN_8IN16 1
-#define GRPH_ENDIAN_8IN32 2
-#define GRPH_ENDIAN_8IN64 3
-#define GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4)
-#define GRPH_RED_SEL_R 0
-#define GRPH_RED_SEL_G 1
-#define GRPH_RED_SEL_B 2
-#define GRPH_RED_SEL_A 3
-#define GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6)
-#define GRPH_GREEN_SEL_G 0
-#define GRPH_GREEN_SEL_B 1
-#define GRPH_GREEN_SEL_A 2
-#define GRPH_GREEN_SEL_R 3
-#define GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8)
-#define GRPH_BLUE_SEL_B 0
-#define GRPH_BLUE_SEL_A 1
-#define GRPH_BLUE_SEL_R 2
-#define GRPH_BLUE_SEL_G 3
-#define GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10)
-#define GRPH_ALPHA_SEL_A 0
-#define GRPH_ALPHA_SEL_R 1
-#define GRPH_ALPHA_SEL_G 2
-#define GRPH_ALPHA_SEL_B 3
-
-#define GRPH_DEPTH(x) (((x) & 0x3) << 0)
-#define GRPH_DEPTH_8BPP 0
-#define GRPH_DEPTH_16BPP 1
-#define GRPH_DEPTH_32BPP 2
-
-#define GRPH_FORMAT(x) (((x) & 0x7) << 8)
-#define GRPH_FORMAT_INDEXED 0
-#define GRPH_FORMAT_ARGB1555 0
-#define GRPH_FORMAT_ARGB565 1
-#define GRPH_FORMAT_ARGB4444 2
-#define GRPH_FORMAT_AI88 3
-#define GRPH_FORMAT_MONO16 4
-#define GRPH_FORMAT_BGRA5551 5
-#define GRPH_FORMAT_ARGB8888 0
-#define GRPH_FORMAT_ARGB2101010 1
-#define GRPH_FORMAT_32BPP_DIG 2
-#define GRPH_FORMAT_8B_ARGB2101010 3
-#define GRPH_FORMAT_BGRA1010102 4
-#define GRPH_FORMAT_8B_BGRA1010102 5
-#define GRPH_FORMAT_RGB111110 6
-#define GRPH_FORMAT_BGR101111 7
-
-#define GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
-#define GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
-#define GRPH_ARRAY_LINEAR_GENERAL 0
-#define GRPH_ARRAY_LINEAR_ALIGNED 1
-#define GRPH_ARRAY_1D_TILED_THIN1 2
-#define GRPH_ARRAY_2D_TILED_THIN1 4
-#define GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
-#define GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
-#define GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
-#define GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
-#define GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
-#define GRPH_PIPE_CONFIG(x) (((x) & 0x1f) << 24)
-
-#define CURSOR_EN (1 << 0)
-#define CURSOR_MODE(x) (((x) & 0x3) << 8)
-#define CURSOR_MONO 0
-#define CURSOR_24_1 1
-#define CURSOR_24_8_PRE_MULT 2
-#define CURSOR_24_8_UNPRE_MULT 3
-#define CURSOR_2X_MAGNIFY (1 << 16)
-#define CURSOR_FORCE_MC_ON (1 << 20)
-#define CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
-#define CURSOR_URGENT_ALWAYS 0
-#define CURSOR_URGENT_1_8 1
-#define CURSOR_URGENT_1_4 2
-#define CURSOR_URGENT_3_8 3
-#define CURSOR_URGENT_1_2 4
-#define CURSOR_UPDATE_PENDING (1 << 0)
-#define CURSOR_UPDATE_TAKEN (1 << 1)
-#define CURSOR_UPDATE_LOCK (1 << 16)
-#define CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
-
-
-#define ES_AND_GS_AUTO 3
-#define RADEON_PACKET_TYPE3 3
-#define CE_PARTITION_BASE 3
-#define BUF_SWAP_32BIT (2 << 16)
#define GFX_POWER_STATUS (1 << 1)
#define GFX_CLOCK_STATUS (1 << 2)
#define GFX_LS_STATUS (1 << 3)
-#define RLC_BUSY_STATUS (1 << 0)
+#define RLC_BUSY_STATUS (1 << 0)
#define RLC_PUD(x) ((x) << 0)
#define RLC_PUD_MASK (0xff << 0)
#define RLC_PDD(x) ((x) << 8)
@@ -140,140 +40,8 @@
#define RLC_TTPD_MASK (0xff << 16)
#define RLC_MSD(x) ((x) << 24)
#define RLC_MSD_MASK (0xff << 24)
-#define WRITE_DATA_ENGINE_SEL(x) ((x) << 30)
-#define WRITE_DATA_DST_SEL(x) ((x) << 8)
-#define EVENT_TYPE(x) ((x) << 0)
-#define EVENT_INDEX(x) ((x) << 8)
-#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
-#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
-#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
-#define GFX6_NUM_GFX_RINGS 1
-#define GFX6_NUM_COMPUTE_RINGS 2
#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
-#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
- (((op) & 0xFF) << 8) | \
- ((n) & 0x3FFF) << 16)
-#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
-#define PACKET3_NOP 0x10
-#define PACKET3_SET_BASE 0x11
-#define PACKET3_BASE_INDEX(x) ((x) << 0)
-#define PACKET3_CLEAR_STATE 0x12
-#define PACKET3_INDEX_BUFFER_SIZE 0x13
-#define PACKET3_DISPATCH_DIRECT 0x15
-#define PACKET3_DISPATCH_INDIRECT 0x16
-#define PACKET3_ALLOC_GDS 0x1B
-#define PACKET3_WRITE_GDS_RAM 0x1C
-#define PACKET3_ATOMIC_GDS 0x1D
-#define PACKET3_ATOMIC 0x1E
-#define PACKET3_OCCLUSION_QUERY 0x1F
-#define PACKET3_SET_PREDICATION 0x20
-#define PACKET3_REG_RMW 0x21
-#define PACKET3_COND_EXEC 0x22
-#define PACKET3_PRED_EXEC 0x23
-#define PACKET3_DRAW_INDIRECT 0x24
-#define PACKET3_DRAW_INDEX_INDIRECT 0x25
-#define PACKET3_INDEX_BASE 0x26
-#define PACKET3_DRAW_INDEX_2 0x27
-#define PACKET3_CONTEXT_CONTROL 0x28
-#define PACKET3_INDEX_TYPE 0x2A
-#define PACKET3_DRAW_INDIRECT_MULTI 0x2C
-#define PACKET3_DRAW_INDEX_AUTO 0x2D
-#define PACKET3_DRAW_INDEX_IMMD 0x2E
-#define PACKET3_NUM_INSTANCES 0x2F
-#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
-#define PACKET3_INDIRECT_BUFFER_CONST 0x31
-#define PACKET3_INDIRECT_BUFFER 0x3F
-#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
-#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
-#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
-#define PACKET3_WRITE_DATA 0x37
-#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38
-#define PACKET3_MEM_SEMAPHORE 0x39
-#define PACKET3_MPEG_INDEX 0x3A
-#define PACKET3_COPY_DW 0x3B
-#define PACKET3_WAIT_REG_MEM 0x3C
-#define PACKET3_MEM_WRITE 0x3D
-#define PACKET3_COPY_DATA 0x40
-#define PACKET3_CP_DMA 0x41
-# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
-# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
-# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
-# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
-# define PACKET3_CP_DMA_DIS_WC (1 << 21)
-# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
-# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
-# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
-# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
-# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
-# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
-# define PACKET3_CP_DMA_CMD_RAW_WAIT (1 << 30)
-#define PACKET3_PFP_SYNC_ME 0x42
-#define PACKET3_SURFACE_SYNC 0x43
-# define PACKET3_DEST_BASE_0_ENA (1 << 0)
-# define PACKET3_DEST_BASE_1_ENA (1 << 1)
-# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
-# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
-# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
-# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
-# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
-# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
-# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
-# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
-# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
-# define PACKET3_DEST_BASE_2_ENA (1 << 19)
-# define PACKET3_DEST_BASE_3_ENA (1 << 21)
-# define PACKET3_TCL1_ACTION_ENA (1 << 22)
-# define PACKET3_TC_ACTION_ENA (1 << 23)
-# define PACKET3_CB_ACTION_ENA (1 << 25)
-# define PACKET3_DB_ACTION_ENA (1 << 26)
-# define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
-# define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
-#define PACKET3_ME_INITIALIZE 0x44
-#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
-#define PACKET3_COND_WRITE 0x45
-#define PACKET3_EVENT_WRITE 0x46
-#define PACKET3_EVENT_WRITE_EOP 0x47
-#define PACKET3_EVENT_WRITE_EOS 0x48
-#define PACKET3_PREAMBLE_CNTL 0x4A
-# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
-# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
-#define PACKET3_ONE_REG_WRITE 0x57
-#define PACKET3_LOAD_CONFIG_REG 0x5F
-#define PACKET3_LOAD_CONTEXT_REG 0x60
-#define PACKET3_LOAD_SH_REG 0x61
-#define PACKET3_SET_CONFIG_REG 0x68
-#define PACKET3_SET_CONFIG_REG_START 0x00002000
-#define PACKET3_SET_CONFIG_REG_END 0x00002c00
-#define PACKET3_SET_CONTEXT_REG 0x69
-#define PACKET3_SET_CONTEXT_REG_START 0x000a000
-#define PACKET3_SET_CONTEXT_REG_END 0x000a400
-#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
-#define PACKET3_SET_RESOURCE_INDIRECT 0x74
-#define PACKET3_SET_SH_REG 0x76
-#define PACKET3_SET_SH_REG_START 0x00002c00
-#define PACKET3_SET_SH_REG_END 0x00003000
-#define PACKET3_SET_SH_REG_OFFSET 0x77
-#define PACKET3_ME_WRITE 0x7A
-#define PACKET3_SCRATCH_RAM_WRITE 0x7D
-#define PACKET3_SCRATCH_RAM_READ 0x7E
-#define PACKET3_CE_WRITE 0x7F
-#define PACKET3_LOAD_CONST_RAM 0x80
-#define PACKET3_WRITE_CONST_RAM 0x81
-#define PACKET3_WRITE_CONST_RAM_OFFSET 0x82
-#define PACKET3_DUMP_CONST_RAM 0x83
-#define PACKET3_INCREMENT_CE_COUNTER 0x84
-#define PACKET3_INCREMENT_DE_COUNTER 0x85
-#define PACKET3_WAIT_ON_CE_COUNTER 0x86
-#define PACKET3_WAIT_ON_DE_COUNTER 0x87
-#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
-#define PACKET3_SET_CE_DE_COUNTERS 0x89
-#define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A
-#define PACKET3_SWITCH_BUFFER 0x8B
-#define PACKET3_SEM_WAIT_ON_SIGNAL (0x1 << 12)
-#define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
-#define PACKET3_SEM_SEL_WAIT (0x7 << 29)
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 5c38e1fb1dca..1df00f8a2406 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -27,6 +27,7 @@
#include "amdgpu_ih.h"
#include "sid.h"
#include "si_ih.h"
+
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
@@ -213,7 +214,7 @@ static int si_ih_resume(struct amdgpu_ip_block *ip_block)
static bool si_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- u32 tmp = RREG32(SRBM_STATUS);
+ u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
return false;
@@ -239,23 +240,23 @@ static int si_ih_soft_reset(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
- u32 tmp = RREG32(SRBM_STATUS);
+ u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;
if (srbm_soft_reset) {
- tmp = RREG32(SRBM_SOFT_RESET);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
- dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(SRBM_SOFT_RESET, tmp);
- tmp = RREG32(SRBM_SOFT_RESET);
+ dev_info(adev->dev, "mmSRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
tmp &= ~srbm_soft_reset;
- WREG32(SRBM_SOFT_RESET, tmp);
- tmp = RREG32(SRBM_SOFT_RESET);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sid.h b/drivers/gpu/drm/amd/amdgpu/sid.h
index cbf232f5235b..cbd4f8951cfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/sid.h
+++ b/drivers/gpu/drm/amd/amdgpu/sid.h
@@ -24,43 +24,12 @@
#ifndef SI_H
#define SI_H
-#define TAHITI_RB_BITMAP_WIDTH_PER_SH 2
-
-#define SI_MAX_SH_GPRS 256
-#define SI_MAX_TEMP_GPRS 16
-#define SI_MAX_SH_THREADS 256
-#define SI_MAX_SH_STACK_ENTRIES 4096
-#define SI_MAX_FRC_EOV_CNT 16384
-#define SI_MAX_BACKENDS 8
-#define SI_MAX_BACKENDS_MASK 0xFF
-#define SI_MAX_BACKENDS_PER_SE_MASK 0x0F
-#define SI_MAX_SIMDS 12
-#define SI_MAX_SIMDS_MASK 0x0FFF
-#define SI_MAX_SIMDS_PER_SE_MASK 0x00FF
-#define SI_MAX_PIPES 8
-#define SI_MAX_PIPES_MASK 0xFF
-#define SI_MAX_PIPES_PER_SIMD_MASK 0x3F
-#define SI_MAX_LDS_NUM 0xFFFF
-#define SI_MAX_TCC 16
-#define SI_MAX_TCC_MASK 0xFFFF
#define SI_MAX_CTLACKS_ASSERTION_WAIT 100
-/* SMC IND accessor regs */
-#define SMC_IND_INDEX_0 0x80
-#define SMC_IND_DATA_0 0x81
-
-#define SMC_IND_ACCESS_CNTL 0x8A
-# define AUTO_INCREMENT_IND_0 (1 << 0)
-#define SMC_MESSAGE_0 0x8B
-#define SMC_RESP_0 0x8C
-
/* CG IND registers are accessed via SMC indirect space + SMC_CG_IND_START */
#define SMC_CG_IND_START 0xc0030000
#define SMC_CG_IND_END 0xc0040000
-#define CG_CGTT_LOCAL_0 0x400
-#define CG_CGTT_LOCAL_1 0x401
-
/* SMC IND registers */
#define SMC_SYSCON_RESET_CNTL 0x80000000
# define RST_REG (1 << 0)
@@ -68,9 +37,6 @@
# define CK_DISABLE (1 << 0)
# define CKEN (1 << 24)
-#define VGA_HDP_CONTROL 0xCA
-#define VGA_MEMORY_DISABLE (1 << 4)
-
#define DCCG_DISP_SLOW_SELECT_REG 0x13F
#define DCCG_DISP1_SLOW_SELECT(x) ((x) << 0)
#define DCCG_DISP1_SLOW_SELECT_MASK (7 << 0)
@@ -79,47 +45,6 @@
#define DCCG_DISP2_SLOW_SELECT_MASK (7 << 4)
#define DCCG_DISP2_SLOW_SELECT_SHIFT 4
-#define CG_SPLL_FUNC_CNTL 0x180
-#define SPLL_RESET (1 << 0)
-#define SPLL_SLEEP (1 << 1)
-#define SPLL_BYPASS_EN (1 << 3)
-#define SPLL_REF_DIV(x) ((x) << 4)
-#define SPLL_REF_DIV_MASK (0x3f << 4)
-#define SPLL_PDIV_A(x) ((x) << 20)
-#define SPLL_PDIV_A_MASK (0x7f << 20)
-#define SPLL_PDIV_A_SHIFT 20
-#define CG_SPLL_FUNC_CNTL_2 0x181
-#define SCLK_MUX_SEL(x) ((x) << 0)
-#define SCLK_MUX_SEL_MASK (0x1ff << 0)
-#define SPLL_CTLREQ_CHG (1 << 23)
-#define SCLK_MUX_UPDATE (1 << 26)
-#define CG_SPLL_FUNC_CNTL_3 0x182
-#define SPLL_FB_DIV(x) ((x) << 0)
-#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
-#define SPLL_FB_DIV_SHIFT 0
-#define SPLL_DITHEN (1 << 28)
-#define CG_SPLL_FUNC_CNTL_4 0x183
-
-#define SPLL_STATUS 0x185
-#define SPLL_CHG_STATUS (1 << 1)
-#define SPLL_CNTL_MODE 0x186
-#define SPLL_SW_DIR_CONTROL (1 << 0)
-# define SPLL_REFCLK_SEL(x) ((x) << 26)
-# define SPLL_REFCLK_SEL_MASK (3 << 26)
-
-#define CG_SPLL_SPREAD_SPECTRUM 0x188
-#define SSEN (1 << 0)
-#define CLK_S(x) ((x) << 4)
-#define CLK_S_MASK (0xfff << 4)
-#define CLK_S_SHIFT 4
-#define CG_SPLL_SPREAD_SPECTRUM_2 0x189
-#define CLK_V(x) ((x) << 0)
-#define CLK_V_MASK (0x3ffffff << 0)
-#define CLK_V_SHIFT 0
-
-#define CG_SPLL_AUTOSCALE_CNTL 0x18b
-# define AUTOSCALE_ON_SS_CLEAR (1 << 9)
-
/* discrete uvd clocks */
#define CG_UPLL_FUNC_CNTL 0x18d
# define UPLL_RESET_MASK 0x00000001
@@ -149,317 +74,13 @@
#define CG_UPLL_SPREAD_SPECTRUM 0x194
# define SSEN_MASK 0x00000001
-#define MPLL_BYPASSCLK_SEL 0x197
-# define MPLL_CLKOUT_SEL(x) ((x) << 8)
-# define MPLL_CLKOUT_SEL_MASK 0xFF00
-
-#define CG_CLKPIN_CNTL 0x198
-# define XTALIN_DIVIDE (1 << 1)
-# define BCLK_AS_XCLK (1 << 2)
-#define CG_CLKPIN_CNTL_2 0x199
-# define FORCE_BIF_REFCLK_EN (1 << 3)
-# define MUX_TCLK_TO_XCLK (1 << 8)
-
-#define THM_CLK_CNTL 0x19b
-# define CMON_CLK_SEL(x) ((x) << 0)
-# define CMON_CLK_SEL_MASK 0xFF
-# define TMON_CLK_SEL(x) ((x) << 8)
-# define TMON_CLK_SEL_MASK 0xFF00
-#define MISC_CLK_CNTL 0x19c
-# define DEEP_SLEEP_CLK_SEL(x) ((x) << 0)
-# define DEEP_SLEEP_CLK_SEL_MASK 0xFF
-# define ZCLK_SEL(x) ((x) << 8)
-# define ZCLK_SEL_MASK 0xFF00
-
-#define CG_THERMAL_CTRL 0x1c0
-#define DPM_EVENT_SRC(x) ((x) << 0)
-#define DPM_EVENT_SRC_MASK (7 << 0)
-#define DIG_THERM_DPM(x) ((x) << 14)
-#define DIG_THERM_DPM_MASK 0x003FC000
-#define DIG_THERM_DPM_SHIFT 14
-#define CG_THERMAL_STATUS 0x1c1
-#define FDO_PWM_DUTY(x) ((x) << 9)
-#define FDO_PWM_DUTY_MASK (0xff << 9)
-#define FDO_PWM_DUTY_SHIFT 9
-#define CG_THERMAL_INT 0x1c2
-#define DIG_THERM_INTH(x) ((x) << 8)
-#define DIG_THERM_INTH_MASK 0x0000FF00
-#define DIG_THERM_INTH_SHIFT 8
-#define DIG_THERM_INTL(x) ((x) << 16)
-#define DIG_THERM_INTL_MASK 0x00FF0000
-#define DIG_THERM_INTL_SHIFT 16
-#define THERM_INT_MASK_HIGH (1 << 24)
-#define THERM_INT_MASK_LOW (1 << 25)
-
-#define CG_MULT_THERMAL_CTRL 0x1c4
-#define TEMP_SEL(x) ((x) << 20)
-#define TEMP_SEL_MASK (0xff << 20)
-#define TEMP_SEL_SHIFT 20
-#define CG_MULT_THERMAL_STATUS 0x1c5
-#define ASIC_MAX_TEMP(x) ((x) << 0)
-#define ASIC_MAX_TEMP_MASK 0x000001ff
-#define ASIC_MAX_TEMP_SHIFT 0
-#define CTF_TEMP(x) ((x) << 9)
-#define CTF_TEMP_MASK 0x0003fe00
-#define CTF_TEMP_SHIFT 9
-
-#define CG_FDO_CTRL0 0x1d5
-#define FDO_STATIC_DUTY(x) ((x) << 0)
-#define FDO_STATIC_DUTY_MASK 0x000000FF
-#define FDO_STATIC_DUTY_SHIFT 0
-#define CG_FDO_CTRL1 0x1d6
-#define FMAX_DUTY100(x) ((x) << 0)
-#define FMAX_DUTY100_MASK 0x000000FF
-#define FMAX_DUTY100_SHIFT 0
-#define CG_FDO_CTRL2 0x1d7
-#define TMIN(x) ((x) << 0)
-#define TMIN_MASK 0x000000FF
-#define TMIN_SHIFT 0
-#define FDO_PWM_MODE(x) ((x) << 11)
-#define FDO_PWM_MODE_MASK (7 << 11)
-#define FDO_PWM_MODE_SHIFT 11
-#define TACH_PWM_RESP_RATE(x) ((x) << 25)
-#define TACH_PWM_RESP_RATE_MASK (0x7f << 25)
-#define TACH_PWM_RESP_RATE_SHIFT 25
-
-#define CG_TACH_CTRL 0x1dc
-# define EDGE_PER_REV(x) ((x) << 0)
-# define EDGE_PER_REV_MASK (0x7 << 0)
-# define EDGE_PER_REV_SHIFT 0
-# define TARGET_PERIOD(x) ((x) << 3)
-# define TARGET_PERIOD_MASK 0xfffffff8
-# define TARGET_PERIOD_SHIFT 3
-#define CG_TACH_STATUS 0x1dd
-# define TACH_PERIOD(x) ((x) << 0)
-# define TACH_PERIOD_MASK 0xffffffff
-# define TACH_PERIOD_SHIFT 0
-
-#define GENERAL_PWRMGT 0x1e0
-# define GLOBAL_PWRMGT_EN (1 << 0)
-# define STATIC_PM_EN (1 << 1)
-# define THERMAL_PROTECTION_DIS (1 << 2)
-# define THERMAL_PROTECTION_TYPE (1 << 3)
-# define SW_SMIO_INDEX(x) ((x) << 6)
-# define SW_SMIO_INDEX_MASK (1 << 6)
-# define SW_SMIO_INDEX_SHIFT 6
-# define VOLT_PWRMGT_EN (1 << 10)
-# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
-#define CG_TPC 0x1e1
-#define SCLK_PWRMGT_CNTL 0x1e2
-# define SCLK_PWRMGT_OFF (1 << 0)
-# define SCLK_LOW_D1 (1 << 1)
-# define FIR_RESET (1 << 4)
-# define FIR_FORCE_TREND_SEL (1 << 5)
-# define FIR_TREND_MODE (1 << 6)
-# define DYN_GFX_CLK_OFF_EN (1 << 7)
-# define GFX_CLK_FORCE_ON (1 << 8)
-# define GFX_CLK_REQUEST_OFF (1 << 9)
-# define GFX_CLK_FORCE_OFF (1 << 10)
-# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
-# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
-# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
-# define DYN_LIGHT_SLEEP_EN (1 << 14)
-
-#define TARGET_AND_CURRENT_PROFILE_INDEX 0x1e6
-# define CURRENT_STATE_INDEX_MASK (0xf << 4)
-# define CURRENT_STATE_INDEX_SHIFT 4
-
-#define CG_FTV 0x1ef
-
-#define CG_FFCT_0 0x1f0
-# define UTC_0(x) ((x) << 0)
-# define UTC_0_MASK (0x3ff << 0)
-# define DTC_0(x) ((x) << 10)
-# define DTC_0_MASK (0x3ff << 10)
-
-#define CG_BSP 0x1ff
-# define BSP(x) ((x) << 0)
-# define BSP_MASK (0xffff << 0)
-# define BSU(x) ((x) << 16)
-# define BSU_MASK (0xf << 16)
-#define CG_AT 0x200
-# define CG_R(x) ((x) << 0)
-# define CG_R_MASK (0xffff << 0)
-# define CG_L(x) ((x) << 16)
-# define CG_L_MASK (0xffff << 16)
-
-#define CG_GIT 0x201
-# define CG_GICST(x) ((x) << 0)
-# define CG_GICST_MASK (0xffff << 0)
-# define CG_GIPOT(x) ((x) << 16)
-# define CG_GIPOT_MASK (0xffff << 16)
-
-#define CG_SSP 0x203
-# define SST(x) ((x) << 0)
-# define SST_MASK (0xffff << 0)
-# define SSTU(x) ((x) << 16)
-# define SSTU_MASK (0xf << 16)
-
-#define CG_DISPLAY_GAP_CNTL 0x20a
-# define DISP1_GAP(x) ((x) << 0)
-# define DISP1_GAP_MASK (3 << 0)
-# define DISP2_GAP(x) ((x) << 2)
-# define DISP2_GAP_MASK (3 << 2)
-# define VBI_TIMER_COUNT(x) ((x) << 4)
-# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
-# define VBI_TIMER_UNIT(x) ((x) << 20)
-# define VBI_TIMER_UNIT_MASK (7 << 20)
-# define DISP1_GAP_MCHG(x) ((x) << 24)
-# define DISP1_GAP_MCHG_MASK (3 << 24)
-# define DISP2_GAP_MCHG(x) ((x) << 26)
-# define DISP2_GAP_MCHG_MASK (3 << 26)
-
-#define CG_ULV_CONTROL 0x21e
-#define CG_ULV_PARAMETER 0x21f
-
-#define SMC_SCRATCH0 0x221
-
-#define CG_CAC_CTRL 0x22e
-# define CAC_WINDOW(x) ((x) << 0)
-# define CAC_WINDOW_MASK 0x00ffffff
-
-#define DMIF_ADDR_CONFIG 0x2F5
-
-#define DMIF_ADDR_CALC 0x300
-
-#define PIPE0_DMIF_BUFFER_CONTROL 0x0328
-# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
-# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
-
-#define SRBM_STATUS 0x394
-#define GRBM_RQ_PENDING (1 << 5)
-#define VMC_BUSY (1 << 8)
-#define MCB_BUSY (1 << 9)
-#define MCB_NON_DISPLAY_BUSY (1 << 10)
-#define MCC_BUSY (1 << 11)
-#define MCD_BUSY (1 << 12)
-#define SEM_BUSY (1 << 14)
-#define IH_BUSY (1 << 17)
-
-#define SRBM_SOFT_RESET 0x398
-#define SOFT_RESET_BIF (1 << 1)
-#define SOFT_RESET_DC (1 << 5)
-#define SOFT_RESET_DMA1 (1 << 6)
-#define SOFT_RESET_GRBM (1 << 8)
-#define SOFT_RESET_HDP (1 << 9)
-#define SOFT_RESET_IH (1 << 10)
-#define SOFT_RESET_MC (1 << 11)
-#define SOFT_RESET_ROM (1 << 14)
-#define SOFT_RESET_SEM (1 << 15)
-#define SOFT_RESET_VMC (1 << 17)
-#define SOFT_RESET_DMA (1 << 20)
-#define SOFT_RESET_TST (1 << 21)
-#define SOFT_RESET_REGBB (1 << 22)
-#define SOFT_RESET_ORB (1 << 23)
-
-#define CC_SYS_RB_BACKEND_DISABLE 0x3A0
-#define GC_USER_SYS_RB_BACKEND_DISABLE 0x3A1
-
-#define SRBM_READ_ERROR 0x3A6
-#define SRBM_INT_CNTL 0x3A8
-#define SRBM_INT_ACK 0x3AA
-
-#define SRBM_STATUS2 0x3B1
-#define DMA_BUSY (1 << 5)
-#define DMA1_BUSY (1 << 6)
-
-#define VM_L2_CNTL 0x500
-#define ENABLE_L2_CACHE (1 << 0)
-#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
-#define L2_CACHE_PTE_ENDIAN_SWAP_MODE(x) ((x) << 2)
-#define L2_CACHE_PDE_ENDIAN_SWAP_MODE(x) ((x) << 4)
-#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
-#define ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE (1 << 10)
-#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 15)
-#define CONTEXT1_IDENTITY_ACCESS_MODE(x) (((x) & 3) << 19)
-#define VM_L2_CNTL2 0x501
-#define INVALIDATE_ALL_L1_TLBS (1 << 0)
-#define INVALIDATE_L2_CACHE (1 << 1)
-#define INVALIDATE_CACHE_MODE(x) ((x) << 26)
-#define INVALIDATE_PTE_AND_PDE_CACHES 0
-#define INVALIDATE_ONLY_PTE_CACHES 1
-#define INVALIDATE_ONLY_PDE_CACHES 2
-#define VM_L2_CNTL3 0x502
-#define BANK_SELECT(x) ((x) << 0)
-#define L2_CACHE_UPDATE_MODE(x) ((x) << 6)
-#define L2_CACHE_BIGK_FRAGMENT_SIZE(x) ((x) << 15)
-#define L2_CACHE_BIGK_ASSOCIATIVITY (1 << 20)
-#define VM_L2_STATUS 0x503
-#define L2_BUSY (1 << 0)
-#define VM_CONTEXT0_CNTL 0x504
-#define ENABLE_CONTEXT (1 << 0)
-#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
-#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
-#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
-#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
-#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
-#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
-#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
-#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
-#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
-#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
-#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
-#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
-#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
-#define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24)
-#define VM_CONTEXT1_CNTL 0x505
-#define VM_CONTEXT0_CNTL2 0x50C
-#define VM_CONTEXT1_CNTL2 0x50D
-#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR 0x50E
-#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR 0x50F
-#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR 0x510
-#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR 0x511
-#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR 0x512
-#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR 0x513
-#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x514
-#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x515
-
-#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x53f
-#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x537
-#define PROTECTIONS_MASK (0xf << 0)
-#define PROTECTIONS_SHIFT 0
- /* bit 0: range
- * bit 1: pde0
- * bit 2: valid
- * bit 3: read
- * bit 4: write
- */
-#define MEMORY_CLIENT_ID_MASK (0xff << 12)
-#define MEMORY_CLIENT_ID_SHIFT 12
-#define MEMORY_CLIENT_RW_MASK (1 << 24)
-#define MEMORY_CLIENT_RW_SHIFT 24
-#define FAULT_VMID_MASK (0xf << 25)
-#define FAULT_VMID_SHIFT 25
-
#define VM_INVALIDATE_REQUEST 0x51E
#define VM_INVALIDATE_RESPONSE 0x51F
-#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x546
-#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x547
-
-#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x54F
-#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR 0x550
-#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR 0x551
-#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR 0x552
-#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR 0x553
-#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR 0x554
-#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR 0x555
-#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR 0x556
-#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x557
-#define VM_CONTEXT1_PAGE_TABLE_START_ADDR 0x558
-
-#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x55F
-#define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x560
-
#define VM_L2_CG 0x570
#define MC_CG_ENABLE (1 << 18)
#define MC_LS_ENABLE (1 << 19)
-#define MC_SHARED_CHMAP 0x801
-#define NOOFCHAN_SHIFT 12
-#define NOOFCHAN_MASK 0x0000f000
-#define MC_SHARED_CHREMAP 0x802
-
#define MC_VM_FB_LOCATION 0x809
#define MC_VM_AGP_TOP 0x80A
#define MC_VM_AGP_BOT 0x80B
@@ -491,21 +112,6 @@
#define MC_CITF_MISC_WR_CG 0x993
#define MC_CITF_MISC_VM_CG 0x994
-#define MC_ARB_RAMCFG 0x9D8
-#define NOOFBANK_SHIFT 0
-#define NOOFBANK_MASK 0x00000003
-#define NOOFRANK_SHIFT 2
-#define NOOFRANK_MASK 0x00000004
-#define NOOFROWS_SHIFT 3
-#define NOOFROWS_MASK 0x00000038
-#define NOOFCOLS_SHIFT 6
-#define NOOFCOLS_MASK 0x000000C0
-#define CHANSIZE_SHIFT 8
-#define CHANSIZE_MASK 0x00000100
-#define CHANSIZE_OVERRIDE (1 << 11)
-#define NOOFGROUPS_SHIFT 12
-#define NOOFGROUPS_MASK 0x00001000
-
#define MC_ARB_DRAM_TIMING 0x9DD
#define MC_ARB_DRAM_TIMING2 0x9DE
@@ -631,20 +237,6 @@
#define CLKS(x) ((x) << 0)
#define CLKS_MASK (0xfff << 0)
-#define HDP_HOST_PATH_CNTL 0xB00
-#define CLOCK_GATING_DIS (1 << 23)
-#define HDP_NONSURFACE_BASE 0xB01
-#define HDP_NONSURFACE_INFO 0xB02
-#define HDP_NONSURFACE_SIZE 0xB03
-
-#define HDP_DEBUG0 0xBCC
-
-#define HDP_ADDR_CONFIG 0xBD2
-#define HDP_MISC_CNTL 0xBD3
-#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
-#define HDP_MEM_POWER_LS 0xBD4
-#define HDP_LS_ENABLE (1 << 0)
-
#define ATC_MISC_CG 0xCD4
#define IH_RB_CNTL 0xF80
@@ -674,8 +266,6 @@
# define MC_WR_CLEAN_CNT(x) ((x) << 20)
# define MC_VMID(x) ((x) << 25)
-#define CONFIG_MEMSIZE 0x150A
-
#define INTERRUPT_CNTL 0x151A
# define IH_DUMMY_RD_OVERRIDE (1 << 0)
# define IH_DUMMY_RD_EN (1 << 1)
@@ -683,486 +273,22 @@
# define GEN_IH_INT_EN (1 << 8)
#define INTERRUPT_CNTL2 0x151B
-#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x1520
-
-#define BIF_FB_EN 0x1524
-#define FB_READ_EN (1 << 0)
-#define FB_WRITE_EN (1 << 1)
-
-#define HDP_REG_COHERENCY_FLUSH_CNTL 0x1528
-
-/* DCE6 ELD audio interface */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x28 /* LPCM */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x29 /* AC3 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x2A /* MPEG1 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x2B /* MP3 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x2C /* MPEG2 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x2D /* AAC */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x2E /* DTS */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x2F /* ATRAC */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x30 /* one bit audio - leave at 0 (default) */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x31 /* Dolby Digital */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x32 /* DTS-HD */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x33 /* MAT-MLP */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x34 /* DTS */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x35 /* WMA Pro */
-# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
-/* max channels minus one. 7 = 8 channels */
-# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
-# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
-# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
-/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
- * bit0 = 32 kHz
- * bit1 = 44.1 kHz
- * bit2 = 48 kHz
- * bit3 = 88.2 kHz
- * bit4 = 96 kHz
- * bit5 = 176.4 kHz
- * bit6 = 192 kHz
- */
-
-#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x37
-# define VIDEO_LIPSYNC(x) (((x) & 0xff) << 0)
-# define AUDIO_LIPSYNC(x) (((x) & 0xff) << 8)
-/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
- * 0 = invalid
- * x = legal delay value
- * 255 = sync not supported
- */
-#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x38
-# define HBR_CAPABLE (1 << 0) /* enabled by default */
-
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x3a
-# define MANUFACTURER_ID(x) (((x) & 0xffff) << 0)
-# define PRODUCT_ID(x) (((x) & 0xffff) << 16)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x3b
-# define SINK_DESCRIPTION_LEN(x) (((x) & 0xff) << 0)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x3c
-# define PORT_ID0(x) (((x) & 0xffffffff) << 0)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x3d
-# define PORT_ID1(x) (((x) & 0xffffffff) << 0)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x3e
-# define DESCRIPTION0(x) (((x) & 0xff) << 0)
-# define DESCRIPTION1(x) (((x) & 0xff) << 8)
-# define DESCRIPTION2(x) (((x) & 0xff) << 16)
-# define DESCRIPTION3(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x3f
-# define DESCRIPTION4(x) (((x) & 0xff) << 0)
-# define DESCRIPTION5(x) (((x) & 0xff) << 8)
-# define DESCRIPTION6(x) (((x) & 0xff) << 16)
-# define DESCRIPTION7(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x40
-# define DESCRIPTION8(x) (((x) & 0xff) << 0)
-# define DESCRIPTION9(x) (((x) & 0xff) << 8)
-# define DESCRIPTION10(x) (((x) & 0xff) << 16)
-# define DESCRIPTION11(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x41
-# define DESCRIPTION12(x) (((x) & 0xff) << 0)
-# define DESCRIPTION13(x) (((x) & 0xff) << 8)
-# define DESCRIPTION14(x) (((x) & 0xff) << 16)
-# define DESCRIPTION15(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x42
-# define DESCRIPTION16(x) (((x) & 0xff) << 0)
-# define DESCRIPTION17(x) (((x) & 0xff) << 8)
-
-#define AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x54
-# define AUDIO_ENABLED (1 << 31)
-
-#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x56
-#define PORT_CONNECTIVITY_MASK (3 << 30)
-#define PORT_CONNECTIVITY_SHIFT 30
-
-#define DC_LB_MEMORY_SPLIT 0x1AC3
-#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
-
-#define PRIORITY_A_CNT 0x1AC6
-#define PRIORITY_MARK_MASK 0x7fff
-#define PRIORITY_OFF (1 << 16)
-#define PRIORITY_ALWAYS_ON (1 << 20)
-#define PRIORITY_B_CNT 0x1AC7
-
-#define DPG_PIPE_ARBITRATION_CONTROL3 0x1B32
-# define LATENCY_WATERMARK_MASK(x) ((x) << 16)
-#define DPG_PIPE_LATENCY_CONTROL 0x1B33
-# define LATENCY_LOW_WATERMARK(x) ((x) << 0)
-# define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
-
-/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
-#define VLINE_STATUS 0x1AEE
-# define VLINE_OCCURRED (1 << 0)
-# define VLINE_ACK (1 << 4)
-# define VLINE_STAT (1 << 12)
-# define VLINE_INTERRUPT (1 << 16)
-# define VLINE_INTERRUPT_TYPE (1 << 17)
-/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
-#define VBLANK_STATUS 0x1AEF
-# define VBLANK_OCCURRED (1 << 0)
-# define VBLANK_ACK (1 << 4)
-# define VBLANK_STAT (1 << 12)
-# define VBLANK_INTERRUPT (1 << 16)
-# define VBLANK_INTERRUPT_TYPE (1 << 17)
-
-/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
-#define INT_MASK 0x1AD0
-# define VBLANK_INT_MASK (1 << 0)
-# define VLINE_INT_MASK (1 << 4)
-
-#define DISP_INTERRUPT_STATUS 0x183D
-# define LB_D1_VLINE_INTERRUPT (1 << 2)
-# define LB_D1_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD1_INTERRUPT (1 << 17)
-# define DC_HPD1_RX_INTERRUPT (1 << 18)
-# define DACA_AUTODETECT_INTERRUPT (1 << 22)
-# define DACB_AUTODETECT_INTERRUPT (1 << 23)
-# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
-# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
-#define DISP_INTERRUPT_STATUS_CONTINUE 0x183E
-# define LB_D2_VLINE_INTERRUPT (1 << 2)
-# define LB_D2_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD2_INTERRUPT (1 << 17)
-# define DC_HPD2_RX_INTERRUPT (1 << 18)
-# define DISP_TIMER_INTERRUPT (1 << 24)
-#define DISP_INTERRUPT_STATUS_CONTINUE2 0x183F
-# define LB_D3_VLINE_INTERRUPT (1 << 2)
-# define LB_D3_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD3_INTERRUPT (1 << 17)
-# define DC_HPD3_RX_INTERRUPT (1 << 18)
-#define DISP_INTERRUPT_STATUS_CONTINUE3 0x1840
-# define LB_D4_VLINE_INTERRUPT (1 << 2)
-# define LB_D4_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD4_INTERRUPT (1 << 17)
-# define DC_HPD4_RX_INTERRUPT (1 << 18)
-#define DISP_INTERRUPT_STATUS_CONTINUE4 0x1853
-# define LB_D5_VLINE_INTERRUPT (1 << 2)
-# define LB_D5_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD5_INTERRUPT (1 << 17)
-# define DC_HPD5_RX_INTERRUPT (1 << 18)
-#define DISP_INTERRUPT_STATUS_CONTINUE5 0x1854
-# define LB_D6_VLINE_INTERRUPT (1 << 2)
-# define LB_D6_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD6_INTERRUPT (1 << 17)
-# define DC_HPD6_RX_INTERRUPT (1 << 18)
-
-/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
-#define GRPH_INT_STATUS 0x1A16
-# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
-# define GRPH_PFLIP_INT_CLEAR (1 << 8)
-/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
-#define GRPH_INT_CONTROL 0x1A17
-# define GRPH_PFLIP_INT_MASK (1 << 0)
-# define GRPH_PFLIP_INT_TYPE (1 << 8)
-
-#define DAC_AUTODETECT_INT_CONTROL 0x19F2
-
-#define DC_HPD1_INT_STATUS 0x1807
-#define DC_HPD2_INT_STATUS 0x180A
-#define DC_HPD3_INT_STATUS 0x180D
-#define DC_HPD4_INT_STATUS 0x1810
-#define DC_HPD5_INT_STATUS 0x1813
-#define DC_HPD6_INT_STATUS 0x1816
-# define DC_HPDx_INT_STATUS (1 << 0)
-# define DC_HPDx_SENSE (1 << 1)
-# define DC_HPDx_RX_INT_STATUS (1 << 8)
-
-#define DC_HPD1_INT_CONTROL 0x1808
-#define DC_HPD2_INT_CONTROL 0x180B
-#define DC_HPD3_INT_CONTROL 0x180E
-#define DC_HPD4_INT_CONTROL 0x1811
-#define DC_HPD5_INT_CONTROL 0x1814
-#define DC_HPD6_INT_CONTROL 0x1817
-# define DC_HPDx_INT_ACK (1 << 0)
-# define DC_HPDx_INT_POLARITY (1 << 8)
-# define DC_HPDx_INT_EN (1 << 16)
-# define DC_HPDx_RX_INT_ACK (1 << 20)
-# define DC_HPDx_RX_INT_EN (1 << 24)
-
-#define DC_HPD1_CONTROL 0x1809
-#define DC_HPD2_CONTROL 0x180C
-#define DC_HPD3_CONTROL 0x180F
-#define DC_HPD4_CONTROL 0x1812
-#define DC_HPD5_CONTROL 0x1815
-#define DC_HPD6_CONTROL 0x1818
-# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
-# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
-# define DC_HPDx_EN (1 << 28)
-
-#define DPG_PIPE_STUTTER_CONTROL 0x1B35
-# define STUTTER_ENABLE (1 << 0)
-
-/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
-#define CRTC_STATUS_FRAME_COUNT 0x1BA6
-
-/* Audio clocks */
-#define DCCG_AUDIO_DTO0_PHASE 0x05b0
-#define DCCG_AUDIO_DTO0_MODULE 0x05b4
-#define DCCG_AUDIO_DTO1_PHASE 0x05c0
-#define DCCG_AUDIO_DTO1_MODULE 0x05c4
-
-#define GRBM_CNTL 0x2000
-#define GRBM_READ_TIMEOUT(x) ((x) << 0)
-
-#define GRBM_STATUS2 0x2002
-#define RLC_RQ_PENDING (1 << 0)
-#define RLC_BUSY (1 << 8)
-#define TC_BUSY (1 << 9)
-
-#define GRBM_STATUS 0x2004
-#define CMDFIFO_AVAIL_MASK 0x0000000F
-#define RING2_RQ_PENDING (1 << 4)
-#define SRBM_RQ_PENDING (1 << 5)
-#define RING1_RQ_PENDING (1 << 6)
-#define CF_RQ_PENDING (1 << 7)
-#define PF_RQ_PENDING (1 << 8)
-#define GDS_DMA_RQ_PENDING (1 << 9)
-#define GRBM_EE_BUSY (1 << 10)
-#define DB_CLEAN (1 << 12)
-#define CB_CLEAN (1 << 13)
-#define TA_BUSY (1 << 14)
-#define GDS_BUSY (1 << 15)
-#define VGT_BUSY (1 << 17)
-#define IA_BUSY_NO_DMA (1 << 18)
-#define IA_BUSY (1 << 19)
-#define SX_BUSY (1 << 20)
-#define SPI_BUSY (1 << 22)
-#define BCI_BUSY (1 << 23)
-#define SC_BUSY (1 << 24)
-#define PA_BUSY (1 << 25)
-#define DB_BUSY (1 << 26)
-#define CP_COHERENCY_BUSY (1 << 28)
-#define CP_BUSY (1 << 29)
-#define CB_BUSY (1 << 30)
-#define GUI_ACTIVE (1 << 31)
-#define GRBM_STATUS_SE0 0x2005
-#define GRBM_STATUS_SE1 0x2006
-#define SE_DB_CLEAN (1 << 1)
-#define SE_CB_CLEAN (1 << 2)
-#define SE_BCI_BUSY (1 << 22)
-#define SE_VGT_BUSY (1 << 23)
-#define SE_PA_BUSY (1 << 24)
-#define SE_TA_BUSY (1 << 25)
-#define SE_SX_BUSY (1 << 26)
-#define SE_SPI_BUSY (1 << 27)
-#define SE_SC_BUSY (1 << 29)
-#define SE_DB_BUSY (1 << 30)
-#define SE_CB_BUSY (1 << 31)
-
-#define GRBM_INT_CNTL 0x2018
-# define RDERR_INT_ENABLE (1 << 0)
-# define GUI_IDLE_INT_ENABLE (1 << 19)
-
-#define CP_STRMOUT_CNTL 0x213F
-#define SCRATCH_REG0 0x2140
-#define SCRATCH_REG1 0x2141
-#define SCRATCH_REG2 0x2142
-#define SCRATCH_REG3 0x2143
-#define SCRATCH_REG4 0x2144
-#define SCRATCH_REG5 0x2145
-#define SCRATCH_REG6 0x2146
-#define SCRATCH_REG7 0x2147
-
-#define SCRATCH_UMSK 0x2150
-#define SCRATCH_ADDR 0x2151
-
-#define CP_SEM_WAIT_TIMER 0x216F
-
-#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x2172
-
-#define CP_ME_CNTL 0x21B6
-#define CP_CE_HALT (1 << 24)
-#define CP_PFP_HALT (1 << 26)
-#define CP_ME_HALT (1 << 28)
-
-#define CP_COHER_CNTL2 0x217A
-
-#define CP_RB2_RPTR 0x21BE
-#define CP_RB1_RPTR 0x21BF
-#define CP_RB0_RPTR 0x21C0
-#define CP_RB_WPTR_DELAY 0x21C1
-
-#define CP_QUEUE_THRESHOLDS 0x21D8
-#define ROQ_IB1_START(x) ((x) << 0)
-#define ROQ_IB2_START(x) ((x) << 8)
-#define CP_MEQ_THRESHOLDS 0x21D9
-#define MEQ1_START(x) ((x) << 0)
-#define MEQ2_START(x) ((x) << 8)
-
-#define CP_PERFMON_CNTL 0x21FF
-
#define VGT_VTX_VECT_EJECT_REG 0x222C
-
#define VGT_ESGS_RING_SIZE 0x2232
#define VGT_GSVS_RING_SIZE 0x2233
-
#define VGT_GS_VERTEX_REUSE 0x2235
-
#define VGT_PRIMITIVE_TYPE 0x2256
#define VGT_INDEX_TYPE 0x2257
-
#define VGT_NUM_INDICES 0x225C
#define VGT_NUM_INSTANCES 0x225D
-
#define VGT_TF_RING_SIZE 0x2262
-
#define VGT_HS_OFFCHIP_PARAM 0x226C
-
#define VGT_TF_MEMORY_BASE 0x226E
-#define PA_CL_ENHANCE 0x2285
-#define CLIP_VTX_REORDER_ENA (1 << 0)
-#define NUM_CLIP_SEQ(x) ((x) << 1)
-
-#define PA_SU_LINE_STIPPLE_VALUE 0x2298
-
-#define PA_SC_LINE_STIPPLE_STATE 0x22C4
-
-#define PA_SC_FORCE_EOV_MAX_CNTS 0x22C9
-#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
-#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
-
-#define PA_SC_FIFO_SIZE 0x22F3
-#define SC_FRONTEND_PRIM_FIFO_SIZE(x) ((x) << 0)
-#define SC_BACKEND_PRIM_FIFO_SIZE(x) ((x) << 6)
-#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 15)
-#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 23)
-
#define PA_SC_ENHANCE 0x22FC
-#define SQ_CONFIG 0x2300
-
-#define SQC_CACHES 0x2302
-
-#define SQ_POWER_THROTTLE 0x2396
-#define MIN_POWER(x) ((x) << 0)
-#define MIN_POWER_MASK (0x3fff << 0)
-#define MIN_POWER_SHIFT 0
-#define MAX_POWER(x) ((x) << 16)
-#define MAX_POWER_MASK (0x3fff << 16)
-#define MAX_POWER_SHIFT 0
-#define SQ_POWER_THROTTLE2 0x2397
-#define MAX_POWER_DELTA(x) ((x) << 0)
-#define MAX_POWER_DELTA_MASK (0x3fff << 0)
-#define MAX_POWER_DELTA_SHIFT 0
-#define STI_SIZE(x) ((x) << 16)
-#define STI_SIZE_MASK (0x3ff << 16)
-#define STI_SIZE_SHIFT 16
-#define LTI_RATIO(x) ((x) << 27)
-#define LTI_RATIO_MASK (0xf << 27)
-#define LTI_RATIO_SHIFT 27
-
-#define SX_DEBUG_1 0x2418
-
-#define SPI_STATIC_THREAD_MGMT_1 0x2438
-#define SPI_STATIC_THREAD_MGMT_2 0x2439
-#define SPI_STATIC_THREAD_MGMT_3 0x243A
-#define SPI_PS_MAX_WAVE_ID 0x243B
-
-#define SPI_CONFIG_CNTL 0x2440
-
-#define SPI_CONFIG_CNTL_1 0x244F
-#define VTX_DONE_DELAY(x) ((x) << 0)
-#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
-
-#define CGTS_TCC_DISABLE 0x2452
-#define CGTS_USER_TCC_DISABLE 0x2453
-#define TCC_DISABLE_MASK 0xFFFF0000
-#define TCC_DISABLE_SHIFT 16
-#define CGTS_SM_CTRL_REG 0x2454
-#define OVERRIDE (1 << 21)
-#define LS_OVERRIDE (1 << 22)
-
-#define SPI_LB_CU_MASK 0x24D5
-
#define TA_CNTL_AUX 0x2542
-#define CC_RB_BACKEND_DISABLE 0x263D
-#define BACKEND_DISABLE(x) ((x) << 16)
-#define GB_ADDR_CONFIG 0x263E
-#define NUM_PIPES(x) ((x) << 0)
-#define NUM_PIPES_MASK 0x00000007
-#define NUM_PIPES_SHIFT 0
-#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
-#define PIPE_INTERLEAVE_SIZE_MASK 0x00000070
-#define PIPE_INTERLEAVE_SIZE_SHIFT 4
-#define NUM_SHADER_ENGINES(x) ((x) << 12)
-#define NUM_SHADER_ENGINES_MASK 0x00003000
-#define NUM_SHADER_ENGINES_SHIFT 12
-#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
-#define SHADER_ENGINE_TILE_SIZE_MASK 0x00070000
-#define SHADER_ENGINE_TILE_SIZE_SHIFT 16
-#define NUM_GPUS(x) ((x) << 20)
-#define NUM_GPUS_MASK 0x00700000
-#define NUM_GPUS_SHIFT 20
-#define MULTI_GPU_TILE_SIZE(x) ((x) << 24)
-#define MULTI_GPU_TILE_SIZE_MASK 0x03000000
-#define MULTI_GPU_TILE_SIZE_SHIFT 24
-#define ROW_SIZE(x) ((x) << 28)
-#define ROW_SIZE_MASK 0x30000000
-#define ROW_SIZE_SHIFT 28
-
-#define CB_PERFCOUNTER0_SELECT0 0x2688
-#define CB_PERFCOUNTER0_SELECT1 0x2689
-#define CB_PERFCOUNTER1_SELECT0 0x268A
-#define CB_PERFCOUNTER1_SELECT1 0x268B
-#define CB_PERFCOUNTER2_SELECT0 0x268C
-#define CB_PERFCOUNTER2_SELECT1 0x268D
-#define CB_PERFCOUNTER3_SELECT0 0x268E
-#define CB_PERFCOUNTER3_SELECT1 0x268F
-
-#define CB_CGTT_SCLK_CTRL 0x2698
-
-#define TCP_CHAN_STEER_LO 0x2B03
-#define TCP_CHAN_STEER_HI 0x2B94
-
-#define CP_RB0_BASE 0x3040
-#define CP_RB0_CNTL 0x3041
-#define RB_BUFSZ(x) ((x) << 0)
-#define RB_BLKSZ(x) ((x) << 8)
-#define BUF_SWAP_32BIT (2 << 16)
-#define RB_NO_UPDATE (1 << 27)
-#define RB_RPTR_WR_ENA (1 << 31)
-
-#define CP_RB0_RPTR_ADDR 0x3043
-#define CP_RB0_RPTR_ADDR_HI 0x3044
-#define CP_RB0_WPTR 0x3045
-
-#define CP_PFP_UCODE_ADDR 0x3054
-#define CP_PFP_UCODE_DATA 0x3055
-#define CP_ME_RAM_RADDR 0x3056
-#define CP_ME_RAM_WADDR 0x3057
-#define CP_ME_RAM_DATA 0x3058
-
-#define CP_CE_UCODE_ADDR 0x305A
-#define CP_CE_UCODE_DATA 0x305B
-
-#define CP_RB1_BASE 0x3060
-#define CP_RB1_CNTL 0x3061
-#define CP_RB1_RPTR_ADDR 0x3062
-#define CP_RB1_RPTR_ADDR_HI 0x3063
-#define CP_RB1_WPTR 0x3064
-#define CP_RB2_BASE 0x3065
-#define CP_RB2_CNTL 0x3066
-#define CP_RB2_RPTR_ADDR 0x3067
-#define CP_RB2_RPTR_ADDR_HI 0x3068
-#define CP_RB2_WPTR 0x3069
-#define CP_INT_CNTL_RING0 0x306A
-#define CP_INT_CNTL_RING1 0x306B
-#define CP_INT_CNTL_RING2 0x306C
-# define CNTX_BUSY_INT_ENABLE (1 << 19)
-# define CNTX_EMPTY_INT_ENABLE (1 << 20)
-# define WAIT_MEM_SEM_INT_ENABLE (1 << 21)
-# define TIME_STAMP_INT_ENABLE (1 << 26)
-# define CP_RINGID2_INT_ENABLE (1 << 29)
-# define CP_RINGID1_INT_ENABLE (1 << 30)
-# define CP_RINGID0_INT_ENABLE (1 << 31)
-#define CP_INT_STATUS_RING0 0x306D
-#define CP_INT_STATUS_RING1 0x306E
-#define CP_INT_STATUS_RING2 0x306F
-# define WAIT_MEM_SEM_INT_STAT (1 << 21)
-# define TIME_STAMP_INT_STAT (1 << 26)
-# define CP_RINGID2_INT_STAT (1 << 29)
-# define CP_RINGID1_INT_STAT (1 << 30)
-# define CP_RINGID0_INT_STAT (1 << 31)
-
// #define PA_SC_RASTER_CONFIG 0xA0D4
# define RB_XSEL2(x) ((x) << 4)
# define RB_XSEL2_MASK (0x3 << 4)
@@ -1185,171 +311,14 @@
# define SE_YSEL(x) ((x) << 28)
# define SE_YSEL_MASK (0x3 << 28)
-/* PIF PHY0 registers idx/data 0x8/0xc */
-#define PB0_PIF_CNTL 0x10
-# define LS2_EXIT_TIME(x) ((x) << 17)
-# define LS2_EXIT_TIME_MASK (0x7 << 17)
-# define LS2_EXIT_TIME_SHIFT 17
-#define PB0_PIF_PAIRING 0x11
-# define MULTI_PIF (1 << 25)
-#define PB0_PIF_PWRDOWN_0 0x12
-# define PLL_POWER_STATE_IN_TXS2_0(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_0_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_0_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_0(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_0_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_0_SHIFT 10
-# define PLL_RAMP_UP_TIME_0(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_0_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_0_SHIFT 24
-#define PB0_PIF_PWRDOWN_1 0x13
-# define PLL_POWER_STATE_IN_TXS2_1(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_1_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_1_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_1(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_1_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_1_SHIFT 10
-# define PLL_RAMP_UP_TIME_1(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_1_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_1_SHIFT 24
-
-#define PB0_PIF_PWRDOWN_2 0x17
-# define PLL_POWER_STATE_IN_TXS2_2(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_2_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_2_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_2(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_2_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_2_SHIFT 10
-# define PLL_RAMP_UP_TIME_2(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_2_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_2_SHIFT 24
-#define PB0_PIF_PWRDOWN_3 0x18
-# define PLL_POWER_STATE_IN_TXS2_3(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_3_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_3_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_3(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_3_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_3_SHIFT 10
-# define PLL_RAMP_UP_TIME_3(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_3_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_3_SHIFT 24
-/* PIF PHY1 registers idx/data 0x10/0x14 */
-#define PB1_PIF_CNTL 0x10
-#define PB1_PIF_PAIRING 0x11
-#define PB1_PIF_PWRDOWN_0 0x12
-#define PB1_PIF_PWRDOWN_1 0x13
-
-#define PB1_PIF_PWRDOWN_2 0x17
-#define PB1_PIF_PWRDOWN_3 0x18
-/* PCIE registers idx/data 0x30/0x34 */
-#define PCIE_CNTL2 0x1c /* PCIE */
-# define SLV_MEM_LS_EN (1 << 16)
-# define SLV_MEM_AGGRESSIVE_LS_EN (1 << 17)
-# define MST_MEM_LS_EN (1 << 18)
-# define REPLAY_MEM_LS_EN (1 << 19)
-#define PCIE_LC_STATUS1 0x28 /* PCIE */
-# define LC_REVERSE_RCVR (1 << 0)
-# define LC_REVERSE_XMIT (1 << 1)
-# define LC_OPERATING_LINK_WIDTH_MASK (0x7 << 2)
-# define LC_OPERATING_LINK_WIDTH_SHIFT 2
-# define LC_DETECTED_LINK_WIDTH_MASK (0x7 << 5)
-# define LC_DETECTED_LINK_WIDTH_SHIFT 5
-
-#define PCIE_P_CNTL 0x40 /* PCIE */
-# define P_IGNORE_EDB_ERR (1 << 6)
-
/* PCIE PORT registers idx/data 0x38/0x3c */
-#define PCIE_LC_CNTL 0xa0
-# define LC_L0S_INACTIVITY(x) ((x) << 8)
-# define LC_L0S_INACTIVITY_MASK (0xf << 8)
-# define LC_L0S_INACTIVITY_SHIFT 8
-# define LC_L1_INACTIVITY(x) ((x) << 12)
-# define LC_L1_INACTIVITY_MASK (0xf << 12)
-# define LC_L1_INACTIVITY_SHIFT 12
-# define LC_PMI_TO_L1_DIS (1 << 16)
-# define LC_ASPM_TO_L1_DIS (1 << 24)
-#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
-# define LC_LINK_WIDTH_SHIFT 0
-# define LC_LINK_WIDTH_MASK 0x7
+// #define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
# define LC_LINK_WIDTH_X0 0
# define LC_LINK_WIDTH_X1 1
# define LC_LINK_WIDTH_X2 2
# define LC_LINK_WIDTH_X4 3
# define LC_LINK_WIDTH_X8 4
# define LC_LINK_WIDTH_X16 6
-# define LC_LINK_WIDTH_RD_SHIFT 4
-# define LC_LINK_WIDTH_RD_MASK 0x70
-# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
-# define LC_RECONFIG_NOW (1 << 8)
-# define LC_RENEGOTIATION_SUPPORT (1 << 9)
-# define LC_RENEGOTIATE_EN (1 << 10)
-# define LC_SHORT_RECONFIG_EN (1 << 11)
-# define LC_UPCONFIGURE_SUPPORT (1 << 12)
-# define LC_UPCONFIGURE_DIS (1 << 13)
-# define LC_DYN_LANES_PWR_STATE(x) ((x) << 21)
-# define LC_DYN_LANES_PWR_STATE_MASK (0x3 << 21)
-# define LC_DYN_LANES_PWR_STATE_SHIFT 21
-#define PCIE_LC_N_FTS_CNTL 0xa3 /* PCIE_P */
-# define LC_XMIT_N_FTS(x) ((x) << 0)
-# define LC_XMIT_N_FTS_MASK (0xff << 0)
-# define LC_XMIT_N_FTS_SHIFT 0
-# define LC_XMIT_N_FTS_OVERRIDE_EN (1 << 8)
-# define LC_N_FTS_MASK (0xff << 24)
-#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
-# define LC_GEN2_EN_STRAP (1 << 0)
-# define LC_GEN3_EN_STRAP (1 << 1)
-# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 2)
-# define LC_TARGET_LINK_SPEED_OVERRIDE_MASK (0x3 << 3)
-# define LC_TARGET_LINK_SPEED_OVERRIDE_SHIFT 3
-# define LC_FORCE_EN_SW_SPEED_CHANGE (1 << 5)
-# define LC_FORCE_DIS_SW_SPEED_CHANGE (1 << 6)
-# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 7)
-# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 8)
-# define LC_INITIATE_LINK_SPEED_CHANGE (1 << 9)
-# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 10)
-# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 10
-# define LC_CURRENT_DATA_RATE_MASK (0x3 << 13) /* 0/1/2 = gen1/2/3 */
-# define LC_CURRENT_DATA_RATE_SHIFT 13
-# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 16)
-# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 18)
-# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 19)
-# define LC_OTHER_SIDE_EVER_SENT_GEN3 (1 << 20)
-# define LC_OTHER_SIDE_SUPPORTS_GEN3 (1 << 21)
-
-#define PCIE_LC_CNTL2 0xb1
-# define LC_ALLOW_PDWN_IN_L1 (1 << 17)
-# define LC_ALLOW_PDWN_IN_L23 (1 << 18)
-
-#define PCIE_LC_CNTL3 0xb5 /* PCIE_P */
-# define LC_GO_TO_RECOVERY (1 << 30)
-#define PCIE_LC_CNTL4 0xb6 /* PCIE_P */
-# define LC_REDO_EQ (1 << 5)
-# define LC_SET_QUIESCE (1 << 13)
-
-/*
- * UVD
- */
-#define UVD_UDEC_ADDR_CONFIG 0x3bd3
-#define UVD_UDEC_DB_ADDR_CONFIG 0x3bd4
-#define UVD_UDEC_DBW_ADDR_CONFIG 0x3bd5
-#define UVD_RBC_RB_RPTR 0x3da4
-#define UVD_RBC_RB_WPTR 0x3da5
-#define UVD_STATUS 0x3daf
-
-#define UVD_CGC_CTRL 0x3dc2
-# define DCM (1 << 0)
-# define CG_DT(x) ((x) << 2)
-# define CG_DT_MASK (0xf << 2)
-# define CLK_OD(x) ((x) << 6)
-# define CLK_OD_MASK (0x1f << 6)
-
-
- /* UVD CTX indirect */
-#define UVD_CGC_MEM_CTRL 0xC0
-#define UVD_CGC_CTRL2 0xC1
-# define DYN_OR_EN (1 << 0)
-# define DYN_RR_EN (1 << 1)
-# define G_DIV_ID(x) ((x) << 2)
-# define G_DIV_ID_MASK (0x7 << 2)
/*
* PM4
@@ -1583,45 +552,7 @@
/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
#define DMA0_REGISTER_OFFSET 0x0 /* not a register */
#define DMA1_REGISTER_OFFSET 0x200 /* not a register */
-
-#define DMA_RB_CNTL 0x3400
-# define DMA_RB_ENABLE (1 << 0)
-# define DMA_RB_SIZE(x) ((x) << 1) /* log2 
*/ -# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */ -# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12) -# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */ -# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */ -#define DMA_RB_BASE 0x3401 -#define DMA_RB_RPTR 0x3402 -#define DMA_RB_WPTR 0x3403 - -#define DMA_RB_RPTR_ADDR_HI 0x3407 -#define DMA_RB_RPTR_ADDR_LO 0x3408 - -#define DMA_IB_CNTL 0x3409 -# define DMA_IB_ENABLE (1 << 0) -# define DMA_IB_SWAP_ENABLE (1 << 4) -# define CMD_VMID_FORCE (1 << 31) -#define DMA_IB_RPTR 0x340a -#define DMA_CNTL 0x340b -# define TRAP_ENABLE (1 << 0) -# define SEM_INCOMPLETE_INT_ENABLE (1 << 1) -# define SEM_WAIT_INT_ENABLE (1 << 2) -# define DATA_SWAP_ENABLE (1 << 3) -# define FENCE_SWAP_ENABLE (1 << 4) -# define CTXEMPTY_INT_ENABLE (1 << 28) -#define DMA_STATUS_REG 0x340d -# define DMA_IDLE (1 << 0) -#define DMA_TILING_CONFIG 0x342e - -#define DMA_POWER_CNTL 0x342f -# define MEM_POWER_OVERRIDE (1 << 8) -#define DMA_CLK_CTRL 0x3430 - -#define DMA_PG 0x3435 -# define PG_CNTL_ENABLE (1 << 0) -#define DMA_PGFSM_CONFIG 0x3436 -#define DMA_PGFSM_WRITE 0x3437 +#define SDMA_MAX_INSTANCE 2 #define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \ (((b) & 0x1) << 26) | \ @@ -1650,6 +581,7 @@ #define DMA_PACKET_POLL_REG_MEM 0xe #define DMA_PACKET_NOP 0xf +/* VCE */ #define VCE_STATUS 0x20004 #define VCE_VCPU_CNTL 0x20014 #define VCE_CLK_EN (1 << 0) @@ -1726,378 +658,118 @@ #define CURSOR_WIDTH 64 #define CURSOR_HEIGHT 64 -#define AMDGPU_MM_INDEX 0x0000 -#define AMDGPU_MM_DATA 0x0001 - -#define VERDE_NUM_CRTC 6 -#define BLACKOUT_MODE_MASK 0x00000007 -#define VGA_RENDER_CONTROL 0xC0 -#define R_000300_VGA_RENDER_CONTROL 0xC0 -#define C_000300_VGA_VSTATUS_CNTL 0xFFFCFFFF -#define EVERGREEN_CRTC_STATUS 0x1BA3 -#define EVERGREEN_CRTC_V_BLANK (1 << 0) -#define EVERGREEN_CRTC_STATUS_POSITION 0x1BA4 -/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */ -#define EVERGREEN_CRTC_V_BLANK_START_END 0x1b8d -#define EVERGREEN_CRTC_CONTROL 0x1b9c -#define EVERGREEN_CRTC_MASTER_EN (1 << 0) -#define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24) -#define EVERGREEN_CRTC_BLANK_CONTROL 0x1b9d -#define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8) -#define EVERGREEN_CRTC_V_BLANK (1 << 0) -#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x1ba8 -#define EVERGREEN_CRTC_UPDATE_LOCK 0x1bb5 -#define EVERGREEN_MASTER_UPDATE_LOCK 0x1bbd -#define EVERGREEN_MASTER_UPDATE_MODE 0x1bbe -#define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16) -#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a07 -#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a08 -#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS 0x1a04 -#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS 0x1a05 -#define EVERGREEN_GRPH_UPDATE 0x1a11 -#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0xc4 -#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0xc9 -#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2) - -#define EVERGREEN_DATA_FORMAT 0x1ac0 -# define EVERGREEN_INTERLEAVE_EN (1 << 0) - -#define R600_D1GRPH_ARRAY_MODE_LINEAR_GENERAL (0 << 20) -#define R600_D1GRPH_ARRAY_MODE_LINEAR_ALIGNED (1 << 20) -#define R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1 (2 << 20) -#define R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1 (4 << 20) - -#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a45 -#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1845 - -#define R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1847 -#define R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a47 - -#define R600_D1GRPH_SWAP_CONTROL 0x1843 -#define R600_D1GRPH_SWAP_ENDIAN_NONE (0 << 0) 
-#define R600_D1GRPH_SWAP_ENDIAN_16BIT (1 << 0) -#define R600_D1GRPH_SWAP_ENDIAN_32BIT (2 << 0) -#define R600_D1GRPH_SWAP_ENDIAN_64BIT (3 << 0) - -#define AVIVO_D1VGA_CONTROL 0x00cc -# define AVIVO_DVGA_CONTROL_MODE_ENABLE (1 << 0) -# define AVIVO_DVGA_CONTROL_TIMING_SELECT (1 << 8) -# define AVIVO_DVGA_CONTROL_SYNC_POLARITY_SELECT (1 << 9) -# define AVIVO_DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1 << 10) -# define AVIVO_DVGA_CONTROL_OVERSCAN_COLOR_EN (1 << 16) -# define AVIVO_DVGA_CONTROL_ROTATE (1 << 24) -#define AVIVO_D2VGA_CONTROL 0x00ce - -#define R600_BUS_CNTL 0x1508 -# define R600_BIOS_ROM_DIS (1 << 1) + #define R600_ROM_CNTL 0x580 # define R600_SCK_OVERWRITE (1 << 1) # define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28 # define R600_SCK_PRESCALE_CRYSTAL_CLK_MASK (0xf << 28) -#define FMT_BIT_DEPTH_CONTROL 0x1bf2 -#define FMT_TRUNCATE_EN (1 << 0) -#define FMT_TRUNCATE_DEPTH (1 << 4) -#define FMT_SPATIAL_DITHER_EN (1 << 8) -#define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9) -#define FMT_SPATIAL_DITHER_DEPTH (1 << 12) -#define FMT_FRAME_RANDOM_ENABLE (1 << 13) -#define FMT_RGB_RANDOM_ENABLE (1 << 14) -#define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15) -#define FMT_TEMPORAL_DITHER_EN (1 << 16) -#define FMT_TEMPORAL_DITHER_DEPTH (1 << 20) -#define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21) -#define FMT_TEMPORAL_LEVEL (1 << 24) -#define FMT_TEMPORAL_DITHER_RESET (1 << 25) -#define FMT_25FRC_SEL(x) ((x) << 26) -#define FMT_50FRC_SEL(x) ((x) << 28) -#define FMT_75FRC_SEL(x) ((x) << 30) - -#define EVERGREEN_DC_LUT_CONTROL 0x1a80 -#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE 0x1a81 -#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN 0x1a82 -#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED 0x1a83 -#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE 0x1a84 -#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN 0x1a85 -#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED 0x1a86 -#define EVERGREEN_DC_LUT_30_COLOR 0x1a7c -#define EVERGREEN_DC_LUT_RW_INDEX 0x1a79 -#define EVERGREEN_DC_LUT_WRITE_EN_MASK 0x1a7e -#define EVERGREEN_DC_LUT_RW_MODE 0x1a78 - -#define EVERGREEN_GRPH_ENABLE 0x1a00 -#define EVERGREEN_GRPH_CONTROL 0x1a01 -#define EVERGREEN_GRPH_DEPTH(x) (((x) & 0x3) << 0) -#define EVERGREEN_GRPH_DEPTH_8BPP 0 -#define EVERGREEN_GRPH_DEPTH_16BPP 1 -#define EVERGREEN_GRPH_DEPTH_32BPP 2 -#define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2) -#define EVERGREEN_ADDR_SURF_2_BANK 0 -#define EVERGREEN_ADDR_SURF_4_BANK 1 -#define EVERGREEN_ADDR_SURF_8_BANK 2 -#define EVERGREEN_ADDR_SURF_16_BANK 3 -#define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4) -#define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6) -#define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0 -#define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1 -#define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2 -#define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3 -#define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8) - -#define EVERGREEN_GRPH_FORMAT_INDEXED 0 -#define EVERGREEN_GRPH_FORMAT_ARGB1555 0 -#define EVERGREEN_GRPH_FORMAT_ARGB565 1 -#define EVERGREEN_GRPH_FORMAT_ARGB4444 2 -#define EVERGREEN_GRPH_FORMAT_AI88 3 -#define EVERGREEN_GRPH_FORMAT_MONO16 4 -#define EVERGREEN_GRPH_FORMAT_BGRA5551 5 +#define GRPH_ARRAY_LINEAR_GENERAL 0 +#define GRPH_ARRAY_LINEAR_ALIGNED 1 +#define GRPH_ARRAY_1D_TILED_THIN1 2 +#define GRPH_ARRAY_2D_TILED_THIN1 4 + +#define ES_AND_GS_AUTO 3 +#define BUF_SWAP_32BIT (2 << 16) + +#define GRPH_DEPTH_8BPP 0 +#define GRPH_DEPTH_16BPP 1 +#define GRPH_DEPTH_32BPP 2 + +/* 8 BPP */ +#define GRPH_FORMAT_INDEXED 0 + +/* 16 BPP */ +#define GRPH_FORMAT_ARGB1555 0 +#define GRPH_FORMAT_ARGB565 1 +#define GRPH_FORMAT_ARGB4444 2 +#define 
GRPH_FORMAT_AI88 3 +#define GRPH_FORMAT_MONO16 4 +#define GRPH_FORMAT_BGRA5551 5 /* 32 BPP */ -#define EVERGREEN_GRPH_FORMAT_ARGB8888 0 -#define EVERGREEN_GRPH_FORMAT_ARGB2101010 1 -#define EVERGREEN_GRPH_FORMAT_32BPP_DIG 2 -#define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010 3 -#define EVERGREEN_GRPH_FORMAT_BGRA1010102 4 -#define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5 -#define EVERGREEN_GRPH_FORMAT_RGB111110 6 -#define EVERGREEN_GRPH_FORMAT_BGR101111 7 -#define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11) -#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0 -#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1 -#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2 -#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3 -#define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13) -#define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0 -#define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1 -#define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2 -#define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3 -#define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4 -#define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5 -#define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6 -#define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18) -#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0 -#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1 -#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2 -#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3 -#define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20) -#define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0 -#define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1 -#define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1 2 -#define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1 4 -#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0 -#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1 -#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2 -#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3 - -#define EVERGREEN_GRPH_SWAP_CONTROL 0x1a03 -#define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0) -# define EVERGREEN_GRPH_ENDIAN_NONE 0 -# define EVERGREEN_GRPH_ENDIAN_8IN16 1 -# define EVERGREEN_GRPH_ENDIAN_8IN32 2 -# define EVERGREEN_GRPH_ENDIAN_8IN64 3 -#define EVERGREEN_GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4) -# define EVERGREEN_GRPH_RED_SEL_R 0 -# define EVERGREEN_GRPH_RED_SEL_G 1 -# define EVERGREEN_GRPH_RED_SEL_B 2 -# define EVERGREEN_GRPH_RED_SEL_A 3 -#define EVERGREEN_GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6) -# define EVERGREEN_GRPH_GREEN_SEL_G 0 -# define EVERGREEN_GRPH_GREEN_SEL_B 1 -# define EVERGREEN_GRPH_GREEN_SEL_A 2 -# define EVERGREEN_GRPH_GREEN_SEL_R 3 -#define EVERGREEN_GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8) -# define EVERGREEN_GRPH_BLUE_SEL_B 0 -# define EVERGREEN_GRPH_BLUE_SEL_A 1 -# define EVERGREEN_GRPH_BLUE_SEL_R 2 -# define EVERGREEN_GRPH_BLUE_SEL_G 3 -#define EVERGREEN_GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10) -# define EVERGREEN_GRPH_ALPHA_SEL_A 0 -# define EVERGREEN_GRPH_ALPHA_SEL_R 1 -# define EVERGREEN_GRPH_ALPHA_SEL_G 2 -# define EVERGREEN_GRPH_ALPHA_SEL_B 3 - -#define EVERGREEN_D3VGA_CONTROL 0xf8 -#define EVERGREEN_D4VGA_CONTROL 0xf9 -#define EVERGREEN_D5VGA_CONTROL 0xfa -#define EVERGREEN_D6VGA_CONTROL 0xfb - -#define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK 0xffffff00 - -#define EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL 0x1a02 -#define EVERGREEN_LUT_10BIT_BYPASS_EN (1 << 8) - -#define EVERGREEN_GRPH_PITCH 0x1a06 -#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a07 -#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a08 -#define EVERGREEN_GRPH_SURFACE_OFFSET_X 0x1a09 -#define EVERGREEN_GRPH_SURFACE_OFFSET_Y 0x1a0a -#define EVERGREEN_GRPH_X_START 0x1a0b -#define EVERGREEN_GRPH_Y_START 
0x1a0c -#define EVERGREEN_GRPH_X_END 0x1a0d -#define EVERGREEN_GRPH_Y_END 0x1a0e -#define EVERGREEN_GRPH_UPDATE 0x1a11 -#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2) -#define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16) -#define EVERGREEN_GRPH_FLIP_CONTROL 0x1a12 -#define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0) - -#define EVERGREEN_VIEWPORT_START 0x1b5c -#define EVERGREEN_VIEWPORT_SIZE 0x1b5d -#define EVERGREEN_DESKTOP_HEIGHT 0x1ac1 - -/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */ -#define EVERGREEN_CUR_CONTROL 0x1a66 -# define EVERGREEN_CURSOR_EN (1 << 0) -# define EVERGREEN_CURSOR_MODE(x) (((x) & 0x3) << 8) -# define EVERGREEN_CURSOR_MONO 0 -# define EVERGREEN_CURSOR_24_1 1 -# define EVERGREEN_CURSOR_24_8_PRE_MULT 2 -# define EVERGREEN_CURSOR_24_8_UNPRE_MULT 3 -# define EVERGREEN_CURSOR_2X_MAGNIFY (1 << 16) -# define EVERGREEN_CURSOR_FORCE_MC_ON (1 << 20) -# define EVERGREEN_CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24) -# define EVERGREEN_CURSOR_URGENT_ALWAYS 0 -# define EVERGREEN_CURSOR_URGENT_1_8 1 -# define EVERGREEN_CURSOR_URGENT_1_4 2 -# define EVERGREEN_CURSOR_URGENT_3_8 3 -# define EVERGREEN_CURSOR_URGENT_1_2 4 -#define EVERGREEN_CUR_SURFACE_ADDRESS 0x1a67 -# define EVERGREEN_CUR_SURFACE_ADDRESS_MASK 0xfffff000 -#define EVERGREEN_CUR_SIZE 0x1a68 -#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH 0x1a69 -#define EVERGREEN_CUR_POSITION 0x1a6a -#define EVERGREEN_CUR_HOT_SPOT 0x1a6b -#define EVERGREEN_CUR_COLOR1 0x1a6c -#define EVERGREEN_CUR_COLOR2 0x1a6d -#define EVERGREEN_CUR_UPDATE 0x1a6e -# define EVERGREEN_CURSOR_UPDATE_PENDING (1 << 0) -# define EVERGREEN_CURSOR_UPDATE_TAKEN (1 << 1) -# define EVERGREEN_CURSOR_UPDATE_LOCK (1 << 16) -# define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24) - - -#define NI_INPUT_CSC_CONTROL 0x1a35 -# define NI_INPUT_CSC_GRPH_MODE(x) (((x) & 0x3) << 0) -# define NI_INPUT_CSC_BYPASS 0 -# define NI_INPUT_CSC_PROG_COEFF 1 -# define NI_INPUT_CSC_PROG_SHARED_MATRIXA 2 -# define NI_INPUT_CSC_OVL_MODE(x) (((x) & 0x3) << 4) - -#define NI_OUTPUT_CSC_CONTROL 0x1a3c -# define NI_OUTPUT_CSC_GRPH_MODE(x) (((x) & 0x7) << 0) -# define NI_OUTPUT_CSC_BYPASS 0 -# define NI_OUTPUT_CSC_TV_RGB 1 -# define NI_OUTPUT_CSC_YCBCR_601 2 -# define NI_OUTPUT_CSC_YCBCR_709 3 -# define NI_OUTPUT_CSC_PROG_COEFF 4 -# define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB 5 -# define NI_OUTPUT_CSC_OVL_MODE(x) (((x) & 0x7) << 4) - -#define NI_DEGAMMA_CONTROL 0x1a58 -# define NI_GRPH_DEGAMMA_MODE(x) (((x) & 0x3) << 0) -# define NI_DEGAMMA_BYPASS 0 -# define NI_DEGAMMA_SRGB_24 1 -# define NI_DEGAMMA_XVYCC_222 2 -# define NI_OVL_DEGAMMA_MODE(x) (((x) & 0x3) << 4) -# define NI_ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8) -# define NI_CURSOR_DEGAMMA_MODE(x) (((x) & 0x3) << 12) - -#define NI_GAMUT_REMAP_CONTROL 0x1a59 -# define NI_GRPH_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 0) -# define NI_GAMUT_REMAP_BYPASS 0 -# define NI_GAMUT_REMAP_PROG_COEFF 1 -# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA 2 -# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB 3 -# define NI_OVL_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 4) - -#define NI_REGAMMA_CONTROL 0x1aa0 -# define NI_GRPH_REGAMMA_MODE(x) (((x) & 0x7) << 0) -# define NI_REGAMMA_BYPASS 0 -# define NI_REGAMMA_SRGB_24 1 -# define NI_REGAMMA_XVYCC_222 2 -# define NI_REGAMMA_PROG_A 3 -# define NI_REGAMMA_PROG_B 4 -# define NI_OVL_REGAMMA_MODE(x) (((x) & 0x7) << 4) - - -#define NI_PRESCALE_GRPH_CONTROL 0x1a2d -# define NI_GRPH_PRESCALE_BYPASS (1 << 4) - -#define NI_PRESCALE_OVL_CONTROL 0x1a31 -# define NI_OVL_PRESCALE_BYPASS (1 << 4) - -#define 
NI_INPUT_GAMMA_CONTROL 0x1a10 -# define NI_GRPH_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 0) -# define NI_INPUT_GAMMA_USE_LUT 0 -# define NI_INPUT_GAMMA_BYPASS 1 -# define NI_INPUT_GAMMA_SRGB_24 2 -# define NI_INPUT_GAMMA_XVYCC_222 3 -# define NI_OVL_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 4) - -#define BLACKOUT_MODE_MASK 0x00000007 -#define VGA_RENDER_CONTROL 0xC0 -#define R_000300_VGA_RENDER_CONTROL 0xC0 -#define C_000300_VGA_VSTATUS_CNTL 0xFFFCFFFF -#define EVERGREEN_CRTC_STATUS 0x1BA3 -#define EVERGREEN_CRTC_V_BLANK (1 << 0) -#define EVERGREEN_CRTC_STATUS_POSITION 0x1BA4 -/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */ -#define EVERGREEN_CRTC_V_BLANK_START_END 0x1b8d -#define EVERGREEN_CRTC_CONTROL 0x1b9c -# define EVERGREEN_CRTC_MASTER_EN (1 << 0) -# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24) -#define EVERGREEN_CRTC_BLANK_CONTROL 0x1b9d -# define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8) -# define EVERGREEN_CRTC_V_BLANK (1 << 0) -#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x1ba8 -#define EVERGREEN_CRTC_UPDATE_LOCK 0x1bb5 -#define EVERGREEN_MASTER_UPDATE_LOCK 0x1bbd -#define EVERGREEN_MASTER_UPDATE_MODE 0x1bbe -#define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16) -#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a07 -#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a08 -#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS 0x1a04 -#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS 0x1a05 -#define EVERGREEN_GRPH_UPDATE 0x1a11 -#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0xc4 -#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0xc9 -#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2) - -#define mmVM_CONTEXT1_CNTL__xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x10 -#define mmVM_CONTEXT1_CNTL__xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4 -#define mmVM_CONTEXT1_CNTL__xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x80 -#define mmVM_CONTEXT1_CNTL__xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7 -#define mmVM_CONTEXT1_CNTL__xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x400 -#define mmVM_CONTEXT1_CNTL__xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa -#define mmVM_CONTEXT1_CNTL__xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x2000 -#define mmVM_CONTEXT1_CNTL__xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xd -#define mmVM_CONTEXT1_CNTL__xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x10000 -#define mmVM_CONTEXT1_CNTL__xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 -#define mmVM_CONTEXT1_CNTL__xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x80000 -#define mmVM_CONTEXT1_CNTL__xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x13 - -#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID_MASK 0x1e000000 -#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID__SHIFT 0x19 -#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxPROTECTIONS_MASK 0xff -#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxPROTECTIONS__SHIFT 0x0 -#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID_MASK 0xff000 -#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID__SHIFT 0xc -#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW_MASK 0x1000000 -#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW__SHIFT 0x18 - -#define mmMC_SHARED_BLACKOUT_CNTL__xxBLACKOUT_MODE_MASK 0x7 -#define mmMC_SHARED_BLACKOUT_CNTL__xxBLACKOUT_MODE__SHIFT 0x0 - -#define mmBIF_FB_EN__xxFB_READ_EN_MASK 0x1 -#define mmBIF_FB_EN__xxFB_READ_EN__SHIFT 0x0 -#define mmBIF_FB_EN__xxFB_WRITE_EN_MASK 0x2 -#define mmBIF_FB_EN__xxFB_WRITE_EN__SHIFT 0x1 - -#define 
mmSRBM_SOFT_RESET__xxSOFT_RESET_VMC_MASK 0x20000 -#define mmSRBM_SOFT_RESET__xxSOFT_RESET_VMC__SHIFT 0x11 -#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC_MASK 0x800 -#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC__SHIFT 0xb +#define GRPH_FORMAT_ARGB8888 0 +#define GRPH_FORMAT_ARGB2101010 1 +#define GRPH_FORMAT_32BPP_DIG 2 +#define GRPH_FORMAT_8B_ARGB2101010 3 +#define GRPH_FORMAT_BGRA1010102 4 +#define GRPH_FORMAT_8B_BGRA1010102 5 +#define GRPH_FORMAT_RGB111110 6 +#define GRPH_FORMAT_BGR101111 7 + +#define GRPH_ENDIAN_NONE 0 +#define GRPH_ENDIAN_8IN16 1 +#define GRPH_ENDIAN_8IN32 2 +#define GRPH_ENDIAN_8IN64 3 +#define GRPH_RED_SEL_R 0 +#define GRPH_RED_SEL_G 1 +#define GRPH_RED_SEL_B 2 +#define GRPH_RED_SEL_A 3 + +#define GRPH_GREEN_SEL_G 0 +#define GRPH_GREEN_SEL_B 1 +#define GRPH_GREEN_SEL_A 2 +#define GRPH_GREEN_SEL_R 3 + +#define GRPH_BLUE_SEL_B 0 +#define GRPH_BLUE_SEL_A 1 +#define GRPH_BLUE_SEL_R 2 +#define GRPH_BLUE_SEL_G 3 + +#define GRPH_ALPHA_SEL_A 0 +#define GRPH_ALPHA_SEL_R 1 +#define GRPH_ALPHA_SEL_G 2 +#define GRPH_ALPHA_SEL_B 3 + +/* CUR_CONTROL */ + #define CURSOR_MONO 0 + #define CURSOR_24_1 1 + #define CURSOR_24_8_PRE_MULT 2 + #define CURSOR_24_8_UNPRE_MULT 3 + #define CURSOR_URGENT_ALWAYS 0 + #define CURSOR_URGENT_1_8 1 + #define CURSOR_URGENT_1_4 2 + #define CURSOR_URGENT_3_8 3 + #define CURSOR_URGENT_1_2 4 + +/* INPUT_CSC_CONTROL */ +# define INPUT_CSC_BYPASS 0 +# define INPUT_CSC_PROG_COEFF 1 +# define INPUT_CSC_PROG_SHARED_MATRIXA 2 + +/* OUTPUT_CSC_CONTROL */ +# define OUTPUT_CSC_BYPASS 0 +# define OUTPUT_CSC_TV_RGB 1 +# define OUTPUT_CSC_YCBCR_601 2 +# define OUTPUT_CSC_YCBCR_709 3 +# define OUTPUT_CSC_PROG_COEFF 4 +# define OUTPUT_CSC_PROG_SHARED_MATRIXB 5 + +/* DEGAMMA_CONTROL */ +# define DEGAMMA_BYPASS 0 +# define DEGAMMA_SRGB_24 1 +# define DEGAMMA_XVYCC_222 2 + +/* GAMUT_REMAP_CONTROL */ +# define GAMUT_REMAP_BYPASS 0 +# define GAMUT_REMAP_PROG_COEFF 1 +# define GAMUT_REMAP_PROG_SHARED_MATRIXA 2 +# define GAMUT_REMAP_PROG_SHARED_MATRIXB 3 + +/* REGAMMA_CONTROL */ +# define REGAMMA_BYPASS 0 +# define REGAMMA_SRGB_24 1 +# define REGAMMA_XVYCC_222 2 +# define REGAMMA_PROG_A 3 +# define REGAMMA_PROG_B 4 + + +/* INPUT_GAMMA_CONTROL */ +# define INPUT_GAMMA_USE_LUT 0 +# define INPUT_GAMMA_BYPASS 1 +# define INPUT_GAMMA_SRGB_24 2 +# define INPUT_GAMMA_XVYCC_222 3 #define MC_SEQ_MISC0__MT__MASK 0xf0000000 #define MC_SEQ_MISC0__MT__GDDR1 0x10000000 @@ -2113,20 +785,9 @@ #define PACKET3_SEM_SEL_SIGNAL (0x6 << 29) #define PACKET3_SEM_SEL_WAIT (0x7 << 29) -#define CONFIG_CNTL 0x1509 -#define CC_DRM_ID_STRAPS 0X1559 #define AMDGPU_PCIE_INDEX 0xc #define AMDGPU_PCIE_DATA 0xd -#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0x3411 -#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0x3412 -#define DMA_MODE 0x342f -#define DMA_RB_RPTR_ADDR_HI 0x3407 -#define DMA_RB_RPTR_ADDR_LO 0x3408 -#define DMA_BUSY_MASK 0x20 -#define DMA1_BUSY_MASK 0X40 -#define SDMA_MAX_INSTANCE 2 - #define PCIE_BUS_CLK 10000 #define TCLK (PCIE_BUS_CLK / 10) #define PCIE_PORT_INDEX 0xe @@ -2136,8 +797,6 @@ #define EVERGREEN_PIF_PHY1_INDEX 0x10 #define EVERGREEN_PIF_PHY1_DATA 0x14 -#define MC_VM_FB_OFFSET 0x81a - /* Discrete VCE clocks */ #define CG_VCEPLL_FUNC_CNTL 0xc0030600 #define VCEPLL_RESET_MASK 0x00000001 diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 659eab9b90be..c457be3a3c56 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -584,6 +584,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev) * Enable triggering of GPU reset only if 
specified * by module parameter. */ + if (adev->pcie_reset_ctx.in_link_reset) + return AMD_RESET_METHOD_LINK; if (amdgpu_gpu_recovery == 4 || amdgpu_gpu_recovery == 5) return AMD_RESET_METHOD_MODE2; else if (!(adev->flags & AMD_IS_APU)) @@ -640,6 +642,9 @@ asic_reset: case AMD_RESET_METHOD_MODE2: dev_info(adev->dev, "MODE2 reset\n"); return amdgpu_dpm_mode2_reset(adev); + case AMD_RESET_METHOD_LINK: + dev_info(adev->dev, "Link reset\n"); + return amdgpu_device_link_reset(adev); default: dev_info(adev->dev, "MODE1 reset\n"); return amdgpu_device_mode1_reset(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index ef7c603b50ae..c8ac11a9cdef 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -118,7 +118,6 @@ int vega10_reg_base_init(struct amdgpu_device *adev); int vega20_reg_base_init(struct amdgpu_device *adev); int arct_reg_base_init(struct amdgpu_device *adev); int aldebaran_reg_base_init(struct amdgpu_device *adev); -void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev); u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id); int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev); ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h index a5000c171c02..cf93fa477674 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15d.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h @@ -552,6 +552,11 @@ # define PACKET3_QUERY_STATUS_DOORBELL_OFFSET(x) ((x) << 2) # define PACKET3_QUERY_STATUS_ENG_SEL(x) ((x) << 25) +#define PACKET3_RUN_CLEANER_SHADER_9_0 0xD7 +/* 1. header + * 2. RESERVED [31:0] + */ + #define PACKET3_RUN_CLEANER_SHADER 0xD2 /* 1. header * 2. RESERVED [31:0] diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h index a3b5fda22432..8a3f326474e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h +++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h @@ -92,6 +92,9 @@ enum ta_ras_block { TA_RAS_BLOCK__MCA, TA_RAS_BLOCK__VCN, TA_RAS_BLOCK__JPEG, + TA_RAS_BLOCK__IH, + TA_RAS_BLOCK__MPIO, + TA_RAS_BLOCK__MMSCH, TA_NUM_BLOCK_MAX }; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index 74f57b2d30a5..e590cbdd8de9 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -85,7 +85,8 @@ bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_sta return (amdgpu_ras_is_poison_mode_supported(adev) && (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && - (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1)); + ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1) || + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Poison) == 1))); } bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status) @@ -173,19 +174,76 @@ static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev, umc_v12_0_reset_error_count(adev); } +static void umc_v12_0_get_retire_flip_bits(struct amdgpu_device *adev) +{ + enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE; + uint32_t vram_type = adev->gmc.vram_type; + struct amdgpu_umc_flip_bits *flip_bits = &(adev->umc.flip_bits); + + if (adev->gmc.gmc_funcs->query_mem_partition_mode) + nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev); + + /* default setting */ + flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_C2_BIT; + 
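Editor's note: the hunk above widens the deferred-error predicate so that a valid MCA bank with either the Deferred or the Poison status bit set is treated as deferred, letting poison-consumed pages flow into the same retirement path. A minimal standalone sketch of that decode, with hypothetical bit positions (the real MCUMC_STATUST0 layout is not reproduced here):

```c
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical field bits, for illustration only. */
#define STATUS_VAL_BIT      (1ULL << 63)
#define STATUS_DEFERRED_BIT (1ULL << 44)
#define STATUS_POISON_BIT   (1ULL << 43)

/* Mirrors the new predicate: valid AND (deferred OR poison). */
static bool is_deferred_error(uint64_t status, bool poison_supported)
{
	return poison_supported &&
	       (status & STATUS_VAL_BIT) &&
	       ((status & STATUS_DEFERRED_BIT) || (status & STATUS_POISON_BIT));
}
```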
@@ -173,19 +174,76 @@ static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev,
	umc_v12_0_reset_error_count(adev);
 }
 
+static void umc_v12_0_get_retire_flip_bits(struct amdgpu_device *adev)
+{
+	enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
+	uint32_t vram_type = adev->gmc.vram_type;
+	struct amdgpu_umc_flip_bits *flip_bits = &(adev->umc.flip_bits);
+
+	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+		nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+
+	/* default setting */
+	flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_C2_BIT;
+	flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C3_BIT;
+	flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_C4_BIT;
+	flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R13_BIT;
+	flip_bits->flip_row_bit = 13;
+	flip_bits->bit_num = 4;
+	flip_bits->r13_in_pa = UMC_V12_0_PA_R13_BIT;
+
+	if (nps == AMDGPU_NPS2_PARTITION_MODE) {
+		flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH5_BIT;
+		flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C2_BIT;
+		flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B1_BIT;
+		flip_bits->r13_in_pa = UMC_V12_0_PA_R12_BIT;
+	} else if (nps == AMDGPU_NPS4_PARTITION_MODE) {
+		flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH4_BIT;
+		flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_CH5_BIT;
+		flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B0_BIT;
+		flip_bits->r13_in_pa = UMC_V12_0_PA_R11_BIT;
+	}
+
+	switch (vram_type) {
+	case AMDGPU_VRAM_TYPE_HBM:
+		/* other nps modes are taken as nps1 */
+		if (nps == AMDGPU_NPS2_PARTITION_MODE)
+			flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;
+		else if (nps == AMDGPU_NPS4_PARTITION_MODE)
+			flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
+
+		break;
+	case AMDGPU_VRAM_TYPE_HBM3E:
+		flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;
+		flip_bits->flip_row_bit = 12;
+
+		if (nps == AMDGPU_NPS2_PARTITION_MODE)
+			flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
+		else if (nps == AMDGPU_NPS4_PARTITION_MODE)
+			flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R10_BIT;
+
+		break;
+	default:
+		dev_warn(adev->dev,
+			"Unknown HBM type, set RAS retire flip bits to the value in NPS1 mode.\n");
+		break;
+	}
+
+	adev->umc.retire_unit = 0x1 << flip_bits->bit_num;
+}
+
 static int umc_v12_0_convert_error_address(struct amdgpu_device *adev,
					struct ras_err_data *err_data,
					struct ta_ras_query_address_input *addr_in,
					struct ta_ras_query_address_output *addr_out,
					bool dump_addr)
 {
-	uint32_t col, col_lower, row, row_lower, bank;
+	uint32_t col, col_lower, row, row_lower, row_high, bank;
	uint32_t channel_index = 0, umc_inst = 0;
-	uint32_t i, loop_bits[UMC_V12_0_RETIRE_LOOP_BITS];
+	uint32_t i, bit_num, retire_unit, *flip_bits;
	uint64_t soc_pa, column, err_addr;
	struct ta_ras_query_address_output addr_out_tmp;
	struct ta_ras_query_address_output *paddr_out;
-	enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
	int ret = 0;
 
	if (!addr_out)
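Editor's note: once the flip bits are cached per NPS mode, the retirement unit follows directly from the last line of the new helper: retire_unit = 1 << bit_num, one bad page per combination of the flipped physical-address bits (16 pages for the 4-bit NPS1 layout). A self-contained sketch of the enumeration the conversion routine performs, using the NPS1 default bit positions (C2/C3/C4/R13) purely as example values:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Example flip-bit positions; on real hardware these come from
	 * the NPS-mode-dependent table built by the helper above.
	 */
	const uint32_t flip_bits[] = { 15, 16, 21, 35 };
	const uint32_t bit_num = 4;
	const uint64_t retire_unit = 1ULL << bit_num;	/* 16 addresses */
	uint64_t base = 0x123456789ULL, column, i;

	/* Clear the loop bits to get the canonical base address. */
	for (i = 0; i < bit_num; i++)
		base &= ~(1ULL << flip_bits[i]);

	/* OR each bit combination back in, one retired page per value. */
	for (column = 0; column < retire_unit; column++) {
		uint64_t pa = base;

		for (i = 0; i < bit_num; i++)
			pa |= ((column >> i) & 0x1ULL) << flip_bits[i];
		printf("retire pa: 0x%llx\n", (unsigned long long)pa);
	}
	return 0;
}
```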
@@ -210,46 +268,46 @@ static int umc_v12_0_convert_error_address(struct amdgpu_device *adev,
		umc_inst = addr_in->ma.umc_inst;
	}
 
-	loop_bits[0] = UMC_V12_0_PA_C2_BIT;
-	loop_bits[1] = UMC_V12_0_PA_C3_BIT;
-	loop_bits[2] = UMC_V12_0_PA_C4_BIT;
-	loop_bits[3] = UMC_V12_0_PA_R13_BIT;
-
-	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
-		nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
-
-	/* other nps modes are taken as nps1 */
-	if (nps == AMDGPU_NPS4_PARTITION_MODE) {
-		loop_bits[0] = UMC_V12_0_PA_CH4_BIT;
-		loop_bits[1] = UMC_V12_0_PA_CH5_BIT;
-		loop_bits[2] = UMC_V12_0_PA_B0_BIT;
-		loop_bits[3] = UMC_V12_0_PA_R11_BIT;
-	}
+	flip_bits = adev->umc.flip_bits.flip_bits_in_pa;
+	bit_num = adev->umc.flip_bits.bit_num;
+	retire_unit = adev->umc.retire_unit;
 
	soc_pa = paddr_out->pa.pa;
	channel_index = paddr_out->pa.channel_idx;
	/* clear loop bits in soc physical address */
-	for (i = 0; i < UMC_V12_0_RETIRE_LOOP_BITS; i++)
-		soc_pa &= ~BIT_ULL(loop_bits[i]);
+	for (i = 0; i < bit_num; i++)
+		soc_pa &= ~BIT_ULL(flip_bits[i]);
 
	paddr_out->pa.pa = soc_pa;
	/* get column bit 0 and 1 in mca address */
	col_lower = (err_addr >> 1) & 0x3ULL;
-	/* MA_R13_BIT will be handled later */
+	/* extra row bit will be handled later */
	row_lower = (err_addr >> UMC_V12_0_MA_R0_BIT) & 0x1fffULL;
+	row_lower &= ~BIT_ULL(adev->umc.flip_bits.flip_row_bit);
+
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 5, 0)) {
+		row_high = (soc_pa >> adev->umc.flip_bits.r13_in_pa) & 0x3ULL;
+		/* it's 2.25GB in each channel, from MCA address to PA
+		 * [R14 R13] is converted if the two bits' value is 0x3,
+		 * so get them from PA instead of MCA address.
+		 */
+		row_lower |= (row_high << 13);
+	}
 
	if (!err_data && !dump_addr)
		goto out;
 
	/* loop for all possibilities of retired bits */
-	for (column = 0; column < UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL; column++) {
+	for (column = 0; column < retire_unit; column++) {
		soc_pa = paddr_out->pa.pa;
-		for (i = 0; i < UMC_V12_0_RETIRE_LOOP_BITS; i++)
-			soc_pa |= (((column >> i) & 0x1ULL) << loop_bits[i]);
+		for (i = 0; i < bit_num; i++)
+			soc_pa |= (((column >> i) & 0x1ULL) << flip_bits[i]);
 
		col = ((column & 0x7) << 2) | col_lower;
-		/* add row bit 13 */
-		row = ((column >> 3) << 13) | row_lower;
+		/* handle extra row bit */
+		if (bit_num == RETIRE_FLIP_BITS_NUM)
+			row = ((column >> 3) << adev->umc.flip_bits.flip_row_bit) |
+				row_lower;
 
		if (dump_addr)
			dev_info(adev->dev,
@@ -427,8 +485,12 @@ static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank
			bank->regs[ACA_REG_IDX_ADDR]);
 
	ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
-	count = ext_error_code == 0 ?
-		ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL;
+	if (umc_v12_0_is_deferred_error(adev, status))
+		count = ext_error_code == 0 ?
+			adev->umc.err_addr_cnt / adev->umc.retire_unit : 1ULL;
+	else
+		count = ext_error_code == 0 ?
+			ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL;
 
	return aca_error_cache_log_bank_error(handle, &info, err_type, count);
 }
@@ -468,8 +530,7 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
	uint64_t err_addr, pa_addr = 0;
	struct ras_ecc_err *ecc_err;
	struct ta_ras_query_address_output addr_out;
-	enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
-	uint32_t shift_bit = UMC_V12_0_PA_C4_BIT;
+	uint32_t shift_bit = adev->umc.flip_bits.flip_bits_in_pa[2];
	int count, ret, i;
 
	hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
@@ -514,11 +575,6 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
	ecc_err->pa_pfn = pa_addr >> AMDGPU_GPU_PAGE_SHIFT;
	ecc_err->channel_idx = addr_out.pa.channel_idx;
 
-	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
-		nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
-	if (nps == AMDGPU_NPS4_PARTITION_MODE)
-		shift_bit = UMC_V12_0_PA_B0_BIT;
-
	/* If converted pa_pfn is 0, use pa C4 pfn. */
	if (!ecc_err->pa_pfn)
		ecc_err->pa_pfn = BIT_ULL(shift_bit) >> AMDGPU_GPU_PAGE_SHIFT;
@@ -664,5 +720,6 @@ struct amdgpu_umc_ras umc_v12_0_ras = {
	.update_ecc_status = umc_v12_0_update_ecc_status,
	.convert_ras_err_addr = umc_v12_0_convert_error_address,
	.get_die_id_from_pa = umc_v12_0_get_die_id,
+	.get_retire_flip_bits = umc_v12_0_get_retire_flip_bits,
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
index 9298018d938f..63b7e7254526 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
@@ -55,8 +55,6 @@
 #define UMC_V12_0_NA_MAP_PA_NUM 8
 /* R13 bit shift should be considered, double the number */
 #define UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (UMC_V12_0_NA_MAP_PA_NUM * 2)
-/* C2, C3, C4, R13, four bits in MCA address are looped in retirement */
-#define UMC_V12_0_RETIRE_LOOP_BITS 4
 
 /* column bits in SOC physical address */
 #define UMC_V12_0_PA_C2_BIT 15
@@ -64,13 +62,16 @@
 #define UMC_V12_0_PA_C4_BIT 21
 /* row bits in SOC physical address */
 #define UMC_V12_0_PA_R0_BIT 22
+#define UMC_V12_0_PA_R10_BIT 32
 #define UMC_V12_0_PA_R11_BIT 33
+#define UMC_V12_0_PA_R12_BIT 34
 #define UMC_V12_0_PA_R13_BIT 35
 /* channel bit in SOC physical address */
 #define UMC_V12_0_PA_CH4_BIT 12
 #define UMC_V12_0_PA_CH5_BIT 13
 /* bank bit in SOC physical address */
 #define UMC_V12_0_PA_B0_BIT 19
+#define UMC_V12_0_PA_B1_BIT 20
 
 /* row bits in MCA address */
 #define UMC_V12_0_MA_R0_BIT 10
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 21b57c29bf7d..c74947705d77 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1009,6 +1009,11 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_vcn_inst *vinst)
 
	jpeg_v1_0_start(adev, 0);
 
+	/* Keeping one read-back to ensure all register writes are done,
+	 * otherwise it may introduce race conditions.
+	 */
+	RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
	return 0;
 }
 
@@ -1154,6 +1159,11 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst)
 
	jpeg_v1_0_start(adev, 1);
 
+	/* Keeping one read-back to ensure all register writes are done,
+	 * otherwise it may introduce race conditions.
+	 */
+	RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
	return 0;
 }
 
@@ -1216,6 +1226,12 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_vcn_inst *vinst)
 
	vcn_v1_0_enable_clock_gating(vinst);
	vcn_1_0_enable_static_power_gating(vinst);
+
+	/* Keeping one read-back to ensure all register writes are done,
+	 * otherwise it may introduce race conditions.
+	 */
+	RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
	return 0;
 }
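Editor's note: the read-backs added throughout these VCN start/stop paths all follow one idiom: MMIO writes over PCIe are posted, so a read from the same block forces the preceding writes to complete before the function returns. A compilable toy sketch of the pattern, with stand-in accessors (the driver's WREG32/RREG32 macros play this role):

```c
#include <stdint.h>

/* Hypothetical MMIO accessors; fake backing store for illustration. */
static volatile uint32_t fake_regs[3];

enum { REG_A, REG_B, REG_STATUS };

static void mmio_write32(unsigned int reg, uint32_t val) { fake_regs[reg] = val; }
static uint32_t mmio_read32(unsigned int reg) { return fake_regs[reg]; }

static void block_program(void)
{
	mmio_write32(REG_A, 0x1);	/* posted writes may still be in flight */
	mmio_write32(REG_B, 0x2);
	(void)mmio_read32(REG_STATUS);	/* read-back flushes the posted writes */
}

int main(void)
{
	block_program();
	return 0;
}
```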
@@ -1250,6 +1266,11 @@ static int vcn_v1_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
 
+	/* Keeping one read-back to ensure all register writes are done,
+	 * otherwise it may introduce race conditions.
+	 */
+	RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 8e7a36f26e9c..68b4371df0f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -39,6 +39,7 @@
 
 #define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
 #define VCN1_VID_SOC_ADDRESS_3_0 0x48200
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
 
 #define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x1fd
 #define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x503
@@ -97,6 +98,8 @@ static int vcn_v2_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
 static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				struct dpg_pause_state *new_state);
 static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);
+static int vcn_v2_0_reset(struct amdgpu_vcn_inst *vinst);
+
 /**
  * vcn_v2_0_early_init - set function pointers and load microcode
  *
@@ -212,6 +215,12 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
	}
 
	adev->vcn.inst[0].pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
+	adev->vcn.inst[0].reset = vcn_v2_0_reset;
+
+	adev->vcn.supported_reset =
+		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+	if (!amdgpu_sriov_vf(adev))
+		adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
 
	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
@@ -232,6 +241,10 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
		adev->vcn.ip_dump = ptr;
	}
 
+	r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+	if (r)
+		return r;
+
	return 0;
 }
 
@@ -259,6 +272,8 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
	if (r)
		return r;
 
+	amdgpu_vcn_sysfs_reset_mask_fini(adev);
+
	r = amdgpu_vcn_sw_fini(adev, 0);
 
	kfree(adev->vcn.ip_dump);
@@ -977,6 +992,12 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
	/* Unstall DPG */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
+	/* Keeping one read-back to ensure all register writes are done,
+	 * otherwise it may introduce race conditions.
+	 */
+	RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
	return 0;
 }
 
@@ -1151,6 +1172,11 @@ static int vcn_v2_0_start(struct amdgpu_vcn_inst *vinst)
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
	fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
 
+	/* Keeping one read-back to ensure all register writes are done,
+	 * otherwise it may introduce race conditions.
+	 */
+	RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
	return 0;
 }
 
@@ -1182,6 +1208,11 @@ static int vcn_v2_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
 
+	/* Keeping one read-back to ensure all register writes are done,
+	 * otherwise it may introduce race conditions.
+	 */
+	RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
	return 0;
 }
 
@@ -1247,6 +1278,11 @@ static int vcn_v2_0_stop(struct amdgpu_vcn_inst *vinst)
	vcn_v2_0_enable_clock_gating(vinst);
	vcn_v2_0_enable_static_power_gating(vinst);
 
+	/* Keeping one read-back to ensure all register writes are done,
+	 * otherwise it may introduce race conditions.
+	 */
+	RREG32_SOC15(VCN, 0, mmUVD_STATUS);
+
 power_off:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, false, 0);
@@ -1333,6 +1369,16 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
	return 0;
 }
 
+static int vcn_v2_0_reset(struct amdgpu_vcn_inst *vinst)
+{
+	int r;
+
+	r = vcn_v2_0_stop(vinst);
+	if (r)
+		return r;
+	return vcn_v2_0_start(vinst);
+}
+
 static bool vcn_v2_0_is_idle(struct amdgpu_ip_block *ip_block)
 {
	struct amdgpu_device *adev = ip_block->adev;
@@ -2154,6 +2200,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+	.reset = amdgpu_vcn_ring_reset,
 };
 
 static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
@@ -2183,6 +2230,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+	.reset = amdgpu_vcn_ring_reset,
 };
 
 static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
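Editor's note: the per-instance reset callbacks introduced here (vcn_v2_0_reset, and the v2.5/v3.0 equivalents below) are simply a stop/start cycle, and the per-queue reset capability is only advertised on bare metal. A reduced, compilable sketch of that callback wiring, with hypothetical types (the real amdgpu_vcn_inst carries much more state):

```c
/* Hypothetical reduced shapes, for illustration only. */
struct vcn_inst;
typedef int (*vcn_reset_fn)(struct vcn_inst *);

struct vcn_inst {
	vcn_reset_fn reset;
};

static int vcn_stop(struct vcn_inst *vinst)  { (void)vinst; return 0; }
static int vcn_start(struct vcn_inst *vinst) { (void)vinst; return 0; }

/* Reset = stop then start, the same pattern the new callbacks use. */
static int vcn_reset(struct vcn_inst *vinst)
{
	int r = vcn_stop(vinst);

	if (r)
		return r;
	return vcn_start(vinst);
}

int main(void)
{
	struct vcn_inst inst = { .reset = vcn_reset };

	return inst.reset(&inst);
}
```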
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index d716510b8dd6..bc30a5326866 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -39,6 +39,7 @@
 #define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
 #define VCN1_VID_SOC_ADDRESS_3_0 0x48200
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
 
 #define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
 #define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f
@@ -101,6 +102,7 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				struct dpg_pause_state *new_state);
 static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
 static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev);
+static int vcn_v2_5_reset(struct amdgpu_vcn_inst *vinst);
 
 static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
@@ -403,8 +405,14 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
 
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
			adev->vcn.inst[j].pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
+		adev->vcn.inst[j].reset = vcn_v2_5_reset;
	}
 
+	adev->vcn.supported_reset =
+		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+	if (!amdgpu_sriov_vf(adev))
+		adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
@@ -424,6 +432,10 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
		adev->vcn.ip_dump = ptr;
	}
 
+	r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+	if (r)
+		return r;
+
	return 0;
 }
 
@@ -454,6 +466,8 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);
 
+	amdgpu_vcn_sysfs_reset_mask_fini(adev);
+
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(adev, i);
		if (r)
@@ -1157,6 +1171,11 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
 
+	/* Keeping one read-back to ensure all register writes are done,
+	 * otherwise it may introduce race conditions.
+	 */
+	RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
+
	return 0;
 }
 
@@ -1342,6 +1361,11 @@ static int vcn_v2_5_start(struct amdgpu_vcn_inst *vinst)
	WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
	fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
 
+	/* Keeping one read-back to ensure all register writes are done,
+	 * otherwise it may introduce race conditions.
+	 */
+	RREG32_SOC15(VCN, i, mmUVD_STATUS);
+
	return 0;
 }
 
@@ -1568,6 +1592,11 @@ static int vcn_v2_5_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
 
+	/* Keeping one read-back to ensure all register writes are done,
+	 * otherwise it may introduce race conditions.
+	 */
+	RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
+
	return 0;
 }
 
@@ -1634,6 +1663,10 @@ static int vcn_v2_5_stop(struct amdgpu_vcn_inst *vinst)
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 
+	/* Keeping one read-back to ensure all register writes are done,
+	 * otherwise it may introduce race conditions.
+	 */
+	RREG32_SOC15(VCN, i, mmUVD_STATUS);
 done:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, false, i);
@@ -1796,6 +1829,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+	.reset = amdgpu_vcn_ring_reset,
 };
 
 /**
@@ -1894,6 +1928,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+	.reset = amdgpu_vcn_ring_reset,
 };
 
 static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
@@ -1922,6 +1957,16 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
	}
 }
 
+static int vcn_v2_5_reset(struct amdgpu_vcn_inst *vinst)
+{
+	int r;
+
+	r = vcn_v2_5_stop(vinst);
+	if (r)
+		return r;
+	return vcn_v2_5_start(vinst);
+}
+
 static bool vcn_v2_5_is_idle(struct amdgpu_ip_block *ip_block)
 {
	struct amdgpu_device *adev = ip_block->adev;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 22ae1939476f..4b8f4407047f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -40,6 +40,7 @@
 
 #define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
 #define VCN1_VID_SOC_ADDRESS_3_0 0x48200
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
 
 #define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
 #define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f
@@ -109,6 +110,7 @@ static int vcn_v3_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
				enum amd_powergating_state state);
 static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
			struct dpg_pause_state *new_state);
+static int vcn_v3_0_reset(struct amdgpu_vcn_inst *vinst);
 
 static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
 static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);
@@ -288,8 +290,14 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
 
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
			adev->vcn.inst[i].pause_dpg_mode = vcn_v3_0_pause_dpg_mode;
+		adev->vcn.inst[i].reset = vcn_v3_0_reset;
	}
 
+	adev->vcn.supported_reset =
+		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+	if (!amdgpu_sriov_vf(adev))
+		adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
@@ -305,6 +313,10 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
		adev->vcn.ip_dump = ptr;
	}
 
+	r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+	if (r)
+		return r;
+
	return 0;
 }
 
@@ -337,6 +349,8 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);
 
+	amdgpu_vcn_sysfs_reset_mask_fini(adev);
+
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(adev, i);
		if (r)
@@ -1172,6 +1186,11 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
 
+	/* Keeping one read-back to ensure all register writes are done,
+	 * otherwise it may introduce race conditions.
+	 */
+	RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
+
	return 0;
 }
 
@@ -1359,6 +1378,11 @@ static int vcn_v3_0_start(struct amdgpu_vcn_inst *vinst)
		fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
	}
 
+	/* Keeping one read-back to ensure all register writes are done,
+	 * otherwise it may introduce race conditions.
+	 */
+	RREG32_SOC15(VCN, i, mmUVD_STATUS);
+
	return 0;
 }
 
@@ -1601,6 +1625,11 @@ static int vcn_v3_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
 
+	/* Keeping one read-back to ensure all register writes are done,
+	 * otherwise it may introduce race conditions.
+	 */
+	RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
+
	return 0;
 }
 
@@ -1673,6 +1702,11 @@ static int vcn_v3_0_stop(struct amdgpu_vcn_inst *vinst)
	/* enable VCN power gating */
	vcn_v3_0_enable_static_power_gating(vinst);
 
+	/* Keeping one read-back to ensure all register writes are done,
+	 * otherwise it may introduce race conditions.
+	 */
+	RREG32_SOC15(VCN, i, mmUVD_STATUS);
+
 done:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, false, i);
@@ -2012,6 +2046,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+	.reset = amdgpu_vcn_ring_reset,
 };
 
 /**
@@ -2110,6 +2145,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = {
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+	.reset = amdgpu_vcn_ring_reset,
 };
 
 static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
@@ -2143,6 +2179,18 @@ static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev)
	}
 }
 
+static int vcn_v3_0_reset(struct amdgpu_vcn_inst *vinst)
+{
+	int r;
+
+	r = vcn_v3_0_stop(vinst);
+	if (r)
+		return r;
+	vcn_v3_0_enable_clock_gating(vinst);
+	vcn_v3_0_enable_static_power_gating(vinst);
+	return vcn_v3_0_start(vinst);
+}
+
 static bool vcn_v3_0_is_idle(struct amdgpu_ip_block *ip_block)
 {
	struct amdgpu_device *adev = ip_block->adev;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index c6f6392c1c20..1924e075b66f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -46,6 +46,7 @@
 
 #define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
 #define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
 
 #define VCN_HARVEST_MMSCH 0
 
@@ -238,9 +239,10 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
			adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_pause_dpg_mode;
	}
 
-	/* TODO: Add queue reset mask when FW fully supports it */
	adev->vcn.supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+	if (!amdgpu_sriov_vf(adev))
+		adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
 
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
@@ -614,7 +616,8 @@ static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
 
	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
-		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+		VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
+		adev->gfx.config.gb_addr_config, 0, indirect);
 }
 
 /**
@@ -1120,6 +1123,11 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
			ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
			VCN_RB1_DB_CTRL__EN_MASK);
 
+	/* Keeping one read-back to ensure all register writes are done,
+	 * otherwise it may introduce race conditions.
+	 */
+	RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
+
	return 0;
 }
 
@@ -1301,6 +1309,11 @@ static int vcn_v4_0_start(struct amdgpu_vcn_inst *vinst)
	WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
 
+	/* Keeping one read-back to ensure all register writes are done,
+	 * otherwise it may introduce race conditions.
+	 */
+	RREG32_SOC15(VCN, i, regUVD_STATUS);
+
	return 0;
 }
 
@@ -1581,6 +1594,11 @@ static void vcn_v4_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+	/* Keeping one read-back to ensure all register writes are done,
+	 * otherwise it may introduce race conditions.
+	 */
+	RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
 }
 
 /**
@@ -1664,6 +1682,11 @@ static int vcn_v4_0_stop(struct amdgpu_vcn_inst *vinst)
	/* enable VCN power gating */
	vcn_v4_0_enable_static_power_gating(vinst);
 
+	/* Keeping one read-back to ensure all register writes are done,
+	 * otherwise it may introduce race conditions.
+	 */
+	RREG32_SOC15(VCN, i, regUVD_STATUS);
+
 done:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, false, i);
@@ -1945,6 +1968,24 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
	return 0;
 }
 
+static int vcn_v4_0_ring_reset(struct amdgpu_ring *ring,
+			       unsigned int vmid,
+			       struct amdgpu_fence *timedout_fence)
+{
+	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+	int r;
+
+	amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+	r = vcn_v4_0_stop(vinst);
+	if (r)
+		return r;
+	r = vcn_v4_0_start(vinst);
+	if (r)
+		return r;
+	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
 static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
@@ -1974,6 +2015,7 @@ static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+	.reset = vcn_v4_0_ring_reset,
 };
 
 /**
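Editor's note: vcn_v4_0_ring_reset brackets the stop/start cycle with the generic ring-reset helpers, which (as used here) quiesce the ring before the instance is torn down and re-validate fences afterwards. A compilable schematic of that bracketing, with stub types and helper names that merely mirror the diff:

```c
/* Stub sketch of the begin/stop/start/end bracketing; bodies are fake. */
struct ring;
struct fence;

static void reset_begin(struct ring *r, struct fence *f) { (void)r; (void)f; }
static int  reset_end(struct ring *r, struct fence *f)   { (void)r; (void)f; return 0; }
static int  inst_stop(void)  { return 0; }
static int  inst_start(void) { return 0; }

static int ring_reset(struct ring *r, struct fence *timedout)
{
	int err;

	reset_begin(r, timedout);	/* park the ring, save state */
	err = inst_stop();		/* full instance power-down */
	if (err)
		return err;
	err = inst_start();		/* bring the instance back up */
	if (err)
		return err;
	return reset_end(r, timedout);	/* signal fences, resume submission */
}

int main(void)
{
	return ring_reset(0, 0);
}
```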
*/ + RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL); + } + + return 0; +} + /** * vcn_v4_0_3_hw_init - start and test VCN block * @@ -298,7 +328,8 @@ static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; - int i, r, vcn_inst; + struct amdgpu_vcn_inst *vinst; + int i, r; if (amdgpu_sriov_vf(adev)) { r = vcn_v4_0_3_start_sriov(adev); @@ -321,28 +352,9 @@ static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block) for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { struct amdgpu_vcn4_fw_shared *fw_shared; - vcn_inst = GET_INST(VCN, i); ring = &adev->vcn.inst[i].ring_enc[0]; - - if (ring->use_doorbell) { - adev->nbio.funcs->vcn_doorbell_range( - adev, ring->use_doorbell, - (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + - 9 * vcn_inst, - adev->vcn.inst[i].aid_id); - - WREG32_SOC15( - VCN, GET_INST(VCN, ring->me), - regVCN_RB1_DB_CTRL, - ring->doorbell_index - << VCN_RB1_DB_CTRL__OFFSET__SHIFT | - VCN_RB1_DB_CTRL__EN_MASK); - - /* Read DB_CTRL to flush the write DB_CTRL command. */ - RREG32_SOC15( - VCN, GET_INST(VCN, ring->me), - regVCN_RB1_DB_CTRL); - } + vinst = &adev->vcn.inst[i]; + vcn_v4_0_3_hw_init_inst(vinst); /* Re-init fw_shared when RAS fatal error occurred */ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; @@ -379,6 +391,9 @@ static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block) vinst->set_pg_state(vinst, AMD_PG_STATE_GATE); } + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) + amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0); + return 0; } @@ -962,6 +977,11 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_vcn_inst *vinst, /*resetting done, fw can check RB ring */ fw_shared->sq.queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); + /* Keeping one read-back to ensure all register writes are done, + * otherwise it may introduce race conditions. + */ + RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS); + return 0; } @@ -1355,6 +1375,12 @@ static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_vcn_inst *vinst) /* disable dynamic power gating mode */ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); + + /* Keeping one read-back to ensure all register writes are done, + * otherwise it may introduce race conditions. + */ + RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS); + return 0; } @@ -1438,6 +1464,11 @@ static int vcn_v4_0_3_stop(struct amdgpu_vcn_inst *vinst) /* apply HW clock gating */ vcn_v4_0_3_enable_clock_gating(vinst); + /* Keeping one read-back to ensure all register writes are done, + * otherwise it may introduce race conditions. 
+ */ + RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS); + Done: return 0; } @@ -1563,6 +1594,34 @@ static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring) } } +static int vcn_v4_0_3_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) +{ + int r = 0; + int vcn_inst; + struct amdgpu_device *adev = ring->adev; + struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me]; + + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + + vcn_inst = GET_INST(VCN, ring->me); + r = amdgpu_dpm_reset_vcn(adev, 1 << vcn_inst); + + if (r) { + DRM_DEV_ERROR(adev->dev, "VCN reset fail : %d\n", r); + return r; + } + + /* This flag is not set for VF, assumed to be disabled always */ + if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100) + adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED); + vcn_v4_0_3_hw_init_inst(vinst); + vcn_v4_0_3_start_dpg_mode(vinst, adev->vcn.inst[ring->me].indirect_sram); + + return amdgpu_ring_reset_helper_end(ring, timedout_fence); +} + static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_ENC, .align_mask = 0x3f, @@ -1591,6 +1650,7 @@ static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = { .emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg, .emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, + .reset = vcn_v4_0_3_ring_reset, }; /** @@ -1774,11 +1834,24 @@ static int vcn_v4_0_3_process_interrupt(struct amdgpu_device *adev, return 0; } +static int vcn_v4_0_3_set_ras_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + static const struct amdgpu_irq_src_funcs vcn_v4_0_3_irq_funcs = { .set = vcn_v4_0_3_set_interrupt_state, .process = vcn_v4_0_3_process_interrupt, }; +static const struct amdgpu_irq_src_funcs vcn_v4_0_3_ras_irq_funcs = { + .set = vcn_v4_0_3_set_ras_interrupt_state, + .process = amdgpu_vcn_process_poison_irq, +}; + /** * vcn_v4_0_3_set_irq_funcs - set VCN block interrupt irq functions * @@ -1794,6 +1867,9 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev) adev->vcn.inst->irq.num_types++; } adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs; + + adev->vcn.inst->ras_poison_irq.num_types = 1; + adev->vcn.inst->ras_poison_irq.funcs = &vcn_v4_0_3_ras_irq_funcs; } static void vcn_v4_0_3_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) @@ -1941,9 +2017,44 @@ static void vcn_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev) vcn_v4_0_3_inst_reset_ras_error_count(adev, i); } +static uint32_t vcn_v4_0_3_query_poison_by_instance(struct amdgpu_device *adev, + uint32_t instance, uint32_t sub_block) +{ + uint32_t poison_stat = 0, reg_value = 0; + + switch (sub_block) { + case AMDGPU_VCN_V4_0_3_VCPU_VCODEC: + reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS); + poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF); + break; + default: + break; + } + + if (poison_stat) + dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n", + instance, sub_block); + + return poison_stat; +} + +static bool vcn_v4_0_3_query_poison_status(struct amdgpu_device *adev) +{ + uint32_t inst, sub; + uint32_t poison_stat = 0; + + for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++) + for (sub = 0; sub < AMDGPU_VCN_V4_0_3_MAX_SUB_BLOCK; sub++) + poison_stat += + vcn_v4_0_3_query_poison_by_instance(adev, inst, sub); + + 
return !!poison_stat; +} + static const struct amdgpu_ras_block_hw_ops vcn_v4_0_3_ras_hw_ops = { .query_ras_error_count = vcn_v4_0_3_query_ras_error_count, .reset_ras_error_count = vcn_v4_0_3_reset_ras_error_count, + .query_poison_status = vcn_v4_0_3_query_poison_status, }; static int vcn_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank, @@ -1965,7 +2076,7 @@ static int vcn_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank 1ULL); break; case ACA_SMU_TYPE_CE: - bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank); + bank->aca_err_type = ACA_ERROR_TYPE_CE; ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, ACA_REG__MISC0__ERRCNT(misc0)); break; @@ -2019,6 +2130,13 @@ static int vcn_v4_0_3_ras_late_init(struct amdgpu_device *adev, struct ras_commo if (r) return r; + if (amdgpu_ras_is_supported(adev, ras_block->block) && + adev->vcn.inst->ras_poison_irq.funcs) { + r = amdgpu_irq_get(adev, &adev->vcn.inst->ras_poison_irq, 0); + if (r) + goto late_fini; + } + r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN, &vcn_v4_0_3_aca_info, NULL); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h index 03572a1d0c9c..aeab89853a92 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h @@ -24,6 +24,12 @@ #ifndef __VCN_V4_0_3_H__ #define __VCN_V4_0_3_H__ +enum amdgpu_vcn_v4_0_3_sub_block { + AMDGPU_VCN_V4_0_3_VCPU_VCODEC = 0, + + AMDGPU_VCN_V4_0_3_MAX_SUB_BLOCK, +}; + extern const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block; void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c index ba603b2246e2..caf2d95a85d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c @@ -46,6 +46,7 @@ #define VCN_VID_SOC_ADDRESS_2_0 0x1fb00 #define VCN1_VID_SOC_ADDRESS_3_0 (0x48300 + 0x38000) +#define VCN1_AON_SOC_ADDRESS_3_0 (0x48000 + 0x38000) #define VCN_HARVEST_MMSCH 0 @@ -207,6 +208,10 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block) if (amdgpu_sriov_vf(adev)) fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG); + fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT; + fw_shared->drm_key_wa.method = + AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING; + if (amdgpu_vcnfw_log) amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]); @@ -214,6 +219,14 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block) adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_5_pause_dpg_mode; } + adev->vcn.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); + if (!amdgpu_sriov_vf(adev)) + adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + + r = amdgpu_vcn_sysfs_reset_mask_init(adev); + if (r) + return r; + if (amdgpu_sriov_vf(adev)) { r = amdgpu_virt_alloc_mm_table(adev); if (r) @@ -1022,6 +1035,10 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT | VCN_RB1_DB_CTRL__EN_MASK); + /* Keeping one read-back to ensure all register writes are done, otherwise + * it may introduce race conditions */ + RREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL); + return 0; } @@ -1204,6 +1221,10 @@ static int vcn_v4_0_5_start(struct amdgpu_vcn_inst *vinst) WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp); fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF); + /* 
Keeping one read-back to ensure all register writes are done, otherwise + * it may introduce race conditions */ + RREG32_SOC15(VCN, i, regVCN_RB_ENABLE); + return 0; } @@ -1234,6 +1255,11 @@ static void vcn_v4_0_5_stop_dpg_mode(struct amdgpu_vcn_inst *vinst) /* disable dynamic power gating mode */ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); + + /* Keeping one read-back to ensure all register writes are done, + * otherwise it may introduce race conditions. + */ + RREG32_SOC15(VCN, inst_idx, regUVD_STATUS); } /** @@ -1317,6 +1343,11 @@ static int vcn_v4_0_5_stop(struct amdgpu_vcn_inst *vinst) /* enable VCN power gating */ vcn_v4_0_5_enable_static_power_gating(vinst); + /* Keeping one read-back to ensure all register writes are done, + * otherwise it may introduce race conditions. + */ + RREG32_SOC15(VCN, i, regUVD_STATUS); + done: if (adev->pm.dpm_enabled) amdgpu_dpm_enable_vcn(adev, false, i); @@ -1435,6 +1466,24 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring) } } +static int vcn_v4_0_5_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me]; + int r; + + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + r = vcn_v4_0_5_stop(vinst); + if (r) + return r; + r = vcn_v4_0_5_start(vinst); + if (r) + return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); +} + static struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_ENC, .align_mask = 0x3f, @@ -1462,6 +1511,7 @@ static struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = { .emit_wreg = vcn_v2_0_enc_ring_emit_wreg, .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, + .reset = vcn_v4_0_5_ring_reset, }; /** diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c index d99d05f42f1d..07a6e9582880 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c @@ -196,9 +196,10 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block) adev->vcn.inst[i].pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode; } - /* TODO: Add queue reset mask when FW fully supports it */ adev->vcn.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); + if (!amdgpu_sriov_vf(adev)) + adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; vcn_v5_0_0_alloc_ip_dump(adev); @@ -533,7 +534,8 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst, /* VCN global tiling registers */ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( - VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); + VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG), + adev->gfx.config.gb_addr_config, 0, indirect); return; } @@ -793,6 +795,11 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT | VCN_RB1_DB_CTRL__EN_MASK); + /* Keeping one read-back to ensure all register writes are done, + * otherwise it may introduce race conditions. 
+ */ + RREG32_SOC15(VCN, inst_idx, regUVD_STATUS); + return 0; } @@ -945,6 +952,11 @@ static int vcn_v5_0_0_start(struct amdgpu_vcn_inst *vinst) WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp); fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF); + /* Keeping one read-back to ensure all register writes are done, + * otherwise it may introduce race conditions. + */ + RREG32_SOC15(VCN, i, regUVD_STATUS); + return 0; } @@ -976,6 +988,11 @@ static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst) WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); + /* Keeping one read-back to ensure all register writes are done, + * otherwise it may introduce race conditions. + */ + RREG32_SOC15(VCN, inst_idx, regUVD_STATUS); + return; } @@ -1057,6 +1074,11 @@ static int vcn_v5_0_0_stop(struct amdgpu_vcn_inst *vinst) /* enable VCN power gating */ vcn_v5_0_0_enable_static_power_gating(vinst); + /* Keeping one read-back to ensure all register writes are done, + * otherwise it may introduce race conditions. + */ + RREG32_SOC15(VCN, i, regUVD_STATUS); + done: if (adev->pm.dpm_enabled) amdgpu_dpm_enable_vcn(adev, false, i); @@ -1171,6 +1193,24 @@ static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring) } } +static int vcn_v5_0_0_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me]; + int r; + + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + r = vcn_v5_0_0_stop(vinst); + if (r) + return r; + r = vcn_v5_0_0_start(vinst); + if (r) + return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); +} + static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_ENC, .align_mask = 0x3f, @@ -1198,6 +1238,7 @@ static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = { .emit_wreg = vcn_v2_0_enc_ring_emit_wreg, .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, + .reset = vcn_v5_0_0_ring_reset, }; /** diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c index 581d8629b9d9..cdefd7fcb0da 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c @@ -30,6 +30,7 @@ #include "soc15_hw_ip.h" #include "vcn_v2_0.h" #include "vcn_v4_0_3.h" +#include "mmsch_v5_0.h" #include "vcn/vcn_5_0_0_offset.h" #include "vcn/vcn_5_0_0_sh_mask.h" @@ -39,12 +40,13 @@ #include <drm/drm_drv.h> +static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev); static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev); static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev); static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst, enum amd_powergating_state state); static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring); - +static void vcn_v5_0_1_set_ras_funcs(struct amdgpu_device *adev); /** * vcn_v5_0_1_early_init - set function pointers and load microcode * @@ -64,6 +66,7 @@ static int vcn_v5_0_1_early_init(struct amdgpu_ip_block *ip_block) vcn_v5_0_1_set_unified_ring_funcs(adev); vcn_v5_0_1_set_irq_funcs(adev); + vcn_v5_0_1_set_ras_funcs(adev); for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { adev->vcn.inst[i].set_pg_state = vcn_v5_0_1_set_pg_state; @@ -111,6 +114,10 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block) if (r) 
return r; + /* VCN POISON TRAP */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, + VCN_5_0__SRCID_UVD_POISON, &adev->vcn.inst->ras_poison_irq); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { vcn_inst = GET_INST(VCN, i); @@ -126,7 +133,14 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block) ring = &adev->vcn.inst[i].ring_enc[0]; ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * vcn_inst; + if (!amdgpu_sriov_vf(adev)) + ring->doorbell_index = + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 11 * vcn_inst; + else + ring->doorbell_index = + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 32 * vcn_inst; ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id); sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id); @@ -143,6 +157,12 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block) adev->vcn.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); + if (amdgpu_sriov_vf(adev)) { + r = amdgpu_virt_alloc_mm_table(adev); + if (r) + return r; + } + vcn_v5_0_0_alloc_ip_dump(adev); return amdgpu_vcn_sysfs_reset_mask_init(adev); @@ -172,6 +192,9 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block) drm_dev_exit(idx); } + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_free_mm_table(adev); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { r = amdgpu_vcn_suspend(adev, i); if (r) @@ -204,24 +227,38 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block) struct amdgpu_ring *ring; int i, r, vcn_inst; - if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100) - adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED); - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - vcn_inst = GET_INST(VCN, i); - ring = &adev->vcn.inst[i].ring_enc[0]; - - if (ring->use_doorbell) - adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, - ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + - 9 * vcn_inst), - adev->vcn.inst[i].aid_id); - - /* Re-init fw_shared, if required */ - vcn_v5_0_1_fw_shared_init(adev, i); - - r = amdgpu_ring_test_helper(ring); + if (amdgpu_sriov_vf(adev)) { + r = vcn_v5_0_1_start_sriov(adev); if (r) return r; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + ring = &adev->vcn.inst[i].ring_enc[0]; + ring->wptr = 0; + ring->wptr_old = 0; + vcn_v5_0_1_unified_ring_set_wptr(ring); + ring->sched.ready = true; + } + } else { + if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100) + adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + vcn_inst = GET_INST(VCN, i); + ring = &adev->vcn.inst[i].ring_enc[0]; + + if (ring->use_doorbell) + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, + ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 11 * vcn_inst), + adev->vcn.inst[i].aid_id); + + /* Re-init fw_shared, if required */ + vcn_v5_0_1_fw_shared_init(adev, i); + + r = amdgpu_ring_test_helper(ring); + if (r) + return r; + } } return 0; @@ -247,6 +284,9 @@ static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block) vinst->set_pg_state(vinst, AMD_PG_STATE_GATE); } + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) + amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0); + return 0; } @@ -503,6 +543,52 @@ static void vcn_v5_0_1_enable_clock_gating(struct amdgpu_vcn_inst *vinst) } /** + * vcn_v5_0_1_pause_dpg_mode - VCN pause with dpg mode + * + * @vinst: VCN instance + * @new_state: pause state + * + * Pause dpg mode for VCN block + */ +static int vcn_v5_0_1_pause_dpg_mode(struct 
amdgpu_vcn_inst *vinst, + struct dpg_pause_state *new_state) +{ + struct amdgpu_device *adev = vinst->adev; + uint32_t reg_data = 0; + int vcn_inst; + + vcn_inst = GET_INST(VCN, vinst->inst); + + /* pause/unpause if state is changed */ + if (vinst->pause_state.fw_based != new_state->fw_based) { + DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d %s\n", + vinst->pause_state.fw_based, new_state->fw_based, + new_state->fw_based ? "VCN_DPG_STATE__PAUSE" : "VCN_DPG_STATE__UNPAUSE"); + reg_data = RREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE) & + (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); + + if (new_state->fw_based == VCN_DPG_STATE__PAUSE) { + /* pause DPG */ + reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; + WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data); + + /* wait for ACK */ + SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_DPG_PAUSE, + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); + } else { + /* unpause DPG, no need to wait */ + reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; + WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data); + } + vinst->pause_state.fw_based = new_state->fw_based; + } + + return 0; +} + + +/** * vcn_v5_0_1_start_dpg_mode - VCN start with dpg mode * * @vinst: VCN instance @@ -518,6 +604,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst, volatile struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; struct amdgpu_ring *ring; + struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE}; int vcn_inst; uint32_t tmp; @@ -582,6 +669,12 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst, if (indirect) amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM); + /* resetting ring, fw should not check RB ring */ + fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET; + + /* Pause dpg */ + vcn_v5_0_1_pause_dpg_mode(vinst, &state); + ring = &adev->vcn.inst[inst_idx].ring_enc[0]; WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, lower_32_bits(ring->gpu_addr)); @@ -591,7 +684,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst, tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE); tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK); WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); - fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET; + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0); WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0); @@ -602,6 +695,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst, tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE); tmp |= VCN_RB_ENABLE__RB1_EN_MASK; WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); + /* resetting done, fw can check RB ring */ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF); WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL, @@ -613,6 +707,195 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst, return 0; } +static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev) +{ + int i, vcn_inst; + struct amdgpu_ring *ring_enc; + uint64_t cache_addr; + uint64_t rb_enc_addr; + uint64_t ctx_addr; + uint32_t param, resp, expected; + uint32_t offset, cache_size; + uint32_t tmp, timeout; + + struct amdgpu_mm_table *table = &adev->virt.mm_table; + uint32_t *table_loc; + uint32_t table_size; + uint32_t size, size_dw; + uint32_t init_status; + uint32_t enabled_vcn; + + struct mmsch_v5_0_cmd_direct_write + direct_wt = { {0} }; + struct mmsch_v5_0_cmd_direct_read_modify_write + direct_rd_mod_wt = { {0} }; + struct mmsch_v5_0_cmd_end end = { {0} }; 
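+	/* MMSCH hand-off: build a command table (direct writes and
+	 * read-modify-writes, terminated by an END marker) in the
+	 * mm_table buffer, publish its GPU address through
+	 * regMMSCH_VF_CTX_ADDR_LO/HI, then poll regMMSCH_VF_MAILBOX_RESP
+	 * until the firmware reports completion.
+	 */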
+ struct mmsch_v5_0_init_header header; + + volatile struct amdgpu_vcn5_fw_shared *fw_shared; + volatile struct amdgpu_fw_shared_rb_setup *rb_setup; + + direct_wt.cmd_header.command_type = + MMSCH_COMMAND__DIRECT_REG_WRITE; + direct_rd_mod_wt.cmd_header.command_type = + MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE; + end.cmd_header.command_type = MMSCH_COMMAND__END; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + vcn_inst = GET_INST(VCN, i); + + vcn_v5_0_1_fw_shared_init(adev, vcn_inst); + + memset(&header, 0, sizeof(struct mmsch_v5_0_init_header)); + header.version = MMSCH_VERSION; + header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2; + + table_loc = (uint32_t *)table->cpu_addr; + table_loc += header.total_size; + + table_size = 0; + + MMSCH_V5_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS), + ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY); + + cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4); + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo); + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi); + + offset = 0; + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_OFFSET0), 0); + } else { + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[i].gpu_addr)); + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[i].gpu_addr)); + offset = cache_size; + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_OFFSET0), + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + } + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_SIZE0), + cache_size); + + cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset; + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), lower_32_bits(cache_addr)); + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), upper_32_bits(cache_addr)); + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_OFFSET1), 0); + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE); + + cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE; + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), lower_32_bits(cache_addr)); + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), upper_32_bits(cache_addr)); + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_OFFSET2), 0); + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE); + + fw_shared = adev->vcn.inst[vcn_inst].fw_shared.cpu_addr; + rb_setup = &fw_shared->rb_setup; + + ring_enc = &adev->vcn.inst[vcn_inst].ring_enc[0]; + ring_enc->wptr = 0; + rb_enc_addr = ring_enc->gpu_addr; + + rb_setup->is_rb_enabled_flags |= RB_ENABLED; + rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr); + rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr); + rb_setup->rb_size = ring_enc->ring_size / 4; + fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG); + + 
MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr)); + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr)); + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_NONCACHE_SIZE0), + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared))); + MMSCH_V5_0_INSERT_END(); + + header.vcn0.init_status = 0; + header.vcn0.table_offset = header.total_size; + header.vcn0.table_size = table_size; + header.total_size += table_size; + + /* Send init table to mmsch */ + size = sizeof(struct mmsch_v5_0_init_header); + table_loc = (uint32_t *)table->cpu_addr; + memcpy((void *)table_loc, &header, size); + + ctx_addr = table->gpu_addr; + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr)); + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr)); + + tmp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID); + tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK; + tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID, tmp); + + size = header.total_size; + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_SIZE, size); + + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP, 0); + + param = 0x00000001; + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_HOST, param); + tmp = 0; + timeout = 1000; + resp = 0; + expected = MMSCH_VF_MAILBOX_RESP__OK; + while (resp != expected) { + resp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP); + if (resp != 0) + break; + + udelay(10); + tmp = tmp + 10; + if (tmp >= timeout) { + DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\ + " waiting for regMMSCH_VF_MAILBOX_RESP "\ + "(expected=0x%08x, readback=0x%08x)\n", + tmp, expected, resp); + return -EBUSY; + } + } + + enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0; + init_status = ((struct mmsch_v5_0_init_header *)(table_loc))->vcn0.init_status; + if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE + && init_status != MMSCH_VF_ENGINE_STATUS__PASS) { + DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init "\ + "status for VCN%x: 0x%x\n", resp, enabled_vcn, init_status); + } + } + + return 0; +} + /** * vcn_v5_0_1_start - VCN start * @@ -759,6 +1042,11 @@ static int vcn_v5_0_1_start(struct amdgpu_vcn_inst *vinst) WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF); + /* Keeping one read-back to ensure all register writes are done, + * otherwise it may introduce race conditions. 
+ */ + RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS); + return 0; } @@ -775,9 +1063,13 @@ static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_vcn_inst *vinst) int inst_idx = vinst->inst; uint32_t tmp; int vcn_inst; + struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE}; vcn_inst = GET_INST(VCN, inst_idx); + /* Unpause dpg */ + vcn_v5_0_1_pause_dpg_mode(vinst, &state); + /* Wait for power status to be 1 */ SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); @@ -789,6 +1081,11 @@ static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_vcn_inst *vinst) /* disable dynamic power gating mode */ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); + + /* Keeping one read-back to ensure all register writes are done, + * otherwise it may introduce race conditions. + */ + RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS); } /** @@ -864,6 +1161,11 @@ static int vcn_v5_0_1_stop(struct amdgpu_vcn_inst *vinst) /* clear status */ WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0); + /* Keeping one read-back to ensure all register writes are done, + * otherwise it may introduce race conditions. + */ + RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS); + return 0; } @@ -1049,8 +1351,18 @@ static int vcn_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block, static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst, enum amd_powergating_state state) { + struct amdgpu_device *adev = vinst->adev; int ret = 0; + /* for SRIOV, guest should not control VCN Power-gating + * MMSCH FW should control Power-gating and clock-gating + * guest should avoid touching CGC and PG + */ + if (amdgpu_sriov_vf(adev)) { + vinst->cur_state = AMD_PG_STATE_UNGATE; + return 0; + } + if (state == vinst->cur_state) return 0; @@ -1106,10 +1418,24 @@ static int vcn_v5_0_1_process_interrupt(struct amdgpu_device *adev, struct amdgp return 0; } +static int vcn_v5_0_1_set_ras_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + static const struct amdgpu_irq_src_funcs vcn_v5_0_1_irq_funcs = { .process = vcn_v5_0_1_process_interrupt, }; +static const struct amdgpu_irq_src_funcs vcn_v5_0_1_ras_irq_funcs = { + .set = vcn_v5_0_1_set_ras_interrupt_state, + .process = amdgpu_vcn_process_poison_irq, +}; + + /** * vcn_v5_0_1_set_irq_funcs - set VCN block interrupt irq functions * @@ -1123,7 +1449,12 @@ static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev) for (i = 0; i < adev->vcn.num_vcn_inst; ++i) adev->vcn.inst->irq.num_types++; + adev->vcn.inst->irq.funcs = &vcn_v5_0_1_irq_funcs; + + adev->vcn.inst->ras_poison_irq.num_types = 1; + adev->vcn.inst->ras_poison_irq.funcs = &vcn_v5_0_1_ras_irq_funcs; + } static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = { @@ -1155,3 +1486,139 @@ const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block = { .rev = 1, .funcs = &vcn_v5_0_1_ip_funcs, }; + +static uint32_t vcn_v5_0_1_query_poison_by_instance(struct amdgpu_device *adev, + uint32_t instance, uint32_t sub_block) +{ + uint32_t poison_stat = 0, reg_value = 0; + + switch (sub_block) { + case AMDGPU_VCN_V5_0_1_VCPU_VCODEC: + reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS); + poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF); + break; + default: + break; + } + + if (poison_stat) + dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n", + instance, sub_block); + + return poison_stat; +} + 
+static bool vcn_v5_0_1_query_poison_status(struct amdgpu_device *adev)
+{
+	uint32_t inst, sub;
+	uint32_t poison_stat = 0;
+
+	for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
+		for (sub = 0; sub < AMDGPU_VCN_V5_0_1_MAX_SUB_BLOCK; sub++)
+			poison_stat +=
+				vcn_v5_0_1_query_poison_by_instance(adev, inst, sub);
+
+	return !!poison_stat;
+}
+
+static const struct amdgpu_ras_block_hw_ops vcn_v5_0_1_ras_hw_ops = {
+	.query_poison_status = vcn_v5_0_1_query_poison_status,
+};
+
+static int vcn_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+				      enum aca_smu_type type, void *data)
+{
+	struct aca_bank_info info;
+	u64 misc0;
+	int ret;
+
+	ret = aca_bank_info_decode(bank, &info);
+	if (ret)
+		return ret;
+
+	misc0 = bank->regs[ACA_REG_IDX_MISC0];
+	switch (type) {
+	case ACA_SMU_TYPE_UE:
+		bank->aca_err_type = ACA_ERROR_TYPE_UE;
+		ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
+						     1ULL);
+		break;
+	case ACA_SMU_TYPE_CE:
+		bank->aca_err_type = ACA_ERROR_TYPE_CE;
+		ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
+						     ACA_REG__MISC0__ERRCNT(misc0));
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+/* refer to the SMU driver interface header file for these error codes */
+static int vcn_v5_0_1_err_codes[] = {
+	14, 15, /* VCN */
+};
+
+static bool vcn_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
+					 enum aca_smu_type type, void *data)
+{
+	u32 instlo;
+
+	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+	instlo &= GENMASK(31, 1);
+
+	if (instlo != mmSMNAID_AID0_MCA_SMU)
+		return false;
+
+	if (aca_bank_check_error_codes(handle->adev, bank,
+				       vcn_v5_0_1_err_codes,
+				       ARRAY_SIZE(vcn_v5_0_1_err_codes)))
+		return false;
+
+	return true;
+}
+
+static const struct aca_bank_ops vcn_v5_0_1_aca_bank_ops = {
+	.aca_bank_parser = vcn_v5_0_1_aca_bank_parser,
+	.aca_bank_is_valid = vcn_v5_0_1_aca_bank_is_valid,
+};
+
+static const struct aca_info vcn_v5_0_1_aca_info = {
+	.hwip = ACA_HWIP_TYPE_SMU,
+	.mask = ACA_ERROR_UE_MASK,
+	.bank_ops = &vcn_v5_0_1_aca_bank_ops,
+};
+
+static int vcn_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+	int r;
+
+	r = amdgpu_ras_block_late_init(adev, ras_block);
+	if (r)
+		return r;
+
+	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN,
+				&vcn_v5_0_1_aca_info, NULL);
+	if (r)
+		goto late_fini;
+
+	return 0;
+
+late_fini:
+	amdgpu_ras_block_late_fini(adev, ras_block);
+
+	return r;
+}
+
+static struct amdgpu_vcn_ras vcn_v5_0_1_ras = {
+	.ras_block = {
+		.hw_ops = &vcn_v5_0_1_ras_hw_ops,
+		.ras_late_init = vcn_v5_0_1_ras_late_init,
+	},
+};
+
+static void vcn_v5_0_1_set_ras_funcs(struct amdgpu_device *adev)
+{
+	adev->vcn.ras = &vcn_v5_0_1_ras;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
index 8fd90bd10807..b72e4da68317 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
@@ -27,6 +27,13 @@
 #define regVCN_RRMT_CNTL 0x0940
 #define regVCN_RRMT_CNTL_BASE_IDX 1
+
+enum amdgpu_vcn_v5_0_1_sub_block {
+	AMDGPU_VCN_V5_0_1_VCPU_VCODEC = 0,
+
+	AMDGPU_VCN_V5_0_1_MAX_SUB_BLOCK,
+};
+
 extern const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block;

 #endif /* __VCN_v5_0_1_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/vega10_sdma_pkt_open.h
index 3ca8a417c6d8..8de4ccce5e38 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_sdma_pkt_open.h
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_sdma_pkt_open.h
@@ -64,9 +64,6
@@ #define HEADER_BARRIER 5 #define SDMA_OP_AQL_COPY 0 #define SDMA_OP_AQL_BARRIER_OR 0 -/* vm invalidation is only available for GC9.4.3/GC9.4.4/GC9.5.0 */ -#define SDMA_OP_VM_INVALIDATE 8 -#define SDMA_SUBOP_VM_INVALIDATE 4 /*define for op field*/ #define SDMA_PKT_HEADER_op_offset 0 @@ -3334,72 +3331,5 @@ #define SDMA_AQL_PKT_BARRIER_OR_COMPLETION_SIGNAL_HI_completion_signal_63_32_shift 0 #define SDMA_AQL_PKT_BARRIER_OR_COMPLETION_SIGNAL_HI_COMPLETION_SIGNAL_63_32(x) (((x) & SDMA_AQL_PKT_BARRIER_OR_COMPLETION_SIGNAL_HI_completion_signal_63_32_mask) << SDMA_AQL_PKT_BARRIER_OR_COMPLETION_SIGNAL_HI_completion_signal_63_32_shift) -/* -** Definitions for SDMA_PKT_VM_INVALIDATION packet -*/ - -/*define for HEADER word*/ -/*define for op field*/ -#define SDMA_PKT_VM_INVALIDATION_HEADER_op_offset 0 -#define SDMA_PKT_VM_INVALIDATION_HEADER_op_mask 0x000000FF -#define SDMA_PKT_VM_INVALIDATION_HEADER_op_shift 0 -#define SDMA_PKT_VM_INVALIDATION_HEADER_OP(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_op_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_op_shift) - -/*define for sub_op field*/ -#define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_offset 0 -#define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_mask 0x000000FF -#define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_shift 8 -#define SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_shift) - -/*define for xcc0_eng_id field*/ -#define SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_offset 0 -#define SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_mask 0x0000001F -#define SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_shift 16 -#define SDMA_PKT_VM_INVALIDATION_HEADER_XCC0_ENG_ID(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_shift) - -/*define for xcc1_eng_id field*/ -#define SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_offset 0 -#define SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_mask 0x0000001F -#define SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_shift 21 -#define SDMA_PKT_VM_INVALIDATION_HEADER_XCC1_ENG_ID(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_shift) - -/*define for mmhub_eng_id field*/ -#define SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_offset 0 -#define SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_mask 0x0000001F -#define SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_shift 26 -#define SDMA_PKT_VM_INVALIDATION_HEADER_MMHUB_ENG_ID(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_shift) - -/*define for INVALIDATEREQ word*/ -/*define for invalidatereq field*/ -#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_offset 1 -#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_mask 0xFFFFFFFF -#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_shift 0 -#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_INVALIDATEREQ(x) ((x & SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_mask) << SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_shift) - -/*define for ADDRESSRANGELO word*/ -/*define for addressrangelo field*/ -#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_offset 2 -#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_mask 0xFFFFFFFF -#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_shift 0 -#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_ADDRESSRANGELO(x) ((x & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_mask) << 
SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_shift) - -/*define for ADDRESSRANGEHI word*/ -/*define for invalidateack field*/ -#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_offset 3 -#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_mask 0x0000FFFF -#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_shift 0 -#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(x) ((x & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_shift) - -/*define for addressrangehi field*/ -#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_offset 3 -#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_mask 0x0000001F -#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_shift 16 -#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_ADDRESSRANGEHI(x) ((x & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_shift) - -/*define for reserved field*/ -#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_offset 3 -#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_mask 0x000001FF -#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_shift 23 -#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_RESERVED(x) ((x & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_shift) #endif /* __SDMA_PKT_OPEN_H_ */ diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c index faa0dd75dd6d..85846fd08ce4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c @@ -350,6 +350,7 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev) if (ret) return ret; } + ih[i]->overflow = false; } if (!amdgpu_sriov_vf(adev)) @@ -437,7 +438,10 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev, if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) goto out; - wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); + if (!amdgpu_sriov_vf(adev)) + wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); + else + ih->overflow = true; /* When a ring buffer overflow happen start parsing interrupt * from the last not overwritten vector (wptr + 32). 
Hopefully diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 86d8bc10d90a..9b3510e53112 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -239,6 +239,13 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] = .max_pixels_per_frame = 4096 * 4096, .max_level = 186, }, + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, + .max_width = 4096, + .max_height = 4096, + .max_pixels_per_frame = 4096 * 4096, + .max_level = 0, + }, }; static const struct amdgpu_video_codecs cz_video_codecs_decode = diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig index d3c3d3ab7225..16e12c9913f9 100644 --- a/drivers/gpu/drm/amd/amdkfd/Kconfig +++ b/drivers/gpu/drm/amd/amdkfd/Kconfig @@ -5,7 +5,7 @@ config HSA_AMD bool "HSA kernel driver for AMD GPU devices" - depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64) + depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64 || (RISCV && 64BIT) || (LOONGARCH && 64BIT)) select HMM_MIRROR select MMU_NOTIFIER select DRM_AMDGPU_USERPTR diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c index 981d9adcc5e1..73acbe0b7c21 100644 --- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c @@ -91,7 +91,6 @@ static void cik_event_interrupt_wq(struct kfd_node *dev, const struct cik_ih_ring_entry *ihre = (const struct cik_ih_ring_entry *)ih_ring_entry; uint32_t context_id = ihre->data & 0xfffffff; - unsigned int vmid = (ihre->ring_id & 0x0000ff00) >> 8; u32 pasid = (ihre->ring_id & 0xffff0000) >> 16; if (pasid == 0) @@ -125,11 +124,7 @@ static void cik_event_interrupt_wq(struct kfd_node *dev, return; } - if (info.vmid == vmid) - kfd_signal_vm_fault_event(pdd, &info, NULL); - else - kfd_signal_vm_fault_event(pdd, &info, NULL); - + kfd_signal_vm_fault_event(pdd, &info, NULL); kfd_unref_process(p); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 1e9dd00620bf..828a9ceef1e7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -22,7 +22,6 @@ */ #include <linux/device.h> -#include <linux/export.h> #include <linux/err.h> #include <linux/fs.h> #include <linux/file.h> @@ -2039,9 +2038,7 @@ static int criu_get_process_object_info(struct kfd_process *p, num_events = kfd_get_num_events(p); - ret = svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size); - if (ret) - return ret; + svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size); *num_objects = num_queues + num_events + num_svm_ranges; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c index 4a5a0a4e00f2..9bde2c64540f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c @@ -27,6 +27,16 @@ #include "kfd_priv.h" static struct dentry *debugfs_root; +static struct dentry *debugfs_proc; +static struct list_head procs; + +struct debugfs_proc_entry { + struct list_head list; + struct dentry *proc_dentry; + pid_t pid; +}; + +#define MAX_DEBUGFS_FILENAME_LEN 32 static int kfd_debugfs_open(struct inode *inode, struct file *file) { @@ -92,6 +102,8 @@ static const struct file_operations kfd_debugfs_hang_hws_fops = { void kfd_debugfs_init(void) { debugfs_root = debugfs_create_dir("kfd", NULL); + debugfs_proc = debugfs_create_dir("proc", debugfs_root); + INIT_LIST_HEAD(&procs); debugfs_create_file("mqds", 
S_IFREG | 0444, debugfs_root, kfd_debugfs_mqds_by_process, &kfd_debugfs_fops); @@ -107,5 +119,69 @@ void kfd_debugfs_init(void) void kfd_debugfs_fini(void) { + debugfs_remove_recursive(debugfs_proc); debugfs_remove_recursive(debugfs_root); } + +static ssize_t kfd_debugfs_pasid_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct kfd_process_device *pdd = file_inode(file)->i_private; + char tmp[32]; + int len; + + len = snprintf(tmp, sizeof(tmp), "%u\n", pdd->pasid); + + return simple_read_from_buffer(buf, count, ppos, tmp, len); +} + +static const struct file_operations kfd_debugfs_pasid_fops = { + .owner = THIS_MODULE, + .read = kfd_debugfs_pasid_read, +}; + +void kfd_debugfs_add_process(struct kfd_process *p) +{ + int i; + char name[MAX_DEBUGFS_FILENAME_LEN]; + struct debugfs_proc_entry *entry; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return; + + list_add(&entry->list, &procs); + entry->pid = p->lead_thread->pid; + snprintf(name, MAX_DEBUGFS_FILENAME_LEN, "%d", + (int)entry->pid); + entry->proc_dentry = debugfs_create_dir(name, debugfs_proc); + + /* Create debugfs files for each GPU: + * - proc/<pid>/pasid_<gpuid> + */ + for (i = 0; i < p->n_pdds; i++) { + struct kfd_process_device *pdd = p->pdds[i]; + + snprintf(name, MAX_DEBUGFS_FILENAME_LEN, "pasid_%u", + pdd->dev->id); + debugfs_create_file((const char *)name, S_IFREG | 0444, + entry->proc_dentry, pdd, + &kfd_debugfs_pasid_fops); + } +} + +void kfd_debugfs_remove_process(struct kfd_process *p) +{ + struct debugfs_proc_entry *entry, *next; + + mutex_lock(&kfd_processes_mutex); + list_for_each_entry_safe(entry, next, &procs, list) { + if (entry->pid != p->lead_thread->pid) + continue; + + debugfs_remove_recursive(entry->proc_dentry); + list_del(&entry->list); + kfree(entry); + } + mutex_unlock(&kfd_processes_mutex); +} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index b9c82be6ce13..7e749f9b6d69 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -352,11 +352,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) f2g = &aldebaran_kfd2kgd; break; case IP_VERSION(9, 4, 3): - gfx_target_version = adev->rev_id >= 1 ? 90402 - : adev->flags & AMD_IS_APU ? 
90400 - : 90401; - f2g = &gc_9_4_3_kfd2kgd; - break; case IP_VERSION(9, 4, 4): gfx_target_version = 90402; f2g = &gc_9_4_3_kfd2kgd; @@ -976,7 +971,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd, kfd_smi_event_update_gpu_reset(node, false, reset_context); } - kgd2kfd_suspend(kfd, false); + kgd2kfd_suspend(kfd, true); for (i = 0; i < kfd->num_nodes; i++) kfd_signal_reset_event(kfd->nodes[i]); @@ -1018,13 +1013,33 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd) return 0; } -bool kfd_is_locked(void) +bool kfd_is_locked(struct kfd_dev *kfd) { + uint8_t id = 0; + struct kfd_node *dev; + lockdep_assert_held(&kfd_processes_mutex); - return (kfd_locked > 0); + + /* check reset/suspend lock */ + if (kfd_locked > 0) + return true; + + if (kfd) + return kfd->kfd_dev_lock > 0; + + /* check lock on all cgroup accessible devices */ + while (kfd_topology_enum_kfd_devices(id++, &dev) == 0) { + if (!dev || kfd_devcgroup_check_permission(dev)) + continue; + + if (dev->kfd->kfd_dev_lock > 0) + return true; + } + + return false; } -void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) +void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc) { struct kfd_node *node; int i; @@ -1032,14 +1047,8 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) if (!kfd->init_complete) return; - /* for runtime suspend, skip locking kfd */ - if (!run_pm) { - mutex_lock(&kfd_processes_mutex); - /* For first KFD device suspend all the KFD processes */ - if (++kfd_locked == 1) - kfd_suspend_all_processes(); - mutex_unlock(&kfd_processes_mutex); - } + if (suspend_proc) + kgd2kfd_suspend_process(kfd); for (i = 0; i < kfd->num_nodes; i++) { node = kfd->nodes[i]; @@ -1047,7 +1056,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) } } -int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) +int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc) { int ret, i; @@ -1060,14 +1069,36 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) return ret; } - /* for runtime resume, skip unlocking kfd */ - if (!run_pm) { - mutex_lock(&kfd_processes_mutex); - if (--kfd_locked == 0) - ret = kfd_resume_all_processes(); - WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error"); - mutex_unlock(&kfd_processes_mutex); - } + if (resume_proc) + ret = kgd2kfd_resume_process(kfd); + + return ret; +} + +void kgd2kfd_suspend_process(struct kfd_dev *kfd) +{ + if (!kfd->init_complete) + return; + + mutex_lock(&kfd_processes_mutex); + /* For first KFD device suspend all the KFD processes */ + if (++kfd_locked == 1) + kfd_suspend_all_processes(); + mutex_unlock(&kfd_processes_mutex); +} + +int kgd2kfd_resume_process(struct kfd_dev *kfd) +{ + int ret = 0; + + if (!kfd->init_complete) + return 0; + + mutex_lock(&kfd_processes_mutex); + if (--kfd_locked == 0) + ret = kfd_resume_all_processes(); + WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error"); + mutex_unlock(&kfd_processes_mutex); return ret; } @@ -1447,24 +1478,53 @@ unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node) kfd_get_num_sdma_engines(node); } -int kgd2kfd_check_and_lock_kfd(void) +int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd) { + struct kfd_process *p; + int r = 0, temp, idx; + mutex_lock(&kfd_processes_mutex); - if (!hash_empty(kfd_processes_table) || kfd_is_locked()) { - mutex_unlock(&kfd_processes_mutex); - return -EBUSY; + + if (hash_empty(kfd_processes_table) && !kfd_is_locked(kfd)) + goto out; + + /* fail under system reset/resume or kfd device is partition switching. 
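+	 * (kfd_is_locked() covers both cases: the global kfd_locked count
+	 * taken for suspend/reset, and the per-device kfd_dev_lock taken
+	 * while switching partition mode, so return -EBUSY in either case.)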
*/ + if (kfd_is_locked(kfd)) { + r = -EBUSY; + goto out; } - ++kfd_locked; + /* + * ensure all running processes are cgroup excluded from device before mode switch. + * i.e. no pdd was created on the process socket. + */ + idx = srcu_read_lock(&kfd_processes_srcu); + hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { + int i; + + for (i = 0; i < p->n_pdds; i++) { + if (p->pdds[i]->dev->kfd != kfd) + continue; + + r = -EBUSY; + goto proc_check_unlock; + } + } + +proc_check_unlock: + srcu_read_unlock(&kfd_processes_srcu, idx); +out: + if (!r) + ++kfd->kfd_dev_lock; mutex_unlock(&kfd_processes_mutex); - return 0; + return r; } -void kgd2kfd_unlock_kfd(void) +void kgd2kfd_unlock_kfd(struct kfd_dev *kfd) { mutex_lock(&kfd_processes_mutex); - --kfd_locked; + --kfd->kfd_dev_lock; mutex_unlock(&kfd_processes_mutex); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index c610e172a2b8..2d91027e2a74 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1576,8 +1576,9 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, int bit; if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { - if (bitmap_empty(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES)) { - dev_err(dev, "No more SDMA queue to allocate\n"); + if (bitmap_empty(dqm->sdma_bitmap, get_num_sdma_queues(dqm))) { + dev_warn(dev, "No more SDMA queue to allocate (%d total queues)\n", + get_num_sdma_queues(dqm)); return -ENOMEM; } @@ -1602,8 +1603,9 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, q->properties.sdma_queue_id = q->sdma_id / kfd_get_num_sdma_engines(dqm->dev); } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { - if (bitmap_empty(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES)) { - dev_err(dev, "No more XGMI SDMA queue to allocate\n"); + if (bitmap_empty(dqm->xgmi_sdma_bitmap, get_num_xgmi_sdma_queues(dqm))) { + dev_warn(dev, "No more XGMI SDMA queue to allocate (%d total queues)\n", + get_num_xgmi_sdma_queues(dqm)); return -ENOMEM; } if (restore_sdma_id) { @@ -1662,8 +1664,8 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, } if (!free_bit_found) { - dev_err(dev, "No more SDMA queue to allocate for target ID %i\n", - q->properties.sdma_engine_id); + dev_warn(dev, "No more SDMA queue to allocate for target ID %i (%d total queues)\n", + q->properties.sdma_engine_id, num_queues); return -ENOMEM; } } @@ -2310,7 +2312,7 @@ static int reset_hung_queues_sdma(struct device_queue_manager *dqm) continue; /* Reset engine and check. */ - if (amdgpu_sdma_reset_engine(dqm->dev->adev, i) || + if (amdgpu_sdma_reset_engine(dqm->dev->adev, i, false) || dqm->dev->kfd2kgd->hqd_sdma_get_doorbell(dqm->dev->adev, i, j) || !set_sdma_queue_as_reset(dqm, doorbell_off)) { r = -ENOTRECOVERABLE; @@ -2337,9 +2339,18 @@ reset_fail: static int reset_queues_on_hws_hang(struct device_queue_manager *dqm, bool is_sdma) { + struct amdgpu_device *adev = dqm->dev->adev; + while (halt_if_hws_hang) schedule(); + if (adev->debug_disable_gpu_ring_reset) { + dev_info_once(adev->dev, + "%s queue hung, but ring reset disabled", + is_sdma ? 
"sdma" : "compute"); + + return -EPERM; + } if (!amdgpu_gpu_recovery) return -ENOTRECOVERABLE; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index fecdb6794075..82905f3e54dd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -1177,6 +1177,25 @@ void kfd_signal_hw_exception_event(u32 pasid) kfd_unref_process(p); } +void kfd_signal_vm_fault_event_with_userptr(struct kfd_process *p, uint64_t gpu_va) +{ + struct kfd_process_device *pdd; + struct kfd_hsa_memory_exception_data exception_data; + int i; + + memset(&exception_data, 0, sizeof(exception_data)); + exception_data.va = gpu_va; + exception_data.failure.NotPresent = 1; + + // Send VM seg fault to all kfd process device + for (i = 0; i < p->n_pdds; i++) { + pdd = p->pdds[i]; + exception_data.gpu_id = pdd->user_gpu_id; + kfd_evict_process_device(pdd); + kfd_signal_vm_fault_event(pdd, NULL, &exception_data); + } +} + void kfd_signal_vm_fault_event(struct kfd_process_device *pdd, struct kfd_vm_fault_info *info, struct kfd_hsa_memory_exception_data *data) @@ -1283,7 +1302,7 @@ void kfd_signal_reset_event(struct kfd_node *dev) if (ti) { dev_err(dev->adev->dev, "Queues reset on process %s tid %d thread %s pid %d\n", - ti->process_name, ti->tgid, ti->task_name, ti->pid); + ti->process_name, ti->tgid, ti->task.comm, ti->task.pid); amdgpu_vm_put_task_info(ti); } } @@ -1331,6 +1350,7 @@ void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid) user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id); if (unlikely(user_gpu_id == -EINVAL)) { WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id); + kfd_unref_process(p); return; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index dbcb60eb54b2..1d170dc50df3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -23,7 +23,6 @@ */ #include <linux/device.h> -#include <linux/export.h> #include <linux/err.h> #include <linux/fs.h> #include <linux/sched.h> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c index 37b69fe0ede3..3e1ad8974797 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c @@ -168,14 +168,14 @@ static bool event_interrupt_isr_v10(struct kfd_node *dev, client_id != SOC15_IH_CLIENTID_SE3SH) return false; - pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n", - client_id, source_id, vmid, pasid); - pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n", - data[0], data[1], data[2], data[3], - data[4], data[5], data[6], data[7]); + dev_dbg(dev->adev->dev, + "client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n", + client_id, source_id, vmid, pasid); + dev_dbg(dev->adev->dev, "%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n", + data[0], data[1], data[2], data[3], data[4], data[5], data[6], + data[7]); - /* If there is no valid PASID, it's likely a bug */ - if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt")) + if (pasid == 0) return 0; /* Interrupt types we care about: various signals and faults. 
@@ -217,37 +217,66 @@ static void event_interrupt_wq_v10(struct kfd_node *dev, SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING); switch (encoding) { case SQ_INTERRUPT_WORD_ENCODING_AUTO: - pr_debug_ratelimited( + dev_dbg_ratelimited( + dev->adev->dev, "sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf0_full %d, ttrac_buf1_full %d, ttrace_utc_err %d\n", - REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_AUTO_CTXID1, - SE_ID), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, - THREAD_TRACE), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, - WLT), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, - THREAD_TRACE_BUF0_FULL), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, - THREAD_TRACE_BUF1_FULL), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, - THREAD_TRACE_UTC_ERROR)); + REG_GET_FIELD( + context_id1, + SQ_INTERRUPT_WORD_AUTO_CTXID1, + SE_ID), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_AUTO_CTXID0, + THREAD_TRACE), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_AUTO_CTXID0, + WLT), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_AUTO_CTXID0, + THREAD_TRACE_BUF0_FULL), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_AUTO_CTXID0, + THREAD_TRACE_BUF1_FULL), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_AUTO_CTXID0, + THREAD_TRACE_UTC_ERROR)); break; case SQ_INTERRUPT_WORD_ENCODING_INST: - pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n", - REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, - SE_ID), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, - DATA), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, - SA_ID), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, - PRIV), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, - WAVE_ID), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, - SIMD_ID), - REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, - WGP_ID)); + dev_dbg_ratelimited( + dev->adev->dev, + "sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n", + REG_GET_FIELD( + context_id1, + SQ_INTERRUPT_WORD_WAVE_CTXID1, + SE_ID), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID0, + DATA), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID0, + SA_ID), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID0, + PRIV), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID0, + WAVE_ID), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID0, + SIMD_ID), + REG_GET_FIELD( + context_id1, + SQ_INTERRUPT_WORD_WAVE_CTXID1, + WGP_ID)); if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV_MASK) { if (kfd_set_dbg_ev_from_interrupt(dev, pasid, KFD_DEBUG_DOORBELL_ID(context_id0), @@ -259,21 +288,37 @@ static void event_interrupt_wq_v10(struct kfd_node *dev, case SQ_INTERRUPT_WORD_ENCODING_ERROR: sq_intr_err_type = REG_GET_FIELD(context_id0, KFD_CTXID0, ERR_TYPE); - pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n", - REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, - SE_ID), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, - DATA), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, - SA_ID), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, - PRIV), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, - WAVE_ID), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, - SIMD_ID), - REG_GET_FIELD(context_id1, 
SQ_INTERRUPT_WORD_WAVE_CTXID1, - WGP_ID), + dev_warn_ratelimited( + dev->adev->dev, + "sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n", + REG_GET_FIELD( + context_id1, + SQ_INTERRUPT_WORD_WAVE_CTXID1, + SE_ID), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID0, + DATA), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID0, + SA_ID), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID0, + PRIV), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID0, + WAVE_ID), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID0, + SIMD_ID), + REG_GET_FIELD( + context_id1, + SQ_INTERRUPT_WORD_WAVE_CTXID1, + WGP_ID), sq_intr_err_type); break; default: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c index c5f97e6e36ff..2788a52714d1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c @@ -148,44 +148,69 @@ enum SQ_INTERRUPT_ERROR_TYPE { #define KFD_CTXID0_DOORBELL_ID(ctxid0) ((ctxid0) & \ KFD_CTXID0_DOORBELL_ID_MASK) -static void print_sq_intr_info_auto(uint32_t context_id0, uint32_t context_id1) +static void print_sq_intr_info_auto(struct kfd_node *dev, uint32_t context_id0, + uint32_t context_id1) { - pr_debug_ratelimited( + dev_dbg_ratelimited( + dev->adev->dev, "sq_intr: auto, ttrace %d, wlt %d, ttrace_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n", - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, + THREAD_TRACE), REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, WLT), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE_BUF_FULL), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, REG_TIMESTAMP), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, CMD_TIMESTAMP), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, HOST_CMD_OVERFLOW), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, HOST_REG_OVERFLOW), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, IMMED_OVERFLOW), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE_UTC_ERROR)); + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, + THREAD_TRACE_BUF_FULL), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, + REG_TIMESTAMP), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, + CMD_TIMESTAMP), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, + HOST_CMD_OVERFLOW), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, + HOST_REG_OVERFLOW), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, + IMMED_OVERFLOW), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, + THREAD_TRACE_UTC_ERROR)); } -static void print_sq_intr_info_inst(uint32_t context_id0, uint32_t context_id1) +static void print_sq_intr_info_inst(struct kfd_node *dev, uint32_t context_id0, + uint32_t context_id1) { - pr_debug_ratelimited( + dev_dbg_ratelimited( + dev->adev->dev, "sq_intr: inst, data 0x%08x, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n", REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, DATA), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, SH_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + SH_ID), REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV), - REG_GET_FIELD(context_id0, 
SQ_INTERRUPT_WORD_WAVE_CTXID0, WAVE_ID), - REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, SIMD_ID), - REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, WGP_ID)); + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + WAVE_ID), + REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, + SIMD_ID), + REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, + WGP_ID)); } -static void print_sq_intr_info_error(uint32_t context_id0, uint32_t context_id1) +static void print_sq_intr_info_error(struct kfd_node *dev, uint32_t context_id0, + uint32_t context_id1) { - pr_warn_ratelimited( + dev_warn_ratelimited( + dev->adev->dev, "sq_intr: error, detail 0x%08x, type %d, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n", - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, DETAIL), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, SH_ID), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, PRIV), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, WAVE_ID), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1, SIMD_ID), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1, WGP_ID)); + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, + DETAIL), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, + TYPE), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, + SH_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, + PRIV), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, + WAVE_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1, + SIMD_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1, + WGP_ID)); } static void event_interrupt_poison_consumption_v11(struct kfd_node *dev, @@ -255,14 +280,14 @@ static bool event_interrupt_isr_v11(struct kfd_node *dev, (context_id0 & AMDGPU_FENCE_MES_QUEUE_FLAG)) return false; - pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n", - client_id, source_id, vmid, pasid); - pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n", - data[0], data[1], data[2], data[3], - data[4], data[5], data[6], data[7]); + dev_dbg(dev->adev->dev, + "client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n", + client_id, source_id, vmid, pasid); + dev_dbg(dev->adev->dev, "%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n", + data[0], data[1], data[2], data[3], data[4], data[5], data[6], + data[7]); - /* If there is no valid PASID, it's likely a bug */ - if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt")) + if (pasid == 0) return false; /* Interrupt types we care about: various signals and faults. 
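Threading the kfd_node through these print helpers is what makes the dev_* logging variants possible: unlike pr_debug(), dev_dbg() prefixes each message with the originating device, so interleaved interrupt logs from several GPUs can be told apart. A small stand-in illustrating the difference (the kernel macro takes a struct device *, not this toy type):

#include <stdio.h>

struct device { const char *name; };

/* stand-in for the kernel's dev_dbg(): prepend the device name */
#define dev_dbg(dev, fmt, ...) \
	printf("%s: " fmt, (dev)->name, ##__VA_ARGS__)

int main(void)
{
	struct device gpu0 = { "amdgpu 0000:03:00.0" };
	struct device gpu1 = { "amdgpu 0000:83:00.0" };

	/* with pr_debug() these two lines would be indistinguishable */
	dev_dbg(&gpu0, "client id 0x%x, vmid %d\n", 0x14, 3);
	dev_dbg(&gpu1, "client id 0x%x, vmid %d\n", 0x14, 5);
	return 0;
}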
@@ -353,10 +378,10 @@ static void event_interrupt_wq_v11(struct kfd_node *dev, SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING); switch (sq_int_enc) { case SQ_INTERRUPT_WORD_ENCODING_AUTO: - print_sq_intr_info_auto(context_id0, context_id1); + print_sq_intr_info_auto(dev, context_id0, context_id1); break; case SQ_INTERRUPT_WORD_ENCODING_INST: - print_sq_intr_info_inst(context_id0, context_id1); + print_sq_intr_info_inst(dev, context_id0, context_id1); sq_int_priv = REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV); if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(dev, pasid, @@ -366,7 +391,7 @@ static void event_interrupt_wq_v11(struct kfd_node *dev, return; break; case SQ_INTERRUPT_WORD_ENCODING_ERROR: - print_sq_intr_info_error(context_id0, context_id1); + print_sq_intr_info_error(dev, context_id0, context_id1); sq_int_errtype = REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE); if (sq_int_errtype != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST && diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index b8a91bf4ef30..4ceb251312a6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -314,11 +314,12 @@ static bool event_interrupt_isr_v9(struct kfd_node *dev, & ~pasid_mask) | pasid); } - pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n", - client_id, source_id, vmid, pasid); - pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n", - data[0], data[1], data[2], data[3], - data[4], data[5], data[6], data[7]); + dev_dbg(dev->adev->dev, + "client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n", + client_id, source_id, vmid, pasid); + dev_dbg(dev->adev->dev, "%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n", + data[0], data[1], data[2], data[3], data[4], data[5], data[6], + data[7]); /* If there is no valid PASID, it's likely a bug */ if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt")) @@ -379,28 +380,82 @@ static void event_interrupt_wq_v9(struct kfd_node *dev, encoding = REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, ENCODING); switch (encoding) { case SQ_INTERRUPT_WORD_ENCODING_AUTO: - pr_debug_ratelimited( + dev_dbg_ratelimited( + dev->adev->dev, "sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n", - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, SE_ID), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, WLT), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_BUF_FULL), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, REG_TIMESTAMP), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, CMD_TIMESTAMP), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_CMD_OVERFLOW), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_REG_OVERFLOW), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, IMMED_OVERFLOW), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_UTC_ERROR)); + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_AUTO_CTXID, + SE_ID), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_AUTO_CTXID, + THREAD_TRACE), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_AUTO_CTXID, + WLT), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_AUTO_CTXID, + THREAD_TRACE_BUF_FULL), + REG_GET_FIELD( + context_id0, + 
SQ_INTERRUPT_WORD_AUTO_CTXID, + REG_TIMESTAMP), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_AUTO_CTXID, + CMD_TIMESTAMP), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_AUTO_CTXID, + HOST_CMD_OVERFLOW), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_AUTO_CTXID, + HOST_REG_OVERFLOW), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_AUTO_CTXID, + IMMED_OVERFLOW), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_AUTO_CTXID, + THREAD_TRACE_UTC_ERROR)); break; case SQ_INTERRUPT_WORD_ENCODING_INST: - pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n", - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID), + dev_dbg_ratelimited( + dev->adev->dev, + "sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n", + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID, + SE_ID), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID, + DATA), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID, + SH_ID), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID, + PRIV), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID, + WAVE_ID), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID, + SIMD_ID), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID, + CU_ID), sq_int_data); if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK) { if (kfd_set_dbg_ev_from_interrupt(dev, pasid, @@ -412,14 +467,37 @@ static void event_interrupt_wq_v9(struct kfd_node *dev, break; case SQ_INTERRUPT_WORD_ENCODING_ERROR: sq_intr_err = REG_GET_FIELD(sq_int_data, KFD_SQ_INT_DATA, ERR_TYPE); - pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n", - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID), - REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID), + dev_warn_ratelimited( + dev->adev->dev, + "sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n", + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID, + SE_ID), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID, + DATA), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID, + SH_ID), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID, + PRIV), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID, + WAVE_ID), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID, + SIMD_ID), + REG_GET_FIELD( + context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID, + CU_ID), sq_intr_err); if (sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST && sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 80320a6c8854..97933d2a3803 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -495,6 +495,10 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd, m->sdma_engine_id = q->sdma_engine_id; m->sdma_queue_id = q->sdma_queue_id; m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT; + /* Allow context switch so we don't cross-process starve with a massive + * command buffer of long-running SDMA commands + */ + m->sdmax_rlcx_ib_cntl |= SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK; q->is_active = QUEUE_IS_ACTIVE(*q); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index 271c567242ab..b1a6eb349bb3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -31,6 +31,7 @@ #define OVER_SUBSCRIPTION_PROCESS_COUNT (1 << 0) #define OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT (1 << 1) #define OVER_SUBSCRIPTION_GWS_QUEUE_COUNT (1 << 2) +#define OVER_SUBSCRIPTION_XNACK_CONFLICT (1 << 3) static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes, unsigned int buffer_size_bytes) @@ -44,7 +45,8 @@ static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes, static void pm_calc_rlib_size(struct packet_manager *pm, unsigned int *rlib_size, - int *over_subscription) + int *over_subscription, + int xnack_conflict) { unsigned int process_count, queue_count, compute_queue_count, gws_queue_count; unsigned int map_queue_size; @@ -73,6 +75,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm, *over_subscription |= OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT; if (gws_queue_count > 1) *over_subscription |= OVER_SUBSCRIPTION_GWS_QUEUE_COUNT; + if (xnack_conflict && (node->adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN)) + *over_subscription |= OVER_SUBSCRIPTION_XNACK_CONFLICT; if (*over_subscription) dev_dbg(dev, "Over subscribed runlist\n"); @@ -96,7 +100,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm, unsigned int **rl_buffer, uint64_t *rl_gpu_buffer, unsigned int *rl_buffer_size, - int *is_over_subscription) + int *is_over_subscription, + int xnack_conflict) { struct kfd_node *node = pm->dqm->dev; struct device *dev = node->adev->dev; @@ -105,7 +110,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm, if (WARN_ON(pm->allocated)) return -EINVAL; - pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription); + pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription, + xnack_conflict); mutex_lock(&pm->lock); @@ -142,11 +148,27 @@ static int pm_create_runlist_ib(struct packet_manager *pm, struct queue *q; struct kernel_queue *kq; int is_over_subscription; + int xnack_enabled = -1; + bool xnack_conflict = 0; rl_wptr = retval = processes_mapped = 0; + /* Check if processes set different xnack modes */ + list_for_each_entry(cur, queues, list) { + qpd = cur->qpd; + if (xnack_enabled < 0) + /* First process */ + xnack_enabled = qpd->pqm->process->xnack_enabled; + else if (qpd->pqm->process->xnack_enabled != xnack_enabled) { + /* Found a process with a different xnack mode */ + xnack_conflict = 1; + break; + } + } + retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr, - &alloc_size_bytes, &is_over_subscription); + &alloc_size_bytes, &is_over_subscription, + xnack_conflict); if (retval) return retval; @@ -156,9 +178,13 @@ static int pm_create_runlist_ib(struct packet_manager *pm, dev_dbg(dev, "Building 
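pm_calc_rlib_size() now collects every oversubscription cause as a bit in one int, and the new OVER_SUBSCRIPTION_XNACK_CONFLICT bit is only raised when the XNACK chain workaround is active on the GPU. Accumulating reasons in a mask lets the caller report all of them in a single warning. A compilable sketch of the accumulate-then-report shape (values illustrative):

#include <stdio.h>

#define OVERSUB_PROCESS_COUNT  (1 << 0)
#define OVERSUB_QUEUE_COUNT    (1 << 1)
#define OVERSUB_GWS_QUEUES     (1 << 2)
#define OVERSUB_XNACK_CONFLICT (1 << 3)

int main(void)
{
	int over = 0;
	int process_count = 9, max_proc = 8;
	int xnack_conflict = 1, xnack_chain_workaround = 1;

	if (process_count > max_proc)
		over |= OVERSUB_PROCESS_COUNT;
	if (xnack_conflict && xnack_chain_workaround)
		over |= OVERSUB_XNACK_CONFLICT;

	if (over)
		printf("oversubscribed:%s%s\n",
		       over & OVERSUB_PROCESS_COUNT ? " too many processes" : "",
		       over & OVERSUB_XNACK_CONFLICT ? " mixed xnack modes" : "");
	return 0;
}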
runlist ib process count: %d queues count %d\n", pm->dqm->processes_count, pm->dqm->active_queue_count); +build_runlist_ib: /* build the run list ib packet */ list_for_each_entry(cur, queues, list) { qpd = cur->qpd; + /* group processes with the same xnack mode together */ + if (qpd->pqm->process->xnack_enabled != xnack_enabled) + continue; /* build map process packet */ if (processes_mapped >= pm->dqm->processes_count) { dev_dbg(dev, "Not enough space left in runlist IB\n"); @@ -215,18 +241,26 @@ static int pm_create_runlist_ib(struct packet_manager *pm, alloc_size_bytes); } } + if (xnack_conflict) { + /* pick up processes with the other xnack mode */ + xnack_enabled = !xnack_enabled; + xnack_conflict = 0; + goto build_runlist_ib; + } dev_dbg(dev, "Finished map process and queues to runlist\n"); if (is_over_subscription) { if (!pm->is_over_subscription) - dev_warn(dev, "Runlist is getting oversubscribed due to%s%s%s. Expect reduced ROCm performance.\n", - is_over_subscription & OVER_SUBSCRIPTION_PROCESS_COUNT ? - " too many processes." : "", - is_over_subscription & OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT ? - " too many queues." : "", - is_over_subscription & OVER_SUBSCRIPTION_GWS_QUEUE_COUNT ? - " multiple processes using cooperative launch." : ""); + dev_warn(dev, "Runlist is getting oversubscribed due to%s%s%s%s. Expect reduced ROCm performance.\n", + is_over_subscription & OVER_SUBSCRIPTION_PROCESS_COUNT ? + " too many processes" : "", + is_over_subscription & OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT ? + " too many queues" : "", + is_over_subscription & OVER_SUBSCRIPTION_GWS_QUEUE_COUNT ? + " multiple processes using cooperative launch" : "", + is_over_subscription & OVER_SUBSCRIPTION_XNACK_CONFLICT ? + " xnack on/off processes mixed on gfx9" : ""); retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr], *rl_gpu_addr, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c index 2893fd5e5d00..505036968a77 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -43,7 +43,7 @@ static int pm_map_process_v9(struct packet_manager *pm, memset(buffer, 0, sizeof(struct pm4_mes_map_process)); packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS, sizeof(struct pm4_mes_map_process)); - if (adev->enforce_isolation[kfd->node_id]) + if (adev->enforce_isolation[kfd->node_id] == AMDGPU_ENFORCE_ISOLATION_ENABLE) packet->bitfields2.exec_cleaner_shader = 1; packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0; packet->bitfields2.process_quantum = 10; @@ -102,7 +102,8 @@ static int pm_map_process_aldebaran(struct packet_manager *pm, memset(buffer, 0, sizeof(struct pm4_mes_map_process_aldebaran)); packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS, sizeof(struct pm4_mes_map_process_aldebaran)); - if (adev->enforce_isolation[knode->node_id]) + if (adev->enforce_isolation[knode->node_id] == + AMDGPU_ENFORCE_ISOLATION_ENABLE) packet->bitfields2.exec_cleaner_shader = 1; packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0; packet->bitfields2.process_quantum = 10; @@ -165,9 +166,9 @@ static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer, * hws_max_conc_proc has been done in * kgd2kfd_device_init(). */ - concurrent_proc_cnt = adev->enforce_isolation[kfd->node_id] ? - 1 : min(pm->dqm->processes_count, - kfd->max_proc_per_quantum); + concurrent_proc_cnt = (adev->enforce_isolation[kfd->node_id] == + AMDGPU_ENFORCE_ISOLATION_ENABLE) ? 
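The reworked pm_create_runlist_ib() makes up to two passes over the queue list: the first pass maps only processes whose XNACK mode matches the first process seen, and if a conflict was detected it flips the mode and jumps back to build_runlist_ib to pick up the rest, so processes with the same mode end up grouped in the runlist. A standalone sketch of that control flow:

#include <stdbool.h>
#include <stdio.h>

struct process { int xnack_enabled; const char *name; };

int main(void)
{
	struct process procs[] = {
		{1, "A"}, {0, "B"}, {1, "C"}, {0, "D"},
	};
	int n = 4, xnack_enabled = -1;
	bool conflict = false;

	/* detect mixed modes, remembering the first process's mode */
	for (int i = 0; i < n; i++) {
		if (xnack_enabled < 0)
			xnack_enabled = procs[i].xnack_enabled;
		else if (procs[i].xnack_enabled != xnack_enabled)
			conflict = true;
	}

build_runlist:
	for (int i = 0; i < n; i++) {
		if (procs[i].xnack_enabled != xnack_enabled)
			continue;	/* picked up on the second pass */
		printf("map %s (xnack=%d)\n", procs[i].name,
		       procs[i].xnack_enabled);
	}
	if (conflict) {
		xnack_enabled = !xnack_enabled;
		conflict = false;
		goto build_runlist;
	}
	return 0;
}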
+ 1 : min(pm->dqm->processes_count, kfd->max_proc_per_quantum); packet = (struct pm4_mes_runlist *)buffer; @@ -202,6 +203,8 @@ static int pm_set_resources_v9(struct packet_manager *pm, uint32_t *buffer, queue_type__mes_set_resources__hsa_interface_queue_hiq; packet->bitfields2.vmid_mask = res->vmid_mask; packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100; + if (pm->dqm->dev->adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN) + packet->bitfields2.enb_xnack_retry_disable_check = 1; packet->bitfields7.oac_mask = res->oac_mask; packet->bitfields8.gds_heap_base = res->gds_heap_base; packet->bitfields8.gds_heap_size = res->gds_heap_size; @@ -237,7 +240,7 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer, packet->bitfields2.engine_sel = engine_sel__mes_map_queues__compute_vi; - packet->bitfields2.gws_control_queue = q->gws ? 1 : 0; + packet->bitfields2.gws_control_queue = q->properties.is_gws ? 1 : 0; packet->bitfields2.extended_engine_sel = extended_engine_sel__mes_map_queues__legacy_engine_sel; packet->bitfields2.queue_type = diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h index cd8611401a66..e356a207d03c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h @@ -63,7 +63,8 @@ struct pm4_mes_set_resources { struct { uint32_t vmid_mask:16; uint32_t unmap_latency:8; - uint32_t reserved1:5; + uint32_t reserved1:4; + uint32_t enb_xnack_retry_disable_check:1; enum mes_set_resources_queue_type_enum queue_type:3; } bitfields2; uint32_t ordinal2; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index f6aedf69c644..67694bcd9464 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -372,6 +372,9 @@ struct kfd_dev { /* bitmap for dynamic doorbell allocation from doorbell object */ unsigned long *doorbell_bitmap; + + /* for dynamic partitioning */ + int kfd_dev_lock; }; enum kfd_mempool { @@ -1507,6 +1510,8 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p, int kfd_get_num_events(struct kfd_process *p); int kfd_event_destroy(struct kfd_process *p, uint32_t event_id); +void kfd_signal_vm_fault_event_with_userptr(struct kfd_process *p, uint64_t gpu_va); + void kfd_signal_vm_fault_event(struct kfd_process_device *pdd, struct kfd_vm_fault_info *info, struct kfd_hsa_memory_exception_data *data); @@ -1534,7 +1539,7 @@ static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev) int kfd_send_exception_to_runtime(struct kfd_process *p, unsigned int queue_id, uint64_t error_reason); -bool kfd_is_locked(void); +bool kfd_is_locked(struct kfd_dev *kfd); /* Compute profile */ void kfd_inc_compute_active(struct kfd_node *dev); @@ -1581,10 +1586,15 @@ int kfd_debugfs_hang_hws(struct kfd_node *dev); int pm_debugfs_hang_hws(struct packet_manager *pm); int dqm_debugfs_hang_hws(struct device_queue_manager *dqm); +void kfd_debugfs_add_process(struct kfd_process *p); +void kfd_debugfs_remove_process(struct kfd_process *p); + #else static inline void kfd_debugfs_init(void) {} static inline void kfd_debugfs_fini(void) {} +static inline void kfd_debugfs_add_process(struct kfd_process *p) {} +static inline void kfd_debugfs_remove_process(struct kfd_process *p) {} #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 7c0c24732481..5be28c6c4f6a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ 
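The pm4_mes_set_resources change pairs with the packet-manager hunk above: the new 1-bit enb_xnack_retry_disable_check flag is carved out of the adjacent reserved1 field (5 bits down to 4), so the bitfield word stays exactly 32 bits and firmware that ignores the flag still sees zero there. A sketch checking that the widths still pack into one dword (layout mimics the patch; packing assumes a common ABI):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* illustrative layout; field order and widths mimic the patch */
struct set_resources_bits {
	uint32_t vmid_mask:16;
	uint32_t unmap_latency:8;
	uint32_t reserved1:4;				/* was :5 */
	uint32_t enb_xnack_retry_disable_check:1;	/* carved out */
	uint32_t queue_type:3;
};

int main(void)
{
	/* 16 + 8 + 4 + 1 + 3 == 32, so the packet size is unchanged */
	static_assert(sizeof(struct set_resources_bits) == sizeof(uint32_t),
		      "bitfields must pack into one dword");
	struct set_resources_bits b = {0};

	b.enb_xnack_retry_disable_check = 1;
	printf("flag=%u\n", (unsigned)b.enb_xnack_retry_disable_check);
	return 0;
}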
b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -854,7 +854,7 @@ struct kfd_process *kfd_create_process(struct task_struct *thread) */ mutex_lock(&kfd_processes_mutex); - if (kfd_is_locked()) { + if (kfd_is_locked(NULL)) { pr_debug("KFD is locked! Cannot create process"); process = ERR_PTR(-EINVAL); goto out; @@ -900,6 +900,8 @@ struct kfd_process *kfd_create_process(struct task_struct *thread) kfd_procfs_add_sysfs_files(process); kfd_procfs_add_sysfs_counters(process); + kfd_debugfs_add_process(process); + init_waitqueue_head(&process->wait_irq_drain); } out: @@ -1054,6 +1056,8 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) for (i = 0; i < p->n_pdds; i++) { struct kfd_process_device *pdd = p->pdds[i]; + kfd_smi_event_process(pdd, false); + pr_debug("Releasing pdd (topology id %d, for pid %d)\n", pdd->dev->id, p->lead_thread->pid); kfd_process_device_destroy_cwsr_dgpu(pdd); @@ -1174,6 +1178,7 @@ static void kfd_process_wq_release(struct work_struct *work) dma_fence_signal(ef); kfd_process_remove_sysfs(p); + kfd_debugfs_remove_process(p); kfd_process_kunmap_signal_bo(p); kfd_process_free_outstanding_kfd_bos(p); @@ -1715,6 +1720,8 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, pdd->pasid = avm->pasid; pdd->drm_file = drm_file; + kfd_smi_event_process(pdd, true); + return 0; err_get_pasid: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 7eb370b68159..c643e0ccec52 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -279,20 +279,17 @@ static int init_user_queue(struct process_queue_manager *pqm, /* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work * on unmapped queues for usermode queue oversubscription (no aggregated doorbell) */ - if (((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) - >> AMDGPU_MES_API_VERSION_SHIFT) >= 2) { - if (dev->adev != amdgpu_ttm_adev(q_properties->wptr_bo->tbo.bdev)) { - pr_err("Queue memory allocated to wrong device\n"); - retval = -EINVAL; - goto free_gang_ctx_bo; - } + if (dev->adev != amdgpu_ttm_adev(q_properties->wptr_bo->tbo.bdev)) { + pr_err("Queue memory allocated to wrong device\n"); + retval = -EINVAL; + goto free_gang_ctx_bo; + } - retval = amdgpu_amdkfd_map_gtt_bo_to_gart(q_properties->wptr_bo, - &(*q)->wptr_bo_gart); - if (retval) { - pr_err("Failed to map wptr bo to GART\n"); - goto free_gang_ctx_bo; - } + retval = amdgpu_amdkfd_map_gtt_bo_to_gart(q_properties->wptr_bo, + &(*q)->wptr_bo_gart); + if (retval) { + pr_err("Failed to map wptr bo to GART\n"); + goto free_gang_ctx_bo; } } @@ -451,8 +448,15 @@ int pqm_create_queue(struct process_queue_manager *pqm, } if (retval != 0) { - pr_err("process pid %d DQM create queue type %d failed. ret %d\n", - pqm->process->lead_thread->pid, type, retval); + if ((type == KFD_QUEUE_TYPE_SDMA || + type == KFD_QUEUE_TYPE_SDMA_XGMI || + type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) && + retval == -ENOMEM) + pr_warn("process pid %d DQM create queue type %d failed. ret %d\n", + pqm->process->lead_thread->pid, type, retval); + else + pr_err("process pid %d DQM create queue type %d failed. 
ret %d\n", + pqm->process->lead_thread->pid, type, retval); goto err_create_queue; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c index 4afff7094caf..a65c67cf56ff 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c @@ -402,7 +402,7 @@ static u32 kfd_get_vgpr_size_per_cu(u32 gfxv) { u32 vgpr_size = 0x40000; - if ((gfxv / 100 * 100) == 90400 || /* GFX_VERSION_AQUA_VANJARAM */ + if (gfxv == 90402 || /* GFX_VERSION_AQUA_VANJARAM */ gfxv == 90010 || /* GFX_VERSION_ALDEBARAN */ gfxv == 90008 || /* GFX_VERSION_ARCTURUS */ gfxv == 90500) @@ -462,7 +462,7 @@ void kfd_queue_ctx_save_restore_size(struct kfd_topology_device *dev) if (gfxv == 80002) /* GFX_VERSION_TONGA */ props->eop_buffer_size = 0x8000; - else if ((gfxv / 100 * 100) == 90400) /* GFX_VERSION_AQUA_VANJARAM */ + else if (gfxv == 90402) /* GFX_VERSION_AQUA_VANJARAM */ props->eop_buffer_size = 4096; else if (gfxv >= 80000) props->eop_buffer_size = 4096; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index 9b8169761ec5..a499449fcb06 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -163,10 +163,9 @@ static int kfd_smi_ev_release(struct inode *inode, struct file *filep) static bool kfd_smi_ev_enabled(pid_t pid, struct kfd_smi_client *client, unsigned int event) { - uint64_t all = KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_ALL_PROCESS); uint64_t events = READ_ONCE(client->events); - if (pid && client->pid != pid && !(client->suser && (events & all))) + if (pid && client->pid != pid && !client->suser) return false; return events & KFD_SMI_EVENT_MASK_FROM_INDEX(event); @@ -254,9 +253,9 @@ void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid) task_info = amdgpu_vm_get_task_info_pasid(dev->adev, pasid); if (task_info) { /* Report VM faults from user applications, not retry from kernel */ - if (task_info->pid) + if (task_info->task.pid) kfd_smi_event_add(0, dev, KFD_SMI_EVENT_VMFAULT, KFD_EVENT_FMT_VMFAULT( - task_info->pid, task_info->task_name)); + task_info->task.pid, task_info->task.comm)); amdgpu_vm_put_task_info(task_info); } } @@ -345,6 +344,27 @@ void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid, pid, address, last - address + 1, node->id, trigger)); } +void kfd_smi_event_process(struct kfd_process_device *pdd, bool start) +{ + struct amdgpu_task_info *task_info; + struct amdgpu_vm *avm; + + if (!pdd->drm_priv) + return; + + avm = drm_priv_to_vm(pdd->drm_priv); + task_info = amdgpu_vm_get_task_info_vm(avm); + + if (task_info) { + kfd_smi_event_add(0, pdd->dev, + start ? 
KFD_SMI_EVENT_PROCESS_START : + KFD_SMI_EVENT_PROCESS_END, + KFD_EVENT_FMT_PROCESS(task_info->task.pid, + task_info->task.comm)); + amdgpu_vm_put_task_info(task_info); + } +} + int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd) { struct kfd_smi_client *client; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h index 503bff13d815..bb4d72b57387 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h @@ -53,4 +53,5 @@ void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm); void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid, unsigned long address, unsigned long last, uint32_t trigger); +void kfd_smi_event_process(struct kfd_process_device *pdd, bool start); #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 100717a98ec1..a0f22ea6d15a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1171,13 +1171,12 @@ svm_range_split_head(struct svm_range *prange, uint64_t new_start, } static void -svm_range_add_child(struct svm_range *prange, struct mm_struct *mm, - struct svm_range *pchild, enum svm_work_list_ops op) +svm_range_add_child(struct svm_range *prange, struct svm_range *pchild, enum svm_work_list_ops op) { pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n", pchild, pchild->start, pchild->last, prange, op); - pchild->work_item.mm = mm; + pchild->work_item.mm = NULL; pchild->work_item.op = op; list_add_tail(&pchild->child_list, &prange->child_list); } @@ -1245,8 +1244,7 @@ svm_range_get_pte_flags(struct kfd_node *node, case IP_VERSION(9, 4, 4): case IP_VERSION(9, 5, 0): if (ext_coherent) - mtype_local = (gc_ip_version < IP_VERSION(9, 5, 0) && !node->adev->rev_id) ? - AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_CC; + mtype_local = AMDGPU_VM_MTYPE_CC; else mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC : amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; @@ -1279,7 +1277,7 @@ svm_range_get_pte_flags(struct kfd_node *node, mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; /* system memory accessed by the dGPU */ } else { - if (gc_ip_version < IP_VERSION(9, 5, 0)) + if (gc_ip_version < IP_VERSION(9, 5, 0) || ext_coherent) mapping_flags |= AMDGPU_VM_MTYPE_UC; else mapping_flags |= AMDGPU_VM_MTYPE_NC; @@ -2395,15 +2393,17 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange, prange->work_item.op != SVM_OP_UNMAP_RANGE) prange->work_item.op = op; } else { - prange->work_item.op = op; - - /* Pairs with mmput in deferred_list_work */ - mmget(mm); - prange->work_item.mm = mm; - list_add_tail(&prange->deferred_list, - &prange->svms->deferred_range_list); - pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n", - prange, prange->start, prange->last, op); + /* Pairs with mmput in deferred_list_work. + * If process is exiting and mm is gone, don't update mmu notifier. 
+ */ + if (mmget_not_zero(mm)) { + prange->work_item.mm = mm; + prange->work_item.op = op; + list_add_tail(&prange->deferred_list, + &prange->svms->deferred_range_list); + pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n", + prange, prange->start, prange->last, op); + } } spin_unlock(&svms->deferred_list_lock); } @@ -2417,8 +2417,7 @@ void schedule_deferred_list_work(struct svm_range_list *svms) } static void -svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent, - struct svm_range *prange, unsigned long start, +svm_range_unmap_split(struct svm_range *parent, struct svm_range *prange, unsigned long start, unsigned long last) { struct svm_range *head; @@ -2439,12 +2438,12 @@ svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent, svm_range_split(tail, last + 1, tail->last, &head); if (head != prange && tail != prange) { - svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE); - svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE); + svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE); + svm_range_add_child(parent, tail, SVM_OP_ADD_RANGE); } else if (tail != prange) { - svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE); + svm_range_add_child(parent, tail, SVM_OP_UNMAP_RANGE); } else if (head != prange) { - svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE); + svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE); } else if (parent != prange) { prange->work_item.op = SVM_OP_UNMAP_RANGE; } @@ -2521,14 +2520,14 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange, l = min(last, pchild->last); if (l >= s) svm_range_unmap_from_gpus(pchild, s, l, trigger); - svm_range_unmap_split(mm, prange, pchild, start, last); + svm_range_unmap_split(prange, pchild, start, last); mutex_unlock(&pchild->lock); } s = max(start, prange->start); l = min(last, prange->last); if (l >= s) svm_range_unmap_from_gpus(prange, s, l, trigger); - svm_range_unmap_split(mm, prange, prange, start, last); + svm_range_unmap_split(prange, prange, start, last); if (unmap_parent) svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE); @@ -2571,8 +2570,6 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, if (range->event == MMU_NOTIFY_RELEASE) return true; - if (!mmget_not_zero(mni->mm)) - return true; start = mni->interval_tree.start; last = mni->interval_tree.last; @@ -2599,7 +2596,6 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, } svm_range_unlock(prange); - mmput(mni->mm); return true; } @@ -4076,8 +4072,8 @@ exit: return ret; } -int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges, - uint64_t *svm_priv_data_size) +void svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges, + uint64_t *svm_priv_data_size) { uint64_t total_size, accessibility_size, common_attr_size; int nattr_common = 4, nattr_accessibility = 1; @@ -4089,8 +4085,6 @@ int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges, *svm_priv_data_size = 0; svms = &p->svms; - if (!svms) - return -EINVAL; mutex_lock(&svms->lock); list_for_each_entry(prange, &svms->list, list) { @@ -4132,7 +4126,6 @@ int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges, pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges, *svm_priv_data_size); - return 0; } int kfd_criu_checkpoint_svm(struct kfd_process *p, @@ -4149,8 +4142,6 @@ int kfd_criu_checkpoint_svm(struct kfd_process *p, struct mm_struct *mm; svms = &p->svms; - if (!svms) - return -EINVAL; mm = 
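The svm_range_add_list_work() change only queues deferred work when mmget_not_zero() succeeds, i.e. when a reference on the mm can still be taken; if the process is exiting and the count has already dropped to zero, the update is silently skipped rather than resurrecting a dying mm. The underlying increment-unless-zero idiom, sketched with C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* inc-unless-zero: the idiom behind mmget_not_zero() */
static bool get_not_zero(atomic_int *count)
{
	int old = atomic_load(count);

	while (old != 0) {
		if (atomic_compare_exchange_weak(count, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;	/* object already dying; don't touch it */
}

int main(void)
{
	atomic_int live = 1, dead = 0;

	printf("live: %d\n", get_not_zero(&live));	/* 1: work queued */
	printf("dead: %d\n", get_not_zero(&dead));	/* 0: skip quietly */
	return 0;
}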
get_task_mm(p->lead_thread); if (!mm) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 6ea23c78009c..01c7a4877904 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -184,8 +184,8 @@ void schedule_deferred_list_work(struct svm_range_list *svms); void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr, unsigned long offset, unsigned long npages); void svm_range_dma_unmap(struct svm_range *prange); -int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges, - uint64_t *svm_priv_data_size); +void svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges, + uint64_t *svm_priv_data_size); int kfd_criu_checkpoint_svm(struct kfd_process *p, uint8_t __user *user_priv_data, uint64_t *priv_offset); @@ -237,13 +237,12 @@ static inline int svm_range_schedule_evict_svm_bo( return -EINVAL; } -static inline int svm_range_get_info(struct kfd_process *p, - uint32_t *num_svm_ranges, - uint64_t *svm_priv_data_size) +static inline void svm_range_get_info(struct kfd_process *p, + uint32_t *num_svm_ranges, + uint64_t *svm_priv_data_size) { *num_svm_ranges = 0; *svm_priv_data_size = 0; - return 0; } static inline int kfd_criu_checkpoint_svm(struct kfd_process *p, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index e477d7509646..4ec73f33535e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -510,6 +510,10 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->node_props.capability |= HSA_CAP_AQL_QUEUE_DOUBLE_MAP; + if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0) && + (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)) + dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED; + sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_fcompute", dev->node_props.max_engine_clk_fcompute); @@ -1267,34 +1271,41 @@ static void kfd_set_recommended_sdma_engines(struct kfd_topology_device *to_dev, { struct kfd_node *gpu = outbound_link->gpu; struct amdgpu_device *adev = gpu->adev; - int num_xgmi_nodes = adev->gmc.xgmi.num_physical_nodes; + unsigned int num_xgmi_nodes = adev->gmc.xgmi.num_physical_nodes; + unsigned int num_xgmi_sdma_engines = kfd_get_num_xgmi_sdma_engines(gpu); + unsigned int num_sdma_engines = kfd_get_num_sdma_engines(gpu); + uint32_t sdma_eng_id_mask = (1 << num_sdma_engines) - 1; + uint32_t xgmi_sdma_eng_id_mask = + ((1 << num_xgmi_sdma_engines) - 1) << num_sdma_engines; + bool support_rec_eng = !amdgpu_sriov_vf(adev) && to_dev->gpu && adev->aid_mask && num_xgmi_nodes && gpu->kfd->num_nodes == 1 && - kfd_get_num_xgmi_sdma_engines(gpu) >= 14 && - (!(adev->flags & AMD_IS_APU) && num_xgmi_nodes == 8); + num_xgmi_sdma_engines >= 6 && (!(adev->flags & AMD_IS_APU) && + num_xgmi_nodes == 8); if (support_rec_eng) { int src_socket_id = adev->gmc.xgmi.physical_node_id; int dst_socket_id = to_dev->gpu->adev->gmc.xgmi.physical_node_id; + unsigned int reshift = num_xgmi_sdma_engines == 6 ? 
1 : 0; outbound_link->rec_sdma_eng_id_mask = - 1 << rec_sdma_eng_map[src_socket_id][dst_socket_id]; + 1 << (rec_sdma_eng_map[src_socket_id][dst_socket_id] >> reshift); inbound_link->rec_sdma_eng_id_mask = - 1 << rec_sdma_eng_map[dst_socket_id][src_socket_id]; - } else { - int num_sdma_eng = kfd_get_num_sdma_engines(gpu); - int i, eng_offset = 0; + 1 << (rec_sdma_eng_map[dst_socket_id][src_socket_id] >> reshift); - if (outbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI && - kfd_get_num_xgmi_sdma_engines(gpu) && to_dev->gpu) { - eng_offset = num_sdma_eng; - num_sdma_eng = kfd_get_num_xgmi_sdma_engines(gpu); - } + /* If recommended engine is out of range, need to reset the mask */ + if (outbound_link->rec_sdma_eng_id_mask & sdma_eng_id_mask) + outbound_link->rec_sdma_eng_id_mask = xgmi_sdma_eng_id_mask; + if (inbound_link->rec_sdma_eng_id_mask & sdma_eng_id_mask) + inbound_link->rec_sdma_eng_id_mask = xgmi_sdma_eng_id_mask; - for (i = 0; i < num_sdma_eng; i++) { - outbound_link->rec_sdma_eng_id_mask |= (1 << (i + eng_offset)); - inbound_link->rec_sdma_eng_id_mask |= (1 << (i + eng_offset)); - } + } else { + uint32_t engine_mask = (outbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI && + num_xgmi_sdma_engines && to_dev->gpu) ? xgmi_sdma_eng_id_mask : + sdma_eng_id_mask; + + outbound_link->rec_sdma_eng_id_mask = engine_mask; + inbound_link->rec_sdma_eng_id_mask = engine_mask; } } @@ -1983,9 +1994,6 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev) if (kfd_dbg_has_ttmps_always_setup(dev->gpu)) dev->node_props.debug_prop |= HSA_DBG_DISPATCH_INFO_ALWAYS_VALID; - if (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE) - dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED; - if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0)) { if (KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 3) || KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 4)) @@ -2001,7 +2009,9 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev) dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED; - dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED; + if (!amdgpu_sriov_vf(dev->gpu->adev)) + dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED; + } else { dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 | HSA_DBG_WATCH_ADDR_MASK_HI_BIT; diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c index faed84172dd4..8bc36f04b1b7 100644 --- a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c +++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c @@ -21,6 +21,7 @@ * */ +#include <linux/export.h> #include <linux/init.h> #include <linux/module.h> #include <linux/platform_device.h> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile index ab2a97e354da..7329b8cc2576 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile @@ -38,6 +38,7 @@ AMDGPUDM = \ amdgpu_dm_pp_smu.o \ amdgpu_dm_psr.o \ amdgpu_dm_replay.o \ + amdgpu_dm_quirks.o \ amdgpu_dm_wb.o ifdef CONFIG_DRM_AMD_DC_FP diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index bae83a129b5f..2a175fc0399c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -80,7 +80,6 @@ #include <linux/power_supply.h> #include <linux/firmware.h> #include <linux/component.h> 
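In the reworked kfd_set_recommended_sdma_engines() above, the engine masks are built arithmetically: the low num_sdma_engines bits name the plain SDMA engines and the XGMI engines occupy the next num_xgmi_sdma_engines bits, while the reshift halves the per-socket table index when only 6 XGMI engines are present (presumably because the table was laid out for the wider engine set). A sketch of the mask math with illustrative engine counts:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int num_sdma = 2, num_xgmi_sdma = 6;

	uint32_t sdma_mask = (1u << num_sdma) - 1;
	uint32_t xgmi_mask = ((1u << num_xgmi_sdma) - 1) << num_sdma;

	/* halve the table's engine index when only 6 XGMI engines exist */
	unsigned int reshift = (num_xgmi_sdma == 6) ? 1 : 0;
	unsigned int table_eng = 10;	/* illustrative table value */
	uint32_t rec_mask = 1u << (table_eng >> reshift);

	printf("sdma_mask=0x%x xgmi_mask=0x%x rec=0x%x\n",
	       (unsigned)sdma_mask, (unsigned)xgmi_mask, (unsigned)rec_mask);

	/* if the pick fell into the plain-SDMA range, fall back to all XGMI */
	if (rec_mask & sdma_mask)
		rec_mask = xgmi_mask;
	printf("final rec=0x%x\n", (unsigned)rec_mask);
	return 0;
}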
-#include <linux/dmi.h> #include <linux/sort.h> #include <drm/display/drm_dp_mst_helper.h> @@ -115,6 +114,8 @@ #include "modules/inc/mod_freesync.h" #include "modules/power/power_helpers.h" +static_assert(AMDGPU_DMUB_NOTIFICATION_MAX == DMUB_NOTIFICATION_MAX, "AMDGPU_DMUB_NOTIFICATION_MAX mismatch"); + #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB); #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin" @@ -280,7 +281,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc) acrtc = adev->mode_info.crtcs[crtc]; if (!acrtc->dm_irq_params.stream) { - DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", + drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n", crtc); return 0; } @@ -301,7 +302,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, acrtc = adev->mode_info.crtcs[crtc]; if (!acrtc->dm_irq_params.stream) { - DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", + drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n", crtc); return 0; } @@ -372,6 +373,8 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev, static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state, struct dm_crtc_state *new_state) { + if (new_state->stream->adjust.timing_adjust_pending) + return true; if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) return true; else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state)) @@ -749,6 +752,29 @@ static void dmub_aux_setconfig_callback(struct amdgpu_device *adev, complete(&adev->dm.dmub_aux_transfer_done); } +static void dmub_aux_fused_io_callback(struct amdgpu_device *adev, + struct dmub_notification *notify) +{ + if (!adev || !notify) { + ASSERT(false); + return; + } + + const struct dmub_cmd_fused_request *req = ¬ify->fused_request; + const uint8_t ddc_line = req->u.aux.ddc_line; + + if (ddc_line >= ARRAY_SIZE(adev->dm.fused_io)) { + ASSERT(false); + return; + } + + struct fused_io_sync *sync = &adev->dm.fused_io[ddc_line]; + + static_assert(sizeof(*req) <= sizeof(sync->reply_data), "Size mismatch"); + memcpy(sync->reply_data, req, sizeof(*req)); + complete(&sync->replied); +} + /** * dmub_hpd_callback - DMUB HPD interrupt processing callback. * @adev: amdgpu_device pointer @@ -772,18 +798,18 @@ static void dmub_hpd_callback(struct amdgpu_device *adev, return; if (notify == NULL) { - DRM_ERROR("DMUB HPD callback notification was NULL"); + drm_err(adev_to_drm(adev), "DMUB HPD callback notification was NULL"); return; } if (notify->link_index > adev->dm.dc->link_count) { - DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index); + drm_err(adev_to_drm(adev), "DMUB HPD index (%u)is abnormal", notify->link_index); return; } /* Skip DMUB HPD IRQ in suspend/resume. We will probe them later. 
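dmub_aux_fused_io_callback() above follows a defensive reply-dispatch shape: validate the DDC line index against the array bound, copy the reply into that line's slot (the static_assert guarantees it fits), then complete the waiter. A user-space sketch of the same shape, with a plain flag standing in for the completion:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_DDC_LINES 4

struct fused_request { uint8_t ddc_line; uint8_t payload[16]; };

struct io_sync {
	uint8_t reply_data[32];
	bool replied;		/* stands in for a struct completion */
};

static struct io_sync fused_io[MAX_DDC_LINES];

static void on_reply(const struct fused_request *req)
{
	if (req->ddc_line >= MAX_DDC_LINES)
		return;		/* malformed notification: drop it */

	struct io_sync *sync = &fused_io[req->ddc_line];

	static_assert(sizeof(*req) <= sizeof(sync->reply_data),
		      "reply must fit the per-line buffer");
	memcpy(sync->reply_data, req, sizeof(*req));
	sync->replied = true;	/* kernel code calls complete() here */
}

int main(void)
{
	struct fused_request req = { .ddc_line = 2, .payload = {0xaa} };

	on_reply(&req);
	printf("line 2 replied: %d\n", fused_io[2].replied);
	return 0;
}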
*/ if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) { - DRM_INFO("Skip DMUB HPD IRQ callback in suspend/resume\n"); + drm_info(adev_to_drm(adev), "Skip DMUB HPD IRQ callback in suspend/resume\n"); return; } @@ -800,11 +826,11 @@ static void dmub_hpd_callback(struct amdgpu_device *adev, aconnector = to_amdgpu_dm_connector(connector); if (link && aconnector->dc_link == link) { if (notify->type == DMUB_NOTIFICATION_HPD) - DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index); + drm_info(adev_to_drm(adev), "DMUB HPD IRQ callback: link_index=%u\n", link_index); else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) - DRM_INFO("DMUB HPD RX IRQ callback: link_index=%u\n", link_index); + drm_info(adev_to_drm(adev), "DMUB HPD RX IRQ callback: link_index=%u\n", link_index); else - DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n", + drm_warn(adev_to_drm(adev), "DMUB Unknown HPD callback type %d, link_index=%u\n", notify->type, link_index); hpd_aconnector = aconnector; @@ -816,7 +842,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev, if (hpd_aconnector) { if (notify->type == DMUB_NOTIFICATION_HPD) { if (hpd_aconnector->dc_link->hpd_status == (notify->hpd_status == DP_HPD_PLUG)) - DRM_WARN("DMUB reported hpd status unchanged. link_index=%u\n", link_index); + drm_warn(adev_to_drm(adev), "DMUB reported hpd status unchanged. link_index=%u\n", link_index); handle_hpd_irq_helper(hpd_aconnector); } else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) { handle_hpd_rx_irq(hpd_aconnector); @@ -835,7 +861,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev, static void dmub_hpd_sense_callback(struct amdgpu_device *adev, struct dmub_notification *notify) { - DRM_DEBUG_DRIVER("DMUB HPD SENSE callback.\n"); + drm_dbg_driver(adev_to_drm(adev), "DMUB HPD SENSE callback.\n"); } /** @@ -871,7 +897,7 @@ static void dm_handle_hpd_work(struct work_struct *work) dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work); if (!dmub_hpd_wrk->dmub_notify) { - DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL"); + drm_err(adev_to_drm(dmub_hpd_wrk->adev), "dmub_hpd_wrk dmub_notify is NULL"); return; } @@ -885,6 +911,30 @@ static void dm_handle_hpd_work(struct work_struct *work) } +static const char *dmub_notification_type_str(enum dmub_notification_type e) +{ + switch (e) { + case DMUB_NOTIFICATION_NO_DATA: + return "NO_DATA"; + case DMUB_NOTIFICATION_AUX_REPLY: + return "AUX_REPLY"; + case DMUB_NOTIFICATION_HPD: + return "HPD"; + case DMUB_NOTIFICATION_HPD_IRQ: + return "HPD_IRQ"; + case DMUB_NOTIFICATION_SET_CONFIG_REPLY: + return "SET_CONFIG_REPLY"; + case DMUB_NOTIFICATION_DPIA_NOTIFICATION: + return "DPIA_NOTIFICATION"; + case DMUB_NOTIFICATION_HPD_SENSE_NOTIFY: + return "HPD_SENSE_NOTIFY"; + case DMUB_NOTIFICATION_FUSED_IO: + return "FUSED_IO"; + default: + return "<unknown>"; + } +} + #define DMUB_TRACE_MAX_READ 64 /** * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt @@ -902,22 +952,13 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) struct dmcub_trace_buf_entry entry = { 0 }; u32 count = 0; struct dmub_hpd_work *dmub_hpd_wrk; - static const char *const event_type[] = { - "NO_DATA", - "AUX_REPLY", - "HPD", - "HPD_IRQ", - "SET_CONFIGC_REPLY", - "DPIA_NOTIFICATION", - "HPD_SENSE_NOTIFY", - }; do { if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) { trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count, entry.param0, entry.param1); - DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n", + 
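dmub_notification_type_str() replaces the event_type[] string table that the outbox handler used to index; the old table had drifted from the enum (it spelled SET_CONFIG_REPLY as "SET_CONFIGC_REPLY" and had no FUSED_IO entry), whereas a switch with a default degrades gracefully when the enum grows. A sketch of the pattern with an illustrative enum:

#include <stdio.h>

enum notify_type {
	NOTIFY_NO_DATA,
	NOTIFY_AUX_REPLY,
	NOTIFY_HPD,
	NOTIFY_FUSED_IO,	/* newly added member */
};

/* a switch, unlike a parallel string array, fails soft: unknown
 * values hit the default instead of reading past the table's end */
static const char *notify_type_str(enum notify_type t)
{
	switch (t) {
	case NOTIFY_NO_DATA:   return "NO_DATA";
	case NOTIFY_AUX_REPLY: return "AUX_REPLY";
	case NOTIFY_HPD:       return "HPD";
	case NOTIFY_FUSED_IO:  return "FUSED_IO";
	default:               return "<unknown>";
	}
}

int main(void)
{
	printf("%s %s\n", notify_type_str(NOTIFY_HPD),
	       notify_type_str((enum notify_type)99));
	return 0;
}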
drm_dbg_driver(adev_to_drm(adev), "trace_code:%u, tick_count:%u, param0:%u, param1:%u\n", entry.trace_code, entry.tick_count, entry.param0, entry.param1); } else break; @@ -927,7 +968,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) } while (count <= DMUB_TRACE_MAX_READ); if (count > DMUB_TRACE_MAX_READ) - DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ"); + drm_dbg_driver(adev_to_drm(adev), "Warning : count > DMUB_TRACE_MAX_READ"); if (dc_enable_dmub_notifications(adev->dm.dc) && irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) { @@ -935,25 +976,25 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) do { dc_stat_get_dmub_notification(adev->dm.dc, ¬ify); if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) { - DRM_ERROR("DM: notify type %d invalid!", notify.type); + drm_err(adev_to_drm(adev), "DM: notify type %d invalid!", notify.type); continue; } if (!dm->dmub_callback[notify.type]) { - DRM_WARN("DMUB notification skipped due to no handler: type=%s\n", - event_type[notify.type]); + drm_warn(adev_to_drm(adev), "DMUB notification skipped due to no handler: type=%s\n", + dmub_notification_type_str(notify.type)); continue; } if (dm->dmub_thread_offload[notify.type] == true) { dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC); if (!dmub_hpd_wrk) { - DRM_ERROR("Failed to allocate dmub_hpd_wrk"); + drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk"); return; } dmub_hpd_wrk->dmub_notify = kmemdup(¬ify, sizeof(struct dmub_notification), GFP_ATOMIC); if (!dmub_hpd_wrk->dmub_notify) { kfree(dmub_hpd_wrk); - DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify"); + drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk->dmub_notify"); return; } INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work); @@ -1011,10 +1052,10 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector) &compressor->gpu_addr, &compressor->cpu_addr); if (r) - DRM_ERROR("DM: Failed to initialize FBC\n"); + drm_err(adev_to_drm(adev), "DM: Failed to initialize FBC\n"); else { adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr; - DRM_INFO("DM: FBC alloc %lu\n", max_size*4); + drm_info(adev_to_drm(adev), "DM: FBC alloc %lu\n", max_size*4); } } @@ -1179,13 +1220,13 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) return 0; if (!fb_info) { - DRM_ERROR("No framebuffer info for DMUB service.\n"); + drm_err(adev_to_drm(adev), "No framebuffer info for DMUB service.\n"); return -EINVAL; } if (!dmub_fw) { /* Firmware required for DMUB support. */ - DRM_ERROR("No firmware provided for DMUB.\n"); + drm_err(adev_to_drm(adev), "No firmware provided for DMUB.\n"); return -EINVAL; } @@ -1195,19 +1236,19 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support); if (status != DMUB_STATUS_OK) { - DRM_ERROR("Error checking HW support for DMUB: %d\n", status); + drm_err(adev_to_drm(adev), "Error checking HW support for DMUB: %d\n", status); return -EINVAL; } if (!has_hw_support) { - DRM_INFO("DMUB unsupported on ASIC\n"); + drm_info(adev_to_drm(adev), "DMUB unsupported on ASIC\n"); return 0; } /* Reset DMCUB if it was previously running - before we overwrite its memory. 
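The outbox handler's error paths above show the usual two-step allocation cleanup: when the nested kmemdup() of the notification fails, the already-allocated work item is freed before returning, so the failure path leaks nothing. A minimal standalone version using calloc/malloc in place of the kernel allocators:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct work { void *notify; };

/* two-step allocation: free the first object if the second fails,
 * so the error path leaks nothing */
static struct work *make_work(const void *notify, size_t len)
{
	struct work *w = calloc(1, sizeof(*w));

	if (!w)
		return NULL;
	w->notify = malloc(len);
	if (!w->notify) {
		free(w);	/* undo the first allocation */
		return NULL;
	}
	memcpy(w->notify, notify, len);
	return w;
}

int main(void)
{
	int payload = 42;
	struct work *w = make_work(&payload, sizeof(payload));

	printf("queued: %d\n", w != NULL);
	if (w) {
		free(w->notify);
		free(w);
	}
	return 0;
}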
*/ status = dmub_srv_hw_reset(dmub_srv); if (status != DMUB_STATUS_OK) - DRM_WARN("Error resetting DMUB HW: %d\n", status); + drm_warn(adev_to_drm(adev), "Error resetting DMUB HW: %d\n", status); hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data; @@ -1290,6 +1331,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) case IP_VERSION(3, 5, 1): case IP_VERSION(3, 6, 0): hw_params.ips_sequential_ono = adev->external_rev_id > 0x10; + hw_params.lower_hbr3_phy_ssc = true; break; default: break; @@ -1297,14 +1339,14 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) status = dmub_srv_hw_init(dmub_srv, &hw_params); if (status != DMUB_STATUS_OK) { - DRM_ERROR("Error initializing DMUB HW: %d\n", status); + drm_err(adev_to_drm(adev), "Error initializing DMUB HW: %d\n", status); return -EINVAL; } /* Wait for firmware load to finish. */ status = dmub_srv_wait_for_auto_load(dmub_srv, 100000); if (status != DMUB_STATUS_OK) - DRM_WARN("Wait for DMUB auto-load failed: %d\n", status); + drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status); /* Init DMCU and ABM if available. */ if (dmcu && abm) { @@ -1315,11 +1357,11 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) if (!adev->dm.dc->ctx->dmub_srv) adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv); if (!adev->dm.dc->ctx->dmub_srv) { - DRM_ERROR("Couldn't allocate DC DMUB server!\n"); + drm_err(adev_to_drm(adev), "Couldn't allocate DC DMUB server!\n"); return -ENOMEM; } - DRM_INFO("DMUB hardware initialized: version=0x%08X\n", + drm_info(adev_to_drm(adev), "DMUB hardware initialized: version=0x%08X\n", adev->dm.dmcub_fw_version); /* Keeping sanity checks off if @@ -1362,18 +1404,18 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev) status = dmub_srv_is_hw_init(dmub_srv, &init); if (status != DMUB_STATUS_OK) - DRM_WARN("DMUB hardware init check failed: %d\n", status); + drm_warn(adev_to_drm(adev), "DMUB hardware init check failed: %d\n", status); if (status == DMUB_STATUS_OK && init) { /* Wait for firmware load to finish. */ status = dmub_srv_wait_for_auto_load(dmub_srv, 100000); if (status != DMUB_STATUS_OK) - DRM_WARN("Wait for DMUB auto-load failed: %d\n", status); + drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status); } else { /* Perform the full hardware initialization. 
*/ r = dm_dmub_hw_init(adev); if (r) - DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); + drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r); } } @@ -1483,18 +1525,18 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work) offload_work = container_of(work, struct hpd_rx_irq_offload_work, work); aconnector = offload_work->offload_wq->aconnector; + adev = offload_work->adev; if (!aconnector) { - DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work"); + drm_err(adev_to_drm(adev), "Can't retrieve aconnector in hpd_rx_irq_offload_work"); goto skip; } - adev = drm_to_adev(aconnector->base.dev); dc_link = aconnector->dc_link; mutex_lock(&aconnector->hpd_lock); if (!dc_link_detect_connection_type(dc_link, &new_connection_type)) - DRM_ERROR("KMS: Failed to detect connector\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n"); mutex_unlock(&aconnector->hpd_lock); if (new_connection_type == dc_connection_none) @@ -1563,8 +1605,9 @@ skip: } -static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc) +static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct amdgpu_device *adev) { + struct dc *dc = adev->dm.dc; int max_caps = dc->caps.max_links; int i = 0; struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL; @@ -1580,7 +1623,7 @@ static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq"); if (hpd_rx_offload_wq[i].wq == NULL) { - DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!"); + drm_err(adev_to_drm(adev), "create amdgpu_dm_hpd_rx_offload_wq fail!"); goto out_err; } @@ -1629,132 +1672,6 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev) return false; } -struct amdgpu_dm_quirks { - bool aux_hpd_discon; - bool support_edp0_on_dp1; -}; - -static struct amdgpu_dm_quirks quirk_entries = { - .aux_hpd_discon = false, - .support_edp0_on_dp1 = false -}; - -static int edp0_on_dp1_callback(const struct dmi_system_id *id) -{ - quirk_entries.support_edp0_on_dp1 = true; - return 0; -} - -static int aux_hpd_discon_callback(const struct dmi_system_id *id) -{ - quirk_entries.aux_hpd_discon = true; - return 0; -} - -static const struct dmi_system_id dmi_quirk_table[] = { - { - .callback = aux_hpd_discon_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"), - }, - }, - { - .callback = aux_hpd_discon_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"), - }, - }, - { - .callback = aux_hpd_discon_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"), - }, - }, - { - .callback = aux_hpd_discon_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"), - }, - }, - { - .callback = aux_hpd_discon_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"), - }, - }, - { - .callback = aux_hpd_discon_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"), - }, - }, - { - .callback = aux_hpd_discon_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"), - }, - }, - { - .callback = aux_hpd_discon_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, 
"Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"), - }, - }, - { - .callback = aux_hpd_discon_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"), - }, - }, - { - .callback = edp0_on_dp1_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "HP"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"), - }, - }, - { - .callback = edp0_on_dp1_callback, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "HP"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"), - }, - }, - {} - /* TODO: refactor this from a fixed table to a dynamic option */ -}; - -static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_data *init_data) -{ - int dmi_id; - struct drm_device *dev = dm->ddev; - - dm->aux_hpd_discon_quirk = false; - init_data->flags.support_edp0_on_dp1 = false; - - dmi_id = dmi_check_system(dmi_quirk_table); - - if (!dmi_id) - return; - - if (quirk_entries.aux_hpd_discon) { - dm->aux_hpd_discon_quirk = true; - drm_info(dev, "aux_hpd_discon_quirk attached\n"); - } - if (quirk_entries.support_edp0_on_dp1) { - init_data->flags.support_edp0_on_dp1 = true; - drm_info(dev, "support_edp0_on_dp1 attached\n"); - } -} void* dm_allocate_gpu_mem( @@ -1841,10 +1758,11 @@ dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev, return DMUB_STATUS_TIMEOUT; } -static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev) +static void *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev) { - struct dml2_soc_bb *bb; + void *bb; long long addr; + unsigned int bb_size; int i = 0; uint16_t chunk; enum dmub_gpint_command send_addrs[] = { @@ -1857,6 +1775,7 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device * switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(4, 0, 1): + bb_size = sizeof(struct dml2_soc_bb); break; default: return NULL; @@ -1864,7 +1783,7 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device * bb = dm_allocate_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, - sizeof(struct dml2_soc_bb), + bb_size, &addr); if (!bb) return NULL; @@ -1899,26 +1818,6 @@ static enum dmub_ips_disable_type dm_get_default_ips_mode( switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(3, 5, 0): case IP_VERSION(3, 6, 0): - /* - * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to - * cause a hard hang. A fix exists for newer PMFW. - * - * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest - * IPS state in all cases, except for s0ix and all displays off (DPMS), - * where IPS2 is allowed. - * - * When checking pmfw version, use the major and minor only. - */ - if ((adev->pm.fw_version & 0x00FFFF00) < 0x005D6300) - ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; - else if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(11, 5, 0)) - /* - * Other ASICs with DCN35 that have residency issues with - * IPS2 in idle. - * We want them to use IPS2 only in display off cases. 
- */ - ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; - break; case IP_VERSION(3, 5, 1): ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; break; @@ -1950,7 +1849,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) mutex_init(&adev->dm.audio_lock); if (amdgpu_dm_irq_init(adev)) { - DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n"); + drm_err(adev_to_drm(adev), "failed to initialize DM IRQ support.\n"); goto error; } @@ -2061,7 +1960,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) init_data.num_virtual_links = 1; - retrieve_dmi_info(&adev->dm, &init_data); + retrieve_dmi_info(&adev->dm); + if (adev->dm.edp0_on_dp1_quirk) + init_data.flags.support_edp0_on_dp1 = true; if (adev->dm.bb_from_dmub) init_data.bb_from_dmub = adev->dm.bb_from_dmub; @@ -2072,10 +1973,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev->dm.dc = dc_create(&init_data); if (adev->dm.dc) { - DRM_INFO("Display Core v%s initialized on %s\n", DC_VER, + drm_info(adev_to_drm(adev), "Display Core v%s initialized on %s\n", DC_VER, dce_version_to_string(adev->dm.dc->ctx->dce_version)); } else { - DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER); + drm_info(adev_to_drm(adev), "Display Core failed to initialize with v%s!\n", DC_VER); goto error; } @@ -2101,33 +2002,44 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH) adev->dm.dc->debug.force_subvp_mclk_switch = true; - if (amdgpu_dc_debug_mask & DC_DISABLE_SUBVP) + if (amdgpu_dc_debug_mask & DC_DISABLE_SUBVP_FAMS) { adev->dm.dc->debug.force_disable_subvp = true; + adev->dm.dc->debug.fams2_config.bits.enable = false; + } if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) { adev->dm.dc->debug.using_dml2 = true; adev->dm.dc->debug.using_dml21 = true; } + if (amdgpu_dc_debug_mask & DC_HDCP_LC_FORCE_FW_ENABLE) + adev->dm.dc->debug.hdcp_lc_force_fw_enable = true; + + if (amdgpu_dc_debug_mask & DC_HDCP_LC_ENABLE_SW_FALLBACK) + adev->dm.dc->debug.hdcp_lc_enable_sw_fallback = true; + + if (amdgpu_dc_debug_mask & DC_SKIP_DETECTION_LT) + adev->dm.dc->debug.skip_detection_link_training = true; + adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm; /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */ adev->dm.dc->debug.ignore_cable_id = true; if (adev->dm.dc->caps.dp_hdmi21_pcon_support) - DRM_INFO("DP-HDMI FRL PCON supported\n"); + drm_info(adev_to_drm(adev), "DP-HDMI FRL PCON supported\n"); r = dm_dmub_hw_init(adev); if (r) { - DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); + drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r); goto error; } dc_hardware_init(adev->dm.dc); - adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc); + adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev); if (!adev->dm.hpd_rx_offload_wq) { - DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n"); + drm_err(adev_to_drm(adev), "failed to create hpd rx offload workqueue.\n"); goto error; } @@ -2142,10 +2054,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); if (!adev->dm.freesync_module) { - DRM_ERROR( - "amdgpu: failed to initialize freesync_module.\n"); + drm_err(adev_to_drm(adev), + "failed to initialize freesync_module.\n"); } else - DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n", + drm_dbg_driver(adev_to_drm(adev), "amdgpu: freesync_module init done 
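
amdgpu_dm_init() above keys several new debug toggles (DC_DISABLE_SUBVP_FAMS, DC_HDCP_LC_FORCE_FW_ENABLE, DC_HDCP_LC_ENABLE_SW_FALLBACK, DC_SKIP_DETECTION_LT) off individual bits in amdgpu_dc_debug_mask, so the options compose freely on the module command line. A runnable sketch of the same bit-test pattern; the DEMO_* values are stand-ins, not the real DC_* bit assignments:

#include <stdint.h>
#include <stdio.h>

#define DEMO_DISABLE_SUBVP_FAMS	(1u << 0)
#define DEMO_HDCP_LC_FORCE_FW	(1u << 1)
#define DEMO_SKIP_DETECTION_LT	(1u << 2)

int main(void)
{
	uint32_t mask = DEMO_DISABLE_SUBVP_FAMS | DEMO_SKIP_DETECTION_LT;

	/* each set bit flips exactly one independent debug knob */
	if (mask & DEMO_DISABLE_SUBVP_FAMS)
		puts("subvp + fams2 disabled");
	if (mask & DEMO_HDCP_LC_FORCE_FW)
		puts("hdcp locality check forced to fw");
	if (mask & DEMO_SKIP_DETECTION_LT)
		puts("link training skipped on detection");
	return 0;
}
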
%p.\n", adev->dm.freesync_module); amdgpu_dm_init_color_mod(); @@ -2154,7 +2066,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev->dm.vblank_control_workqueue = create_singlethread_workqueue("dm_vblank_control_workqueue"); if (!adev->dm.vblank_control_workqueue) - DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n"); + drm_err(adev_to_drm(adev), "failed to initialize vblank_workqueue.\n"); } if (adev->dm.dc->caps.ips_support && @@ -2165,9 +2077,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc); if (!adev->dm.hdcp_workqueue) - DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n"); + drm_err(adev_to_drm(adev), "failed to initialize hdcp_workqueue.\n"); else - DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); + drm_dbg_driver(adev_to_drm(adev), "amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); dc_init_callbacks(adev->dm.dc, &init_params); } @@ -2175,20 +2087,29 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_completion(&adev->dm.dmub_aux_transfer_done); adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL); if (!adev->dm.dmub_notify) { - DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify"); + drm_info(adev_to_drm(adev), "fail to allocate adev->dm.dmub_notify"); goto error; } adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq"); if (!adev->dm.delayed_hpd_wq) { - DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n"); + drm_err(adev_to_drm(adev), "failed to create hpd offload workqueue.\n"); goto error; } amdgpu_dm_outbox_init(adev); if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY, dmub_aux_setconfig_callback, false)) { - DRM_ERROR("amdgpu: fail to register dmub aux callback"); + drm_err(adev_to_drm(adev), "fail to register dmub aux callback"); + goto error; + } + + for (size_t i = 0; i < ARRAY_SIZE(adev->dm.fused_io); i++) + init_completion(&adev->dm.fused_io[i].replied); + + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_FUSED_IO, + dmub_aux_fused_io_callback, false)) { + drm_err(adev_to_drm(adev), "fail to register dmub fused io callback"); goto error; } /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. 
@@ -2205,8 +2126,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) } if (amdgpu_dm_initialize_drm_device(adev)) { - DRM_ERROR( - "amdgpu: failed to initialize sw for display support.\n"); + drm_err(adev_to_drm(adev), + "failed to initialize sw for display support.\n"); goto error; } @@ -2220,22 +2141,22 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size; if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) { - DRM_ERROR( - "amdgpu: failed to initialize sw for display support.\n"); + drm_err(adev_to_drm(adev), + "failed to initialize sw for display support.\n"); goto error; } #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) amdgpu_dm_crtc_secure_display_create_contexts(adev); if (!adev->dm.secure_display_ctx.crtc_ctx) - DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n"); + drm_err(adev_to_drm(adev), "failed to initialize secure display contexts.\n"); if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(4, 0, 1)) adev->dm.secure_display_ctx.support_mul_roi = true; #endif - DRM_DEBUG_DRIVER("KMS initialized.\n"); + drm_dbg_driver(adev_to_drm(adev), "KMS initialized.\n"); return 0; error: @@ -2408,7 +2329,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev) default: break; } - DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); + drm_err(adev_to_drm(adev), "Unsupported ASIC type: 0x%X\n", adev->asic_type); return -EINVAL; } @@ -2426,7 +2347,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev) return 0; } if (r) { - dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n", + drm_err(adev_to_drm(adev), "amdgpu_dm: Can't validate firmware \"%s\"\n", fw_name_dmcu); amdgpu_ucode_release(&adev->dm.fw_dmcu); return r; @@ -2485,6 +2406,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_IB_MEM DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE }; int r; @@ -2551,7 +2473,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) adev->firmware.fw_size += ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE); - DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n", + drm_info(adev_to_drm(adev), "Loading DMUB firmware via PSP: version=0x%08X\n", adev->dm.dmcub_fw_version); } @@ -2560,7 +2482,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) dmub_srv = adev->dm.dmub_srv; if (!dmub_srv) { - DRM_ERROR("Failed to allocate DMUB service!\n"); + drm_err(adev_to_drm(adev), "Failed to allocate DMUB service!\n"); return -ENOMEM; } @@ -2573,7 +2495,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) /* Create the DMUB service. 
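
The firmware accounting in dm_dmub_sw_init() rounds each DMUB region up to a page boundary with ALIGN(), since reservations are made in whole pages while the firmware header reports byte counts. The arithmetic, as a runnable userspace sketch (the header value is made up):

#include <stdio.h>
#include <stdint.h>

/* ALIGN(x, a) for power-of-two a, as in the kernel */
#define DEMO_ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t inst_const_bytes = 123456;	/* made-up header value */
	uint64_t page = 4096;

	/* 123456 bytes -> 126976 (31 whole pages) */
	printf("%llu\n",
	       (unsigned long long)DEMO_ALIGN(inst_const_bytes, page));
	return 0;
}
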
*/ status = dmub_srv_create(dmub_srv, &create_params); if (status != DMUB_STATUS_OK) { - DRM_ERROR("Error creating DMUB service: %d\n", status); + drm_err(adev_to_drm(adev), "Error creating DMUB service: %d\n", status); return -EINVAL; } @@ -2598,7 +2520,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) ®ion_info); if (status != DMUB_STATUS_OK) { - DRM_ERROR("Error calculating DMUB region info: %d\n", status); + drm_err(adev_to_drm(adev), "Error calculating DMUB region info: %d\n", status); return -EINVAL; } @@ -2627,14 +2549,14 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) fb_info = adev->dm.dmub_fb_info; if (!fb_info) { - DRM_ERROR( + drm_err(adev_to_drm(adev), "Failed to allocate framebuffer info for DMUB service!\n"); return -ENOMEM; } status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info); if (status != DMUB_STATUS_OK) { - DRM_ERROR("Error calculating DMUB FB info: %d\n", status); + drm_err(adev_to_drm(adev), "Error calculating DMUB FB info: %d\n", status); return -EINVAL; } @@ -2651,7 +2573,7 @@ static int dm_sw_init(struct amdgpu_ip_block *ip_block) adev->dm.cgs_device = amdgpu_cgs_create_device(adev); if (!adev->dm.cgs_device) { - DRM_ERROR("amdgpu: failed to create cgs device.\n"); + drm_err(adev_to_drm(adev), "failed to create cgs device.\n"); return -EINVAL; } @@ -2957,7 +2879,7 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) ret = amdgpu_dpm_write_watermarks_table(adev); if (ret) { - DRM_ERROR("Failed to update WMTABLE!\n"); + drm_err(adev_to_drm(adev), "Failed to update WMTABLE!\n"); return ret; } @@ -2975,13 +2897,13 @@ static int dm_oem_i2c_hw_init(struct amdgpu_device *adev) if (oem_ddc_service) { oem_i2c = create_i2c(oem_ddc_service, true); if (!oem_i2c) { - dev_info(adev->dev, "Failed to create oem i2c adapter data\n"); + drm_info(adev_to_drm(adev), "Failed to create oem i2c adapter data\n"); return -ENOMEM; } r = i2c_add_adapter(&oem_i2c->base); if (r) { - dev_info(adev->dev, "Failed to register oem i2c\n"); + drm_info(adev_to_drm(adev), "Failed to register oem i2c\n"); kfree(oem_i2c); return r; } @@ -3024,7 +2946,7 @@ static int dm_hw_init(struct amdgpu_ip_block *ip_block) r = dm_oem_i2c_hw_init(adev); if (r) - dev_info(adev->dev, "Failed to add OEM i2c bus\n"); + drm_info(adev_to_drm(adev), "Failed to add OEM i2c bus\n"); return 0; } @@ -3067,7 +2989,7 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev, irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst; rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; if (rc) - DRM_WARN("Failed to %s pflip interrupts\n", + drm_warn(adev_to_drm(adev), "Failed to %s pflip interrupts\n", enable ? "enable" : "disable"); if (enable) { @@ -3077,14 +2999,14 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev, rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false); if (rc) - DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis"); + drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n", enable ? "en" : "dis"); irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; /* During gpu-reset we disable and then enable vblank irq, so * don't use amdgpu_irq_get/put() to avoid refcount change. */ if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) - DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis"); + drm_warn(adev_to_drm(adev), "Failed to %sable vblank interrupt\n", enable ? 
"en" : "dis"); } } @@ -3141,6 +3063,77 @@ static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) } } +static int dm_cache_state(struct amdgpu_device *adev) +{ + int r; + + adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); + if (IS_ERR(adev->dm.cached_state)) { + r = PTR_ERR(adev->dm.cached_state); + adev->dm.cached_state = NULL; + } + + return adev->dm.cached_state ? 0 : r; +} + +static void dm_destroy_cached_state(struct amdgpu_device *adev) +{ + struct amdgpu_display_manager *dm = &adev->dm; + struct drm_device *ddev = adev_to_drm(adev); + struct dm_plane_state *dm_new_plane_state; + struct drm_plane_state *new_plane_state; + struct dm_crtc_state *dm_new_crtc_state; + struct drm_crtc_state *new_crtc_state; + struct drm_plane *plane; + struct drm_crtc *crtc; + int i; + + if (!dm->cached_state) + return; + + /* Force mode set in atomic commit */ + for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { + new_crtc_state->active_changed = true; + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + reset_freesync_config_for_crtc(dm_new_crtc_state); + } + + /* + * atomic_check is expected to create the dc states. We need to release + * them here, since they were duplicated as part of the suspend + * procedure. + */ + for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + if (dm_new_crtc_state->stream) { + WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1); + dc_stream_release(dm_new_crtc_state->stream); + dm_new_crtc_state->stream = NULL; + } + dm_new_crtc_state->base.color_mgmt_changed = true; + } + + for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { + dm_new_plane_state = to_dm_plane_state(new_plane_state); + if (dm_new_plane_state->dc_state) { + WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1); + dc_plane_state_release(dm_new_plane_state->dc_state); + dm_new_plane_state->dc_state = NULL; + } + } + + drm_atomic_helper_resume(ddev, dm->cached_state); + + dm->cached_state = NULL; +} + +static void dm_complete(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + + dm_destroy_cached_state(adev); +} + static int dm_prepare_suspend(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; @@ -3149,11 +3142,8 @@ static int dm_prepare_suspend(struct amdgpu_ip_block *ip_block) return 0; WARN_ON(adev->dm.cached_state); - adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); - if (IS_ERR(adev->dm.cached_state)) - return PTR_ERR(adev->dm.cached_state); - return 0; + return dm_cache_state(adev); } static int dm_suspend(struct amdgpu_ip_block *ip_block) @@ -3187,9 +3177,10 @@ static int dm_suspend(struct amdgpu_ip_block *ip_block) } if (!adev->dm.cached_state) { - adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); - if (IS_ERR(adev->dm.cached_state)) - return PTR_ERR(adev->dm.cached_state); + int r = dm_cache_state(adev); + + if (r) + return r; } s3_handle_hdmi_cec(adev_to_drm(adev), true); @@ -3334,16 +3325,16 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state, for (k = 0; k < dc_state->stream_count; k++) { bundle->stream_update.stream = dc_state->streams[k]; - for (m = 0; m < dc_state->stream_status->plane_count; m++) { + for (m = 0; m < dc_state->stream_status[k].plane_count; m++) { bundle->surface_updates[m].surface = - dc_state->stream_status->plane_states[m]; + dc_state->stream_status[k].plane_states[m]; 
bundle->surface_updates[m].surface->force_full_update = true; } update_planes_and_stream_adapter(dm->dc, UPDATE_TYPE_FULL, - dc_state->stream_status->plane_count, + dc_state->stream_status[k].plane_count, dc_state->streams[k], &bundle->stream_update, bundle->surface_updates); @@ -3376,12 +3367,6 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; struct drm_connector_list_iter iter; - struct drm_crtc *crtc; - struct drm_crtc_state *new_crtc_state; - struct dm_crtc_state *dm_new_crtc_state; - struct drm_plane *plane; - struct drm_plane_state *new_plane_state; - struct dm_plane_state *dm_new_plane_state; struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); enum dc_connection_type new_connection_type = dc_connection_none; struct dc_state *dc_state; @@ -3413,8 +3398,10 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) link_enc_cfg_copy(adev->dm.dc->current_state, dc_state); r = dm_dmub_hw_init(adev); - if (r) + if (r) { drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r); + return r; + } dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); @@ -3460,11 +3447,6 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) return 0; } - - /* leave display off for S4 sequence */ - if (adev->in_s4) - return 0; - /* Recreate dc_state - DC invalidates it when setting power state to S3. */ dc_state_release(dm_state->context); dm_state->context = dc_state_create(dm->dc, NULL); @@ -3543,40 +3525,7 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) } drm_connector_list_iter_end(&iter); - /* Force mode set in atomic commit */ - for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { - new_crtc_state->active_changed = true; - dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); - reset_freesync_config_for_crtc(dm_new_crtc_state); - } - - /* - * atomic_check is expected to create the dc states. We need to release - * them here, since they were duplicated as part of the suspend - * procedure. 
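
The force-mode-set and dc-state release logic being deleted from dm_resume() in the hunks below is not lost; it moves verbatim into dm_destroy_cached_state() (see the earlier hunk), and the cached atomic state itself is now produced by dm_cache_state(), which both dm_prepare_suspend() and dm_suspend() share. A minimal sketch of that suspend-cache/resume-restore pairing around the two standard DRM helpers; the wrapper struct is hypothetical:

#include <linux/err.h>
#include <drm/drm_atomic_helper.h>

struct demo_dev {
	struct drm_device *ddev;
	struct drm_atomic_state *cached_state;
};

static int demo_suspend(struct demo_dev *d)
{
	d->cached_state = drm_atomic_helper_suspend(d->ddev);
	if (IS_ERR(d->cached_state)) {
		int r = PTR_ERR(d->cached_state);

		d->cached_state = NULL;	/* never leave an ERR_PTR behind */
		return r;
	}
	return 0;
}

static void demo_resume(struct demo_dev *d)
{
	if (!d->cached_state)
		return;
	drm_atomic_helper_resume(d->ddev, d->cached_state);
	d->cached_state = NULL;
}
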
- */ - for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { - dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); - if (dm_new_crtc_state->stream) { - WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1); - dc_stream_release(dm_new_crtc_state->stream); - dm_new_crtc_state->stream = NULL; - } - dm_new_crtc_state->base.color_mgmt_changed = true; - } - - for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { - dm_new_plane_state = to_dm_plane_state(new_plane_state); - if (dm_new_plane_state->dc_state) { - WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1); - dc_plane_state_release(dm_new_plane_state->dc_state); - dm_new_plane_state->dc_state = NULL; - } - } - - drm_atomic_helper_resume(ddev, dm->cached_state); - - dm->cached_state = NULL; + dm_destroy_cached_state(adev); /* Do mst topology probing after resuming cached state*/ drm_connector_list_iter_begin(ddev, &iter); @@ -3625,6 +3574,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = { .prepare_suspend = dm_prepare_suspend, .suspend = dm_suspend, .resume = dm_resume, + .complete = dm_complete, .is_idle = dm_is_idle, .wait_for_idle = dm_wait_for_idle, .check_soft_reset = dm_check_soft_reset, @@ -3696,13 +3646,15 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) luminance_range = &conn_base->display_info.luminance_range; - if (luminance_range->max_luminance) { - caps->aux_min_input_signal = luminance_range->min_luminance; + if (luminance_range->max_luminance) caps->aux_max_input_signal = luminance_range->max_luminance; - } else { - caps->aux_min_input_signal = 0; + else caps->aux_max_input_signal = 512; - } + + if (luminance_range->min_luminance) + caps->aux_min_input_signal = luminance_range->min_luminance; + else + caps->aux_min_input_signal = 1; min_input_signal_override = drm_get_panel_min_brightness_quirk(aconnector->drm_edid); if (min_input_signal_override >= 0) @@ -3919,20 +3871,21 @@ static void handle_hpd_irq(void *param) } -static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq, +static void schedule_hpd_rx_offload_work(struct amdgpu_device *adev, struct hpd_rx_irq_offload_work_queue *offload_wq, union hpd_irq_data hpd_irq_data) { struct hpd_rx_irq_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_KERNEL); if (!offload_work) { - DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n"); + drm_err(adev_to_drm(adev), "Failed to allocate hpd_rx_irq_offload_work.\n"); return; } INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work); offload_work->data = hpd_irq_data; offload_work->offload_wq = offload_wq; + offload_work->adev = adev; queue_work(offload_wq->wq, &offload_work->work); DRM_DEBUG_KMS("queue work to handle hpd_rx offload work"); @@ -3974,7 +3927,7 @@ static void handle_hpd_rx_irq(void *param) goto out; if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { - schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); + schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data); goto out; } @@ -3996,7 +3949,7 @@ static void handle_hpd_rx_irq(void *param) spin_unlock(&offload_wq->offload_lock); if (!skip) - schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); + schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data); goto out; } @@ -4013,7 +3966,7 @@ static void handle_hpd_rx_irq(void *param) spin_unlock(&offload_wq->offload_lock); if (!skip) - schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); + schedule_hpd_rx_offload_work(adev, offload_wq, 
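
update_connector_ext_caps() above now falls back to 512 for a missing maximum luminance and, notably, to 1 rather than 0 for a missing minimum, presumably so a zero floor cannot degenerate the aux brightness range used by later rescaling. The fallback shape, as a sketch:

static void demo_caps(unsigned int min_lum, unsigned int max_lum,
		      unsigned int *out_min, unsigned int *out_max)
{
	/* fall back to defaults when the panel reports zero */
	*out_max = max_lum ? max_lum : 512;
	*out_min = min_lum ? min_lum : 1;	/* 1, not 0, keeps the range sane */
}
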
hpd_irq_data); goto out; } @@ -4023,7 +3976,7 @@ out: if (result && !is_mst_root_connector) { /* Downstream Port status changed. */ if (!dc_link_detect_connection_type(dc_link, &new_connection_type)) - DRM_ERROR("KMS: Failed to detect connector\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n"); if (aconnector->base.force && new_connection_type == dc_connection_none) { emulated_link_detect(dc_link); @@ -4086,19 +4039,19 @@ static int register_hpd_handlers(struct amdgpu_device *adev) if (dc_is_dmub_outbox_supported(adev->dm.dc)) { if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) { - DRM_ERROR("amdgpu: fail to register dmub hpd callback"); + drm_err(adev_to_drm(adev), "fail to register dmub hpd callback"); return -EINVAL; } if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) { - DRM_ERROR("amdgpu: fail to register dmub hpd callback"); + drm_err(adev_to_drm(adev), "fail to register dmub hpd callback"); return -EINVAL; } if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY, dmub_hpd_sense_callback, true)) { - DRM_ERROR("amdgpu: fail to register dmub hpd sense callback"); + drm_err(adev_to_drm(adev), "fail to register dmub hpd sense callback"); return -EINVAL; } } @@ -4119,7 +4072,7 @@ static int register_hpd_handlers(struct amdgpu_device *adev) if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || int_params.irq_source < DC_IRQ_SOURCE_HPD1 || int_params.irq_source > DC_IRQ_SOURCE_HPD6) { - DRM_ERROR("Failed to register hpd irq!\n"); + drm_err(adev_to_drm(adev), "Failed to register hpd irq!\n"); return -EINVAL; } @@ -4137,7 +4090,7 @@ static int register_hpd_handlers(struct amdgpu_device *adev) if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || int_params.irq_source < DC_IRQ_SOURCE_HPD1RX || int_params.irq_source > DC_IRQ_SOURCE_HPD6RX) { - DRM_ERROR("Failed to register hpd rx irq!\n"); + drm_err(adev_to_drm(adev), "Failed to register hpd rx irq!\n"); return -EINVAL; } @@ -4179,7 +4132,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev) for (i = 0; i < adev->mode_info.num_crtc; i++) { r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq); if (r) { - DRM_ERROR("Failed to add crtc irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n"); return r; } @@ -4190,7 +4143,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev) if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 || int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) { - DRM_ERROR("Failed to register vblank irq!\n"); + drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n"); return -EINVAL; } @@ -4209,7 +4162,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev) i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); if (r) { - DRM_ERROR("Failed to add page flip irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n"); return r; } @@ -4220,7 +4173,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev) if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST || int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) { - DRM_ERROR("Failed to register pflip irq!\n"); + drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n"); return -EINVAL; } @@ -4238,7 +4191,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev) r = 
amdgpu_irq_add_id(adev, client_id, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); if (r) { - DRM_ERROR("Failed to add hpd irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n"); return r; } @@ -4280,7 +4233,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) { r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq); if (r) { - DRM_ERROR("Failed to add crtc irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n"); return r; } @@ -4291,7 +4244,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 || int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) { - DRM_ERROR("Failed to register vblank irq!\n"); + drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n"); return -EINVAL; } @@ -4309,7 +4262,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) { r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq); if (r) { - DRM_ERROR("Failed to add vupdate irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add vupdate irq id!\n"); return r; } @@ -4320,7 +4273,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 || int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) { - DRM_ERROR("Failed to register vupdate irq!\n"); + drm_err(adev_to_drm(adev), "Failed to register vupdate irq!\n"); return -EINVAL; } @@ -4339,7 +4292,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); if (r) { - DRM_ERROR("Failed to add page flip irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n"); return r; } @@ -4350,7 +4303,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST || int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) { - DRM_ERROR("Failed to register pflip irq!\n"); + drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n"); return -EINVAL; } @@ -4368,7 +4321,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) r = amdgpu_irq_add_id(adev, client_id, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); if (r) { - DRM_ERROR("Failed to add hpd irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n"); return r; } @@ -4418,7 +4371,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq); if (r) { - DRM_ERROR("Failed to add crtc irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n"); return r; } @@ -4429,7 +4382,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 || int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) { - DRM_ERROR("Failed to register vblank irq!\n"); + drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n"); return -EINVAL; } @@ -4450,7 +4403,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) vrtl_int_srcid[i], 
&adev->vline0_irq); if (r) { - DRM_ERROR("Failed to add vline0 irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add vline0 irq id!\n"); return r; } @@ -4461,7 +4414,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || int_params.irq_source < DC_IRQ_SOURCE_DC1_VLINE0 || int_params.irq_source > DC_IRQ_SOURCE_DC6_VLINE0) { - DRM_ERROR("Failed to register vline0 irq!\n"); + drm_err(adev_to_drm(adev), "Failed to register vline0 irq!\n"); return -EINVAL; } @@ -4489,7 +4442,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq); if (r) { - DRM_ERROR("Failed to add vupdate irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add vupdate irq id!\n"); return r; } @@ -4500,7 +4453,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 || int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) { - DRM_ERROR("Failed to register vupdate irq!\n"); + drm_err(adev_to_drm(adev), "Failed to register vupdate irq!\n"); return -EINVAL; } @@ -4520,7 +4473,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) i++) { r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq); if (r) { - DRM_ERROR("Failed to add page flip irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n"); return r; } @@ -4531,7 +4484,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) if (int_params.irq_source == DC_IRQ_SOURCE_INVALID || int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST || int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) { - DRM_ERROR("Failed to register pflip irq!\n"); + drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n"); return -EINVAL; } @@ -4549,7 +4502,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT, &adev->hpd_irq); if (r) { - DRM_ERROR("Failed to add hpd irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n"); return r; } @@ -4571,7 +4524,7 @@ static int register_outbox_irq_handlers(struct amdgpu_device *adev) r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT, &adev->dmub_outbox_irq); if (r) { - DRM_ERROR("Failed to add outbox irq id!\n"); + drm_err(adev_to_drm(adev), "Failed to add outbox irq id!\n"); return r; } @@ -4803,45 +4756,72 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, return 1; } -static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps, - uint32_t brightness) +/* Rescale from [min..max] to [0..MAX_BACKLIGHT_LEVEL] */ +static inline u32 scale_input_to_fw(int min, int max, u64 input) { - unsigned int min, max; - u8 prev_signal = 0, prev_lum = 0; + return DIV_ROUND_CLOSEST_ULL(input * MAX_BACKLIGHT_LEVEL, max - min); +} - if (!get_brightness_range(caps, &min, &max)) - return brightness; +/* Rescale from [0..MAX_BACKLIGHT_LEVEL] to [min..max] */ +static inline u32 scale_fw_to_input(int min, int max, u64 input) +{ + return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), MAX_BACKLIGHT_LEVEL); +} - for (int i = 0; i < caps->data_points; i++) { - u8 signal, lum; +static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *caps, + unsigned int min, unsigned int max, + uint32_t *user_brightness) +{ + u32 
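
The two new helpers above rescale between the user-visible backlight range and the firmware range [0..MAX_BACKLIGHT_LEVEL] using DIV_ROUND_CLOSEST_ULL, so conversions round to nearest instead of truncating. A runnable sketch of the same arithmetic; the firmware maximum of 255 and the min/max values are assumptions for illustration:

#include <stdio.h>
#include <stdint.h>

#define DEMO_MAX_FW	255u	/* stand-in for MAX_BACKLIGHT_LEVEL */

static uint32_t div_round_closest(uint64_t n, uint64_t d)
{
	return (uint32_t)((n + d / 2) / d);
}

/* [0..max-min] -> [0..DEMO_MAX_FW], mirroring scale_input_to_fw() */
static uint32_t to_fw(uint32_t min, uint32_t max, uint64_t in)
{
	return div_round_closest(in * DEMO_MAX_FW, max - min);
}

/* [0..DEMO_MAX_FW] -> [min..max], mirroring scale_fw_to_input() */
static uint32_t from_fw(uint32_t min, uint32_t max, uint64_t in)
{
	return min + div_round_closest(in * (max - min), DEMO_MAX_FW);
}

int main(void)
{
	uint32_t min = 12, max = 250;

	printf("user 0   -> fw %u\n", to_fw(min, max, 0));	/* 0 */
	printf("user 119 -> fw %u\n", to_fw(min, max, 119));	/* 128 */
	printf("user 238 -> fw %u\n", to_fw(min, max, 238));	/* 255 */
	printf("fw 128 -> panel %u\n", from_fw(min, max, 128));	/* 131 */
	return 0;
}
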
brightness = scale_input_to_fw(min, max, *user_brightness); + u8 prev_signal = 0, prev_lum = 0; + int i = 0; - if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE) - break; + if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE) + return; - signal = caps->luminance_data[i].input_signal; - lum = caps->luminance_data[i].luminance; + if (!caps->data_points) + return; + + /* choose start to run less interpolation steps */ + if (caps->luminance_data[caps->data_points/2].input_signal > brightness) + i = caps->data_points/2; + do { + u8 signal = caps->luminance_data[i].input_signal; + u8 lum = caps->luminance_data[i].luminance; /* * brightness == signal: luminance is percent numerator * brightness < signal: interpolate between previous and current luminance numerator * brightness > signal: find next data point */ - if (brightness < signal) - lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) * - (brightness - prev_signal), - signal - prev_signal); - else if (brightness > signal) { + if (brightness > signal) { prev_signal = signal; prev_lum = lum; + i++; continue; } - brightness = DIV_ROUND_CLOSEST(lum * brightness, 101); - break; - } + if (brightness < signal) + lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) * + (brightness - prev_signal), + signal - prev_signal); + *user_brightness = scale_fw_to_input(min, max, + DIV_ROUND_CLOSEST(lum * brightness, 101)); + return; + } while (i < caps->data_points); +} + +static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps, + uint32_t brightness) +{ + unsigned int min, max; + + if (!get_brightness_range(caps, &min, &max)) + return brightness; - // Rescale 0..255 to min..max - return min + DIV_ROUND_CLOSEST((max - min) * brightness, - AMDGPU_MAX_BL_LEVEL); + convert_custom_brightness(caps, min, max, &brightness); + + // Rescale 0..max to min..max + return min + DIV_ROUND_CLOSEST_ULL((u64)(max - min) * brightness, max); } static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps, @@ -4854,8 +4834,8 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap if (brightness < min) return 0; - // Rescale min..max to 0..255 - return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min), + // Rescale min..max to 0..max + return DIV_ROUND_CLOSEST_ULL((u64)max * (brightness - min), max - min); } @@ -4885,6 +4865,14 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, reallow_idle = true; } + if (trace_amdgpu_dm_brightness_enabled()) { + trace_amdgpu_dm_brightness(__builtin_return_address(0), + user_brightness, + brightness, + caps->aux_support, + power_supply_is_system_supplied() > 0); + } + if (caps->aux_support) { rc = dc_link_set_backlight_level_nits(link, true, brightness, AUX_BL_DEFAULT_TRANSITION_TIME_MS); @@ -4980,8 +4968,9 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) struct drm_device *drm = aconnector->base.dev; struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm; struct backlight_properties props = { 0 }; - struct amdgpu_dm_backlight_caps caps = { 0 }; + struct amdgpu_dm_backlight_caps *caps; char bl_name[16]; + int min, max; if (aconnector->bl_idx == -1) return; @@ -4993,18 +4982,21 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) return; } - amdgpu_acpi_get_backlight_caps(&caps); - if (caps.caps_valid) { + caps = &dm->backlight_caps[aconnector->bl_idx]; + if (get_brightness_range(caps, &min, &max)) { if (power_supply_is_system_supplied() > 0) - 
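
convert_custom_brightness() above walks the panel's luminance table and linearly interpolates between the two data points that bracket the requested level; starting the walk at the table midpoint when the target lies in the upper half simply halves the expected number of steps. The interpolation itself, as a runnable sketch over a made-up three-point curve (the final lum * brightness / 101 scaling from the driver is omitted here):

#include <stdio.h>

struct demo_pt { unsigned char input_signal, luminance; };

/* made-up curve: luminance percent at a given input level */
static const struct demo_pt curve[] = {
	{ 0, 0 }, { 50, 20 }, { 100, 100 },
};

static unsigned int demo_lum(unsigned int brightness)
{
	unsigned int prev_sig = 0, prev_lum = 0;

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int sig = curve[i].input_signal;
		unsigned int lum = curve[i].luminance;

		if (brightness > sig) {		/* keep walking */
			prev_sig = sig;
			prev_lum = lum;
			continue;
		}
		if (brightness < sig)		/* interpolate inside the segment */
			lum = prev_lum + (lum - prev_lum) *
				(brightness - prev_sig) / (sig - prev_sig);
		return lum;
	}
	return 100;	/* past the last data point */
}

int main(void)
{
	/* 75 sits halfway between (50,20) and (100,100) -> 60 */
	printf("%u\n", demo_lum(75));
	return 0;
}
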
props.brightness = caps.ac_level; + props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->ac_level, 100); else - props.brightness = caps.dc_level; + props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->dc_level, 100); + /* min is zero, so max needs to be adjusted */ + props.max_brightness = max - min; + drm_dbg(drm, "Backlight caps: min: %d, max: %d, ac %d, dc %d\n", min, max, + caps->ac_level, caps->dc_level); } else - props.brightness = AMDGPU_MAX_BL_LEVEL; + props.brightness = props.max_brightness = MAX_BACKLIGHT_LEVEL; - if (caps.data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)) + if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)) drm_info(drm, "Using custom brightness curve\n"); - props.max_brightness = AMDGPU_MAX_BL_LEVEL; props.type = BACKLIGHT_RAW; snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", @@ -5016,10 +5008,10 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) dm->brightness[aconnector->bl_idx] = props.brightness; if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) { - DRM_ERROR("DM: Backlight registration failed!\n"); + drm_err(drm, "DM: Backlight registration failed!\n"); dm->backlight_dev[aconnector->bl_idx] = NULL; } else - DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name); + drm_dbg_driver(drm, "DM: Registered Backlight device: %s\n", bl_name); } static int initialize_plane(struct amdgpu_display_manager *dm, @@ -5033,7 +5025,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm, plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL); if (!plane) { - DRM_ERROR("KMS: Failed to allocate plane\n"); + drm_err(adev_to_drm(dm->adev), "KMS: Failed to allocate plane\n"); return -ENOMEM; } plane->type = plane_type; @@ -5051,7 +5043,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm, ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap); if (ret) { - DRM_ERROR("KMS: Failed to initialize plane\n"); + drm_err(adev_to_drm(dm->adev), "KMS: Failed to initialize plane\n"); kfree(plane); return ret; } @@ -5120,14 +5112,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) link_cnt = dm->dc->caps.max_links; if (amdgpu_dm_mode_config_init(dm->adev)) { - DRM_ERROR("DM: Failed to initialize mode config\n"); + drm_err(adev_to_drm(adev), "DM: Failed to initialize mode config\n"); return -EINVAL; } /* There is one primary plane per CRTC */ primary_planes = dm->dc->caps.max_streams; if (primary_planes > AMDGPU_MAX_PLANES) { - DRM_ERROR("DM: Plane nums out of 6 planes\n"); + drm_err(adev_to_drm(adev), "DM: Plane nums out of 6 planes\n"); return -EINVAL; } @@ -5140,7 +5132,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) if (initialize_plane(dm, mode_info, i, DRM_PLANE_TYPE_PRIMARY, plane)) { - DRM_ERROR("KMS: Failed to initialize primary plane\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to initialize primary plane\n"); goto fail; } } @@ -5172,14 +5164,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) if (initialize_plane(dm, NULL, primary_planes + i, DRM_PLANE_TYPE_OVERLAY, plane)) { - DRM_ERROR("KMS: Failed to initialize overlay plane\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to initialize overlay plane\n"); goto fail; } } for (i = 0; i < dm->dc->caps.max_streams; i++) if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) { - DRM_ERROR("KMS: Failed to initialize crtc\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to initialize crtc\n"); goto fail; } @@ -5199,7 
+5191,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case IP_VERSION(3, 6, 0): case IP_VERSION(4, 0, 1): if (register_outbox_irq_handlers(dm->adev)) { - DRM_ERROR("DM: Failed to initialize IRQ\n"); + drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n"); goto fail; } break; @@ -5249,7 +5241,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } if (link_cnt > MAX_LINKS) { - DRM_ERROR( + drm_err(adev_to_drm(adev), "KMS: Cannot support more than %d display indexes\n", MAX_LINKS); goto fail; @@ -5265,12 +5257,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL); if (!wbcon) { - DRM_ERROR("KMS: Failed to allocate writeback connector\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to allocate writeback connector\n"); continue; } if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) { - DRM_ERROR("KMS: Failed to initialize writeback connector\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to initialize writeback connector\n"); kfree(wbcon); continue; } @@ -5290,12 +5282,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) goto fail; if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) { - DRM_ERROR("KMS: Failed to initialize encoder\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to initialize encoder\n"); goto fail; } if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) { - DRM_ERROR("KMS: Failed to initialize connector\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to initialize connector\n"); goto fail; } @@ -5304,7 +5296,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) aconnector; if (!dc_link_detect_connection_type(link, &new_connection_type)) - DRM_ERROR("KMS: Failed to detect connector\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n"); if (aconnector->base.force && new_connection_type == dc_connection_none) { emulated_link_detect(link); @@ -5326,8 +5318,15 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) if (amdgpu_dm_set_replay_caps(link, aconnector)) psr_feature_enabled = false; - if (psr_feature_enabled) + if (psr_feature_enabled) { amdgpu_dm_set_psr_caps(link); + drm_info(adev_to_drm(adev), "PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n", + link->psr_settings.psr_feature_enabled, + link->psr_settings.psr_version, + link->dpcd_caps.psr_info.psr_version, + link->dpcd_caps.psr_info.psr_dpcd_caps.raw, + link->dpcd_caps.psr_info.psr2_su_y_granularity_cap); + } } } amdgpu_set_panel_orientation(&aconnector->base); @@ -5341,7 +5340,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case CHIP_VERDE: case CHIP_OLAND: if (dce60_register_irq_handlers(dm->adev)) { - DRM_ERROR("DM: Failed to initialize IRQ\n"); + drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n"); goto fail; } break; @@ -5363,7 +5362,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case CHIP_VEGA12: case CHIP_VEGA20: if (dce110_register_irq_handlers(dm->adev)) { - DRM_ERROR("DM: Failed to initialize IRQ\n"); + drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n"); goto fail; } break; @@ -5391,12 +5390,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case IP_VERSION(3, 6, 0): case IP_VERSION(4, 0, 1): if (dcn10_register_irq_handlers(dm->adev)) { - DRM_ERROR("DM: Failed to initialize IRQ\n"); + drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n"); 
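
The writeback-connector loop in this function shows a deliberate error policy: a failure on one link frees only that link's allocation and continues, so one bad link degrades the device rather than failing the whole init. A small userspace sketch of that per-item-cleanup-then-continue shape, with invented names and a simulated failure:

#include <stdlib.h>
#include <stdio.h>

struct demo_conn { int id; };

static int demo_conn_init(struct demo_conn *c, int i)
{
	c->id = i;
	return i == 3 ? -1 : 0;	/* pretend link 3 fails */
}

static int demo_init_links(int n)
{
	int ok = 0;

	for (int i = 0; i < n; i++) {
		struct demo_conn *c = calloc(1, sizeof(*c));

		if (!c)
			continue;	/* skip this link, keep the rest */
		if (demo_conn_init(c, i)) {
			free(c);	/* undo only this link's allocation */
			continue;
		}
		ok++;
		free(c);	/* demo only; the driver keeps it registered */
	}
	return ok;
}

int main(void)
{
	printf("%d of 6 links up\n", demo_init_links(6));	/* 5 of 6 */
	return 0;
}
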
goto fail; } break; default: - DRM_ERROR("Unsupported DCE IP versions: 0x%X\n", + drm_err(adev_to_drm(adev), "Unsupported DCE IP versions: 0x%X\n", amdgpu_ip_version(adev, DCE_HWIP, 0)); goto fail; } @@ -5413,7 +5412,8 @@ fail: static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) { - drm_atomic_private_obj_fini(&dm->atomic_obj); + if (dm->atomic_obj.state) + drm_atomic_private_obj_fini(&dm->atomic_obj); } /****************************************************************************** @@ -5557,7 +5557,7 @@ static int dm_early_init(struct amdgpu_ip_block *ip_block) /* if there is no object header, skip DM */ if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; - dev_info(adev->dev, "No object header, skipping DM\n"); + drm_info(adev_to_drm(adev), "No object header, skipping DM\n"); return -ENOENT; } @@ -5669,7 +5669,7 @@ static int dm_early_init(struct amdgpu_ip_block *ip_block) adev->mode_info.num_dig = 4; break; default: - DRM_ERROR("Unsupported DCE IP versions: 0x%x\n", + drm_err(adev_to_drm(adev), "Unsupported DCE IP versions: 0x%x\n", amdgpu_ip_version(adev, DCE_HWIP, 0)); return -EINVAL; } @@ -5818,7 +5818,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev, plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616; break; default: - DRM_ERROR( + drm_err(adev_to_drm(adev), "Unsupported screen format %p4cc\n", &fb->format->format); return -EINVAL; @@ -6339,6 +6339,7 @@ static void fill_stream_properties_from_drm_display_mode( struct amdgpu_dm_connector *aconnector = NULL; struct hdmi_vendor_infoframe hv_frame; struct hdmi_avi_infoframe avi_frame; + ssize_t err; if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) aconnector = to_amdgpu_dm_connector(connector); @@ -6385,9 +6386,17 @@ static void fill_stream_properties_from_drm_display_mode( } if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { - drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in); + err = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, + (struct drm_connector *)connector, + mode_in); + if (err < 0) + drm_warn_once(connector->dev, "Failed to setup avi infoframe on connector %s: %zd \n", connector->name, err); timing_out->vic = avi_frame.video_code; - drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in); + err = drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, + (struct drm_connector *)connector, + mode_in); + if (err < 0) + drm_warn_once(connector->dev, "Failed to setup vendor infoframe on connector %s: %zd \n", connector->name, err); timing_out->hdmi_vic = hv_frame.vic; } @@ -6500,19 +6509,19 @@ decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, const struct drm_display_mode *native_mode, bool scale_enabled) { - if (scale_enabled) { - copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); - } else if (native_mode->clock == drm_mode->clock && - native_mode->htotal == drm_mode->htotal && - native_mode->vtotal == drm_mode->vtotal) { - copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); + if (scale_enabled || ( + native_mode->clock == drm_mode->clock && + native_mode->htotal == drm_mode->htotal && + native_mode->vtotal == drm_mode->vtotal)) { + if (native_mode->crtc_clock) + copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); } else { /* no scaling nor amdgpu inserted, no need to patch */ } } static struct dc_sink * -create_fake_sink(struct dc_link *link) 
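
fill_stream_properties_from_drm_display_mode() now checks the return value of the two infoframe constructors instead of discarding it, and drm_warn_once() keeps a misbehaving mode from flooding the log. The checked-call shape, as a sketch assuming the standard drm_hdmi_avi_infoframe_from_display_mode() helper; the demo_ function is illustrative:

#include <linux/hdmi.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>

static void demo_fill_avi(struct drm_connector *connector,
			  const struct drm_display_mode *mode)
{
	struct hdmi_avi_infoframe frame;
	ssize_t err;

	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
	if (err < 0)
		/* warn once and keep going: the VIC simply stays unset */
		drm_warn_once(connector->dev,
			      "AVI infoframe setup failed on %s: %zd\n",
			      connector->name, err);
}
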
+create_fake_sink(struct drm_device *dev, struct dc_link *link) { struct dc_sink_init_data sink_init_data = { 0 }; struct dc_sink *sink = NULL; @@ -6522,7 +6531,7 @@ create_fake_sink(struct dc_link *link) sink = dc_sink_create(&sink_init_data); if (!sink) { - DRM_ERROR("Failed to create sink!\n"); + drm_err(dev, "Failed to create sink!\n"); return NULL; } sink->sink_signal = SIGNAL_TYPE_VIRTUAL; @@ -6655,7 +6664,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector, m_pref = list_first_entry_or_null( &aconnector->base.modes, struct drm_display_mode, head); if (!m_pref) { - DRM_DEBUG_DRIVER("No preferred mode found in EDID\n"); + drm_dbg_driver(aconnector->base.dev, "No preferred mode found in EDID\n"); return NULL; } } @@ -6830,7 +6839,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, dc_link_get_highest_encoding_format(aconnector->dc_link), &stream->timing.dsc_cfg)) { stream->timing.flags.DSC = 1; - DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from SST RX\n", + drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from SST RX\n", __func__, drm_connector->name); } } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { @@ -6850,7 +6859,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, dc_link_get_highest_encoding_format(aconnector->dc_link), &stream->timing.dsc_cfg)) { stream->timing.flags.DSC = 1; - DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n", + drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n", __func__, drm_connector->name); } } @@ -6878,6 +6887,7 @@ create_stream_for_sink(struct drm_connector *connector, const struct dc_stream_state *old_stream, int requested_bpc) { + struct drm_device *dev = connector->dev; struct amdgpu_dm_connector *aconnector = NULL; struct drm_display_mode *preferred_mode = NULL; const struct drm_connector_state *con_state = &dm_state->base; @@ -6900,11 +6910,6 @@ create_stream_for_sink(struct drm_connector *connector, drm_mode_init(&mode, drm_mode); memset(&saved_mode, 0, sizeof(saved_mode)); - if (connector == NULL) { - DRM_ERROR("connector is NULL!\n"); - return stream; - } - if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) { aconnector = NULL; aconnector = to_amdgpu_dm_connector(connector); @@ -6919,7 +6924,7 @@ create_stream_for_sink(struct drm_connector *connector, } if (!aconnector || !aconnector->dc_sink) { - sink = create_fake_sink(link); + sink = create_fake_sink(dev, link); if (!sink) return stream; @@ -6931,7 +6936,7 @@ create_stream_for_sink(struct drm_connector *connector, stream = dc_create_stream_for_sink(sink); if (stream == NULL) { - DRM_ERROR("Failed to create stream for sink!\n"); + drm_err(dev, "Failed to create stream for sink!\n"); goto finish; } @@ -6963,7 +6968,7 @@ create_stream_for_sink(struct drm_connector *connector, * case, we call set mode ourselves to restore the previous mode * and the modelist may not be filled in time. 
*/ - DRM_DEBUG_DRIVER("No preferred mode found\n"); + drm_dbg_driver(dev, "No preferred mode found\n"); } else if (aconnector) { recalculate_timing = amdgpu_freesync_vid_mode && is_freesync_video_mode(&mode, aconnector); @@ -7413,6 +7418,7 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) struct dc_sink *dc_em_sink = aconnector->dc_em_sink; const struct drm_edid *drm_edid; struct i2c_adapter *ddc; + struct drm_device *dev = connector->dev; if (dc_link && dc_link->aux_mode) ddc = &aconnector->dm_dp_aux.aux.ddc; @@ -7422,7 +7428,7 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) drm_edid = drm_edid_read_ddc(connector, ddc); drm_edid_connector_update(connector, drm_edid); if (!drm_edid) { - DRM_ERROR("No EDID found on connector: %s.\n", connector->name); + drm_err(dev, "No EDID found on connector: %s.\n", connector->name); return; } @@ -7481,7 +7487,7 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector) drm_edid = drm_edid_read_ddc(connector, ddc); drm_edid_connector_update(connector, drm_edid); if (!drm_edid) { - DRM_ERROR("No EDID found on connector: %s.\n", connector->name); + drm_err(connector->dev, "No EDID found on connector: %s.\n", connector->name); return; } @@ -7573,7 +7579,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc, dc_result = DC_FAIL_ATTACH_SURFACES; if (dc_result == DC_OK) - dc_result = dc_validate_global_state(dc, dc_state, true); + dc_result = dc_validate_global_state(dc, dc_state, DC_VALIDATE_MODE_ONLY); cleanup: if (dc_state) @@ -7615,7 +7621,7 @@ create_validate_stream_for_sink(struct drm_connector *connector, dm_state, old_stream, requested_bpc); if (stream == NULL) { - DRM_ERROR("Failed to create stream for sink!\n"); + drm_err(adev_to_drm(adev), "Failed to create stream for sink!\n"); break; } @@ -7631,7 +7637,7 @@ create_validate_stream_for_sink(struct drm_connector *connector, dc_result = dm_validate_stream_and_context(adev->dm.dc, stream); if (dc_result != DC_OK) { - DRM_DEBUG_KMS("Mode %dx%d (clk %d) pixel_encoding:%s color_depth:%s failed validation -- %s\n", + DRM_DEBUG_KMS("Pruned mode %d x %d (clk %d) %s %s -- %s\n", drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->clock, @@ -7690,7 +7696,7 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL && aconnector->base.force != DRM_FORCE_ON) { - DRM_ERROR("dc_sink is NULL!\n"); + drm_err(connector->dev, "dc_sink is NULL!\n"); goto fail; } @@ -7898,6 +7904,23 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, int clock, bpp = 0; bool is_y420 = false; + if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) || + (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) { + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; + enum drm_mode_status result; + + result = drm_crtc_helper_mode_valid_fixed(encoder->crtc, adjusted_mode, native_mode); + if (result != MODE_OK && dm_new_connector_state->scaling == RMX_OFF) { + drm_dbg_driver(encoder->dev, + "mode %dx%d@%dHz is not native, enabling scaling\n", + adjusted_mode->hdisplay, adjusted_mode->vdisplay, + drm_mode_vrefresh(adjusted_mode)); + dm_new_connector_state->scaling = RMX_FULL; + } + return 0; + } + if (!aconnector->mst_output_port) return 0; @@ -8355,7 +8378,8 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) 
drm_add_modes_noedid(connector, 1920, 1080); } else { amdgpu_dm_connector_ddc_get_modes(connector, drm_edid); - if (encoder) + if (encoder && (connector->connector_type != DRM_MODE_CONNECTOR_eDP) && + (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)) amdgpu_dm_connector_add_common_modes(encoder, connector); amdgpu_dm_connector_add_freesync_modes(connector, drm_edid); } @@ -8598,7 +8622,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, i2c = create_i2c(link->ddc, false); if (!i2c) { - DRM_ERROR("Failed to create i2c adapter data\n"); + drm_err(adev_to_drm(dm->adev), "Failed to create i2c adapter data\n"); return -ENOMEM; } @@ -8606,7 +8630,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, res = i2c_add_adapter(&i2c->base); if (res) { - DRM_ERROR("Failed to register hw i2c %d\n", link->link_index); + drm_err(adev_to_drm(dm->adev), "Failed to register hw i2c %d\n", link->link_index); goto out_free; } @@ -8620,7 +8644,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, &i2c->base); if (res) { - DRM_ERROR("connector_init failed\n"); + drm_err(adev_to_drm(dm->adev), "connector_init failed\n"); aconnector->connector_id = -1; goto out_free; } @@ -8707,14 +8731,39 @@ static void manage_dm_interrupts(struct amdgpu_device *adev, int offdelay; if (acrtc_state) { - if (amdgpu_ip_version(adev, DCE_HWIP, 0) < - IP_VERSION(3, 5, 0) || - acrtc_state->stream->link->psr_settings.psr_version < - DC_PSR_VERSION_UNSUPPORTED || - !(adev->flags & AMD_IS_APU)) { - timing = &acrtc_state->stream->timing; - - /* at least 2 frames */ + timing = &acrtc_state->stream->timing; + + /* + * Depending on when the HW latching event of double-buffered + * registers happen relative to the PSR SDP deadline, and how + * bad the Panel clock has drifted since the last ALPM off + * event, there can be up to 3 frames of delay between sending + * the PSR exit cmd to DMUB fw, and when the panel starts + * displaying live frames. + * + * We can set: + * + * 20/100 * offdelay_ms = 3_frames_ms + * => offdelay_ms = 5 * 3_frames_ms + * + * This ensures that `3_frames_ms` will only be experienced as a + * 20% delay on top how long the display has been static, and + * thus make the delay less perceivable. + */ + if (acrtc_state->stream->link->psr_settings.psr_version < + DC_PSR_VERSION_UNSUPPORTED) { + offdelay = DIV64_U64_ROUND_UP((u64)5 * 3 * 10 * + timing->v_total * + timing->h_total, + timing->pix_clk_100hz); + config.offdelay_ms = offdelay ?: 30; + } else if (amdgpu_ip_version(adev, DCE_HWIP, 0) < + IP_VERSION(3, 5, 0) || + !(adev->flags & AMD_IS_APU)) { + /* + * Older HW and DGPU have issues with instant off; + * use a 2 frame offdelay. 
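
The long comment in manage_dm_interrupts() below derives the PSR vblank off-delay: up to three frames of latency can separate the PSR-exit command from live frames, and sizing the off-delay at five times that worst case caps the exit latency at roughly 20% of however long the display sat idle. With pix_clk_100hz in units of 100 Hz, one frame in milliseconds is 10 * v_total * h_total / pix_clk_100hz, which is where the 5 * 3 * 10 factor comes from. A runnable check for the standard 1080p60 timing:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* CTA-861 1080p60: 2200x1125 total, 148.5 MHz pixel clock */
	uint64_t h_total = 2200, v_total = 1125;
	uint64_t pix_clk_100hz = 1485000;

	/* one frame, in ms: 10 * v_total * h_total / pix_clk_100hz */
	double frame_ms = 10.0 * v_total * h_total / pix_clk_100hz;

	/* the driver's 5 * 3-frame off-delay, rounded up as in the hunk */
	uint64_t off = (5 * 3 * 10 * v_total * h_total + pix_clk_100hz - 1)
			/ pix_clk_100hz;

	printf("frame %.3f ms, offdelay %llu ms\n",
	       frame_ms, (unsigned long long)off);	/* 16.667 ms, 250 ms */
	return 0;
}
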
+ */ offdelay = DIV64_U64_ROUND_UP((u64)20 * timing->v_total * timing->h_total, @@ -8722,6 +8771,8 @@ static void manage_dm_interrupts(struct amdgpu_device *adev, config.offdelay_ms = offdelay ?: 30; } else { + /* offdelay_ms = 0 will never disable vblank */ + config.offdelay_ms = 1; config.disable_immediate = true; } @@ -9083,7 +9134,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, */ WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0); WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0); - DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n", + drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR off->on: Get vblank ref\n", __func__, new_state->base.crtc->base.id); } else if (old_vrr_active && !new_vrr_active) { /* Transition VRR active -> inactive: @@ -9091,7 +9142,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, */ WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0); drm_crtc_vblank_put(new_state->base.crtc); - DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n", + drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR on->off: Drop vblank ref\n", __func__, new_state->base.crtc->base.id); } } @@ -9178,13 +9229,13 @@ static void amdgpu_dm_update_cursor(struct drm_plane *plane, if (crtc_state->stream) { if (!dc_stream_set_cursor_attributes(crtc_state->stream, &attributes)) - DRM_ERROR("DC failed to set cursor attributes\n"); + drm_err(adev_to_drm(adev), "DC failed to set cursor attributes\n"); update->cursor_attributes = &crtc_state->stream->cursor_attributes; if (!dc_stream_set_cursor_position(crtc_state->stream, &position)) - DRM_ERROR("DC failed to set cursor position\n"); + drm_err(adev_to_drm(adev), "DC failed to set cursor position\n"); update->cursor_position = &crtc_state->stream->cursor_position; } @@ -9435,7 +9486,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates[planes_count].surface = dc_plane; if (!bundle->surface_updates[planes_count].surface) { - DRM_ERROR("No surface for CRTC: id=%d\n", + drm_err(dev, "No surface for CRTC: id=%d\n", acrtc_attach->crtc_id); continue; } @@ -9951,20 +10002,20 @@ static void dm_set_writeback(struct amdgpu_display_manager *dm, wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL); if (!wb_info) { - DRM_ERROR("Failed to allocate wb_info\n"); + drm_err(adev_to_drm(adev), "Failed to allocate wb_info\n"); return; } acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc); if (!acrtc) { - DRM_ERROR("no amdgpu_crtc found\n"); + drm_err(adev_to_drm(adev), "no amdgpu_crtc found\n"); kfree(wb_info); return; } afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb); if (!afb) { - DRM_ERROR("No amdgpu_framebuffer found\n"); + drm_err(adev_to_drm(adev), "No amdgpu_framebuffer found\n"); kfree(wb_info); return; } @@ -10185,7 +10236,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) enable_encryption = true; - DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption); + drm_info(adev_to_drm(adev), "[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption); if (aconnector->dc_link) hdcp_update_display( @@ -10277,7 +10328,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) */ dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC); if (!dummy_updates) { - DRM_ERROR("Failed to allocate 
memory for dummy_updates.\n"); + drm_err(adev_to_drm(adev), "Failed to allocate memory for dummy_updates.\n"); continue; } for (j = 0; j < status->plane_count; j++) @@ -10485,16 +10536,20 @@ static int dm_force_atomic_commit(struct drm_connector *connector) */ conn_state = drm_atomic_get_connector_state(state, connector); - ret = PTR_ERR_OR_ZERO(conn_state); - if (ret) + /* Check for error in getting connector state */ + if (IS_ERR(conn_state)) { + ret = PTR_ERR(conn_state); goto out; + } /* Attach crtc to drm_atomic_state*/ crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base); - ret = PTR_ERR_OR_ZERO(crtc_state); - if (ret) + /* Check for error in getting crtc state */ + if (IS_ERR(crtc_state)) { + ret = PTR_ERR(crtc_state); goto out; + } /* force a restore */ crtc_state->mode_changed = true; @@ -10502,9 +10557,11 @@ static int dm_force_atomic_commit(struct drm_connector *connector) /* Attach plane to drm_atomic_state */ plane_state = drm_atomic_get_plane_state(state, plane); - ret = PTR_ERR_OR_ZERO(plane_state); - if (ret) + /* Check for error in getting plane state */ + if (IS_ERR(plane_state)) { + ret = PTR_ERR(plane_state); goto out; + } /* Call commit internally with the state we just constructed */ ret = drm_atomic_commit(state); @@ -10512,7 +10569,7 @@ static int dm_force_atomic_commit(struct drm_connector *connector) out: drm_atomic_state_put(state); if (ret) - DRM_ERROR("Restoring old state failed with %i\n", ret); + drm_err(ddev, "Restoring old state failed with %i\n", ret); return ret; } @@ -10596,7 +10653,7 @@ static int do_aquire_global_lock(struct drm_device *dev, &commit->flip_done, 10*HZ); if (ret == 0) - DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n", + drm_err(dev, "[CRTC:%d:%s] hw_done or flip_done timed out\n", crtc->base.id, crtc->name); drm_crtc_commit_put(commit); @@ -10712,6 +10769,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, struct dm_atomic_state *dm_state = NULL; struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; struct dc_stream_state *new_stream; + struct amdgpu_device *adev = dm->adev; int ret = 0; /* @@ -10741,8 +10799,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, drm_old_conn_state = drm_atomic_get_old_connector_state(state, connector); - if (IS_ERR(drm_new_conn_state)) { - ret = PTR_ERR_OR_ZERO(drm_new_conn_state); + if (WARN_ON(!drm_new_conn_state)) { + ret = -EINVAL; goto fail; } @@ -10765,7 +10823,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, */ if (!new_stream) { - DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", + drm_dbg_driver(adev_to_drm(adev), "%s: Failed to create new stream for crtc %d\n", __func__, acrtc->base.base.id); ret = -ENOMEM; goto fail; @@ -10803,7 +10861,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { new_crtc_state->mode_changed = false; - DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d", + drm_dbg_driver(adev_to_drm(adev), "Mode change not required, setting mode_changed to %d", new_crtc_state->mode_changed); } } @@ -10841,7 +10899,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) { new_crtc_state->mode_changed = false; - DRM_DEBUG_DRIVER( + drm_dbg_driver(adev_to_drm(adev), "Mode change not required for front porch change, setting 
mode_changed to %d", new_crtc_state->mode_changed); @@ -10862,7 +10920,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, if (ret) goto fail; - DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n", + drm_dbg_driver(adev_to_drm(adev), "Disabling DRM crtc: %d\n", crtc->base.id); /* i.e. reset mode */ @@ -10995,6 +11053,9 @@ static bool should_reset_plane(struct drm_atomic_state *state, state->allow_modeset) return true; + if (amdgpu_in_reset(adev) && state->allow_modeset) + return true; + /* Exit early if we know that we're adding or removing the plane. */ if (old_plane_state->crtc != new_plane_state->crtc) return true; @@ -11712,7 +11773,7 @@ static bool amdgpu_dm_crtc_mem_type_changed(struct drm_device *dev, old_plane_state = drm_atomic_get_plane_state(state, plane); if (IS_ERR(new_plane_state) || IS_ERR(old_plane_state)) { - DRM_ERROR("Failed to get plane state for plane %s\n", plane->name); + drm_err(dev, "Failed to get plane state for plane %s\n", plane->name); return false; } @@ -12158,7 +12219,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, drm_dbg_atomic(dev, "MST drm_dp_mst_atomic_check() failed\n"); goto fail; } - status = dc_validate_global_state(dc, dm_state->context, true); + status = dc_validate_global_state(dc, dm_state->context, DC_VALIDATE_MODE_ONLY); if (status != DC_OK) { drm_dbg_atomic(dev, "DC global validation failure: %s (%d)", dc_status_to_str(status), status); @@ -12281,7 +12342,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); if (!res) { - DRM_ERROR("EDID CEA parser failed\n"); + drm_err(adev_to_drm(dm->adev), "EDID CEA parser failed\n"); return false; } @@ -12289,7 +12350,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, if (output->type == DMUB_CMD__EDID_CEA_ACK) { if (!output->ack.success) { - DRM_ERROR("EDID CEA ack failed at offset %d\n", + drm_err(adev_to_drm(dm->adev), "EDID CEA ack failed at offset %d\n", output->ack.offset); } } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) { @@ -12301,7 +12362,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate; vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate; } else { - DRM_WARN("Unknown EDID CEA parser results\n"); + drm_warn(adev_to_drm(dm->adev), "Unknown EDID CEA parser results\n"); return false; } @@ -12517,7 +12578,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE; if (!connector->state) { - DRM_ERROR("%s - Connector has no state", __func__); + drm_err(adev_to_drm(adev), "%s - Connector has no state", __func__); goto update; } @@ -12702,7 +12763,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync( } if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) { - DRM_ERROR("wait_for_completion_timeout timeout!"); + drm_err(adev_to_drm(adev), "wait_for_completion_timeout timeout!"); *operation_result = AUX_RET_ERROR_TIMEOUT; goto out; } @@ -12712,31 +12773,24 @@ int amdgpu_dm_process_dmub_aux_transfer_sync( * Transient states before tunneling is enabled could * lead to this error. We can ignore this for now. 
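 * (The raw p_notify->result is passed back to the caller below instead of being collapsed into AUX_RET_ERROR_INVALID_REPLY, so callers see the real failure reason.)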
*/ - if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) { - DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n", + if (p_notify->result == AUX_RET_ERROR_PROTOCOL_ERROR) { + drm_warn(adev_to_drm(adev), "DPIA AUX failed on 0x%x(%d), error %d\n", payload->address, payload->length, p_notify->result); } - *operation_result = AUX_RET_ERROR_INVALID_REPLY; + *operation_result = p_notify->result; goto out; } + payload->reply[0] = adev->dm.dmub_notify->aux_reply.command & 0xF; + if (adev->dm.dmub_notify->aux_reply.command & 0xF0) + /* The reply is stored in the top nibble of the command. */ + payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF; - payload->reply[0] = adev->dm.dmub_notify->aux_reply.command; - if (!payload->write && p_notify->aux_reply.length && - (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) { - - if (payload->length != p_notify->aux_reply.length) { - DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n", - p_notify->aux_reply.length, - payload->address, payload->length); - *operation_result = AUX_RET_ERROR_INVALID_REPLY; - goto out; - } - + /* a write request may also receive a byte indicating the number of bytes partially written */ + if (p_notify->aux_reply.length) memcpy(payload->data, p_notify->aux_reply.data, p_notify->aux_reply.length); - } /* success */ ret = p_notify->aux_reply.length; @@ -12747,6 +12801,79 @@ out: return ret; } +static void abort_fused_io( + struct dc_context *ctx, + const struct dmub_cmd_fused_request *request +) +{ + union dmub_rb_cmd command = { 0 }; + struct dmub_rb_cmd_fused_io *io = &command.fused_io; + + io->header.type = DMUB_CMD__FUSED_IO; + io->header.sub_type = DMUB_CMD__FUSED_IO_ABORT; + io->header.payload_bytes = sizeof(*io) - sizeof(io->header); + io->request = *request; + dm_execute_dmub_cmd(ctx, &command, DM_DMUB_WAIT_TYPE_NO_WAIT); +} + +static bool execute_fused_io( + struct amdgpu_device *dev, + struct dc_context *ctx, + union dmub_rb_cmd *commands, + uint8_t count, + uint32_t timeout_us +) +{ + const uint8_t ddc_line = commands[0].fused_io.request.u.aux.ddc_line; + + if (ddc_line >= ARRAY_SIZE(dev->dm.fused_io)) + return false; + + struct fused_io_sync *sync = &dev->dm.fused_io[ddc_line]; + struct dmub_rb_cmd_fused_io *first = &commands[0].fused_io; + const bool result = dm_execute_dmub_cmd_list(ctx, count, commands, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) + && first->header.ret_status + && first->request.status == FUSED_REQUEST_STATUS_SUCCESS; + + if (!result) + return false; + + /* Wait for the reply matching this request's identifier; a reply for + * some other request on the same DDC line simply re-arms the wait. + */ + while (wait_for_completion_timeout(&sync->replied, usecs_to_jiffies(timeout_us))) { + reinit_completion(&sync->replied); + + struct dmub_cmd_fused_request *reply = (struct dmub_cmd_fused_request *) sync->reply_data; + + static_assert(sizeof(*reply) <= sizeof(sync->reply_data), "Size mismatch"); + + if (reply->identifier == first->request.identifier) { + first->request = *reply; + return true; + } + } + + reinit_completion(&sync->replied); + first->request.status = FUSED_REQUEST_STATUS_TIMEOUT; + abort_fused_io(ctx, &first->request); + return false; +} + +bool amdgpu_dm_execute_fused_io( + struct amdgpu_device *dev, + struct dc_link *link, + union dmub_rb_cmd *commands, + uint8_t count, + uint32_t timeout_us) +{ + struct amdgpu_display_manager *dm = &dev->dm; + + mutex_lock(&dm->dpia_aux_lock); + + const bool result = execute_fused_io(dev, link->ctx, commands, count, timeout_us); + + mutex_unlock(&dm->dpia_aux_lock); + return result; +} + int amdgpu_dm_process_dmub_set_config_sync( struct dc_context *ctx, unsigned int link_index, @@ -12765,7
+12892,7 @@ int amdgpu_dm_process_dmub_set_config_sync( ret = 0; *operation_result = adev->dm.dmub_notify->sc_status; } else { - DRM_ERROR("wait_for_completion_timeout timeout!"); + drm_err(adev_to_drm(adev), "wait_for_completion_timeout timeout!"); ret = -1; *operation_result = SET_CONFIG_UNKNOWN_ERROR; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 385faaca6e26..b937da0a4e4a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -50,7 +50,7 @@ #define AMDGPU_DM_MAX_NUM_EDP 2 -#define AMDGPU_DMUB_NOTIFICATION_MAX 7 +#define AMDGPU_DMUB_NOTIFICATION_MAX 8 #define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A #define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40 @@ -81,6 +81,7 @@ struct amdgpu_bo; struct dmub_srv; struct dc_plane_state; struct dmub_notification; +struct dmub_cmd_fused_request; struct amd_vsdb_block { unsigned char ieee_id[3]; @@ -276,6 +277,10 @@ struct hpd_rx_irq_offload_work { * @offload_wq: offload work queue that this work is queued to */ struct hpd_rx_irq_offload_work_queue *offload_wq; + /** + * @adev: amdgpu_device pointer + */ + struct amdgpu_device *adev; }; /** @@ -614,6 +619,13 @@ struct amdgpu_display_manager { bool aux_hpd_discon_quirk; /** + * @edp0_on_dp1_quirk: + * + * quirk for platforms that put edp0 on DP1. + */ + bool edp0_on_dp1_quirk; + + /** * @dpia_aux_lock: * * Guards access to DPIA AUX */ @@ -624,8 +636,9 @@ struct amdgpu_display_manager { * @bb_from_dmub: * * Bounding box data read from dmub during early initialization for DCN4+ + * Data is stored as a byte array that should be cast to the appropriate bb struct */ - struct dml2_soc_bb *bb_from_dmub; + void *bb_from_dmub; /** * @oem_i2c: * * OEM i2c bus */ struct amdgpu_i2c_adapter *oem_i2c; + + /** + * @fused_io: + * + * dmub fused io interface + */ + struct fused_io_sync { + struct completion replied; + char reply_data[0x40]; // Cannot include dmub_cmd here + } fused_io[8]; }; enum dsc_clock_force_state { @@ -1012,6 +1035,14 @@ extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs; int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int link_index, struct aux_payload *payload, enum aux_return_code_type *operation_result); +bool amdgpu_dm_execute_fused_io( + struct amdgpu_device *dev, + struct dc_link *link, + union dmub_rb_cmd *commands, + uint8_t count, + uint32_t timeout_us +); + int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int link_index, struct set_config_cmd_payload *payload, enum set_config_status *operation_result); @@ -1045,4 +1076,6 @@ void hdmi_cec_set_edid(struct amdgpu_dm_connector *aconnector); void hdmi_cec_unset_edid(struct amdgpu_dm_connector *aconnector); int amdgpu_dm_initialize_hdmi_connector(struct amdgpu_dm_connector *aconnector); +void retrieve_dmi_info(struct amdgpu_display_manager *dm); + #endif /* __AMDGPU_DM_H__ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 36a830a7440f..2551823382f8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -113,6 +113,7 @@ bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state) * * Panel Replay and PSR SU * - Enable when: + * - VRR is disabled * - vblank counter is disabled * - 
entry is allowed: usermode demonstrates an adequate number of fast * commits @@ -131,19 +132,20 @@ static void amdgpu_dm_crtc_set_panel_sr_feature( bool is_sr_active = (link->replay_settings.replay_allow_active || link->psr_settings.psr_allow_active); bool is_crc_window_active = false; + bool vrr_active = amdgpu_dm_crtc_vrr_active_irq(vblank_work->acrtc); #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY is_crc_window_active = amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base); #endif - if (link->replay_settings.replay_feature_enabled && + if (link->replay_settings.replay_feature_enabled && !vrr_active && allow_sr_entry && !is_sr_active && !is_crc_window_active) { amdgpu_dm_replay_enable(vblank_work->stream, true); } else if (vblank_enabled) { if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active) amdgpu_dm_psr_disable(vblank_work->stream, false); - } else if (link->psr_settings.psr_feature_enabled && + } else if (link->psr_settings.psr_feature_enabled && !vrr_active && allow_sr_entry && !is_sr_active && !is_crc_window_active) { struct amdgpu_dm_connector *aconn = @@ -726,7 +728,16 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, * support programmable degamma anywhere. */ is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch; - drm_crtc_enable_color_mgmt(&acrtc->base, is_dcn ? MAX_COLOR_LUT_ENTRIES : 0, + /* Don't enable DRM CRTC degamma property for DCN401 since the + * pre-blending degamma LUT doesn't apply to cursor, and therefore + * can't work similarly to a post-blending degamma LUT as in other hw + * versions. + * TODO: revisit it once KMS plane color API is merged. + */ + drm_crtc_enable_color_mgmt(&acrtc->base, + (is_dcn && + dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01) ? + MAX_COLOR_LUT_ENTRIES : 0, true, MAX_COLOR_LUT_ENTRIES); drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index c7d13e743e6c..b726bcd18e29 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -3988,7 +3988,7 @@ static int capabilities_show(struct seq_file *m, void *unused) struct hubbub *hubbub = dc->res_pool->hubbub; - if (hubbub->funcs->get_mall_en) + if (hubbub && hubbub->funcs->get_mall_en) hubbub->funcs->get_mall_en(hubbub, &mall_in_use); if (dc->cap_funcs.get_subvp_en) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index 5198a079b463..b1d1897f5eaf 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -26,6 +26,7 @@ #include "amdgpu_dm_hdcp.h" #include "amdgpu.h" #include "amdgpu_dm.h" +#include "dc_fused_io.h" #include "dm_helpers.h" #include <drm/display/drm_hdcp_helper.h> #include "hdcp_psp.h" @@ -76,6 +77,34 @@ lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size) return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size); } +static bool lp_atomic_write_poll_read_i2c( + void *handle, + const struct mod_hdcp_atomic_op_i2c *write, + const struct mod_hdcp_atomic_op_i2c *poll, + struct mod_hdcp_atomic_op_i2c *read, + uint32_t poll_timeout_us, + uint8_t poll_mask_msb +) +{ + struct dc_link *link = handle; + + return dm_atomic_write_poll_read_i2c(link, write, poll, read, poll_timeout_us, poll_mask_msb); +} + +static bool lp_atomic_write_poll_read_aux(
void *handle, + const struct mod_hdcp_atomic_op_aux *write, + const struct mod_hdcp_atomic_op_aux *poll, + struct mod_hdcp_atomic_op_aux *read, + uint32_t poll_timeout_us, + uint8_t poll_mask_msb +) +{ + struct dc_link *link = handle; + + return dm_atomic_write_poll_read_aux(link, write, poll, read, poll_timeout_us, poll_mask_msb); +} + static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size) { struct ta_hdcp_shared_memory *hdcp_cmd; @@ -173,6 +202,9 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, unsigned int conn_index = aconnector->base.index; guard(mutex)(&hdcp_w->mutex); + drm_connector_get(&aconnector->base); + if (hdcp_w->aconnector[conn_index]) + drm_connector_put(&hdcp_w->aconnector[conn_index]->base); hdcp_w->aconnector[conn_index] = aconnector; memset(&link_adjust, 0, sizeof(link_adjust)); @@ -220,7 +252,6 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, unsigned int conn_index = aconnector->base.index; guard(mutex)(&hdcp_w->mutex); - hdcp_w->aconnector[conn_index] = aconnector; /* the removal of display will invoke auth reset -> hdcp destroy and * we'd expect the Content Protection (CP) property changed back to @@ -236,7 +267,10 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, } mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); - + if (hdcp_w->aconnector[conn_index]) { + drm_connector_put(&hdcp_w->aconnector[conn_index]->base); + hdcp_w->aconnector[conn_index] = NULL; + } process_output(hdcp_w); } @@ -254,6 +288,10 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) { hdcp_w->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + if (hdcp_w->aconnector[conn_index]) { + drm_connector_put(&hdcp_w->aconnector[conn_index]->base); + hdcp_w->aconnector[conn_index] = NULL; + } } process_output(hdcp_w); @@ -488,6 +526,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) struct hdcp_workqueue *hdcp_work = handle; struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx; int link_index = aconnector->dc_link->link_index; + unsigned int conn_index = aconnector->base.index; struct mod_hdcp_display *display = &hdcp_work[link_index].display; struct mod_hdcp_link *link = &hdcp_work[link_index].link; struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; @@ -544,7 +583,10 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) guard(mutex)(&hdcp_w->mutex); mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output); - + drm_connector_get(&aconnector->base); + if (hdcp_w->aconnector[conn_index]) + drm_connector_put(&hdcp_w->aconnector[conn_index]->base); + hdcp_w->aconnector[conn_index] = aconnector; process_output(hdcp_w); } @@ -681,8 +723,8 @@ ret: static const struct bin_attribute data_attr = { .attr = {.name = "hdcp_srm", .mode = 0664}, .size = PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, /* Limit SRM size */ - .write_new = srm_data_write, - .read_new = srm_data_read, + .write = srm_data_write, + .read = srm_data_read, }; struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, @@ -719,7 +761,10 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer); INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate); - hdcp_work[i].hdcp.config.psp.handle = 
&adev->psp; + struct mod_hdcp_config *config = &hdcp_work[i].hdcp.config; + struct mod_hdcp_ddc_funcs *ddc_funcs = &config->ddc.funcs; + + config->psp.handle = &adev->psp; if (dc->ctx->dce_version == DCN_VERSION_3_1 || dc->ctx->dce_version == DCN_VERSION_3_14 || dc->ctx->dce_version == DCN_VERSION_3_15 || @@ -727,12 +772,22 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, dc->ctx->dce_version == DCN_VERSION_3_51 || dc->ctx->dce_version == DCN_VERSION_3_6 || dc->ctx->dce_version == DCN_VERSION_3_16) - hdcp_work[i].hdcp.config.psp.caps.dtm_v3_supported = 1; - hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i); - hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c; - hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c; - hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd; - hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd; + config->psp.caps.dtm_v3_supported = 1; + config->ddc.handle = dc_get_link_at_index(dc, i); + + ddc_funcs->write_i2c = lp_write_i2c; + ddc_funcs->read_i2c = lp_read_i2c; + ddc_funcs->write_dpcd = lp_write_dpcd; + ddc_funcs->read_dpcd = lp_read_dpcd; + + config->debug.lc_enable_sw_fallback = dc->debug.hdcp_lc_enable_sw_fallback; + if (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable) { + ddc_funcs->atomic_write_poll_read_i2c = lp_atomic_write_poll_read_i2c; + ddc_funcs->atomic_write_poll_read_aux = lp_atomic_write_poll_read_aux; + } else { + ddc_funcs->atomic_write_poll_read_i2c = NULL; + ddc_funcs->atomic_write_poll_read_aux = NULL; + } memset(hdcp_work[i].aconnector, 0, sizeof(struct amdgpu_dm_connector *) * diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 2cd35392e2da..9e3e51a2dc49 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -630,6 +630,19 @@ bool dm_helpers_submit_i2c( return result; } +bool dm_helpers_execute_fused_io( + struct dc_context *ctx, + struct dc_link *link, + union dmub_rb_cmd *commands, + uint8_t count, + uint32_t timeout_us +) +{ + struct amdgpu_device *dev = ctx->driver_context; + + return amdgpu_dm_execute_fused_io(dev, link, commands, count, timeout_us); +} + static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, bool is_write_cmd, unsigned char cmd, @@ -918,7 +931,7 @@ dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len) { struct drm_connector *connector = data; struct acpi_device *acpidev = ACPI_COMPANION(connector->dev->dev); - unsigned char start = block * EDID_LENGTH; + unsigned short start = block * EDID_LENGTH; struct edid *edid; int r; @@ -1016,6 +1029,10 @@ enum dc_edid_status dm_helpers_read_local_edid( return EDID_NO_RESPONSE; edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() + if (!edid || + edid->extensions >= sizeof(sink->dc_edid.raw_edid) / EDID_LENGTH) + return EDID_BAD_INPUT; + sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1); memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 7ceedf626d23..7187d5aedf0a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -51,6 +51,9 @@ #define PEAK_FACTOR_X1000 1006 +/* + * This function handles both native AUX and 
I2C-Over-AUX transactions. + */ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { @@ -59,6 +62,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, enum aux_return_code_type operation_result; struct amdgpu_device *adev; struct ddc_service *ddc; + uint8_t copy[16]; if (WARN_ON(msg->size > 16)) return -E2BIG; @@ -74,6 +78,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, (msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0; payload.defer_delay = 0; + if (payload.write) { + memcpy(copy, msg->buffer, msg->size); + payload.data = copy; + } + result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload, &operation_result); @@ -87,15 +96,25 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, if (adev->dm.aux_hpd_discon_quirk) { if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE && operation_result == AUX_RET_ERROR_HPD_DISCON) { - result = 0; + result = msg->size; operation_result = AUX_RET_SUCCESS; } } - if (payload.write && result >= 0) - result = msg->size; + /* + * A result of 0 also covers the AUX_DEFER/I2C_DEFER cases. + */ + if (payload.write && result >= 0) { + if (result) { + /* one byte indicating the number of partially written bytes */ + drm_dbg_dp(adev_to_drm(adev), "AUX partially written\n"); + result = payload.data[0]; + } else if (!payload.reply[0]) + /*I2C_ACK|AUX_ACK*/ + result = msg->size; + } - if (result < 0) + if (result < 0) { switch (operation_result) { case AUX_RET_SUCCESS: break; @@ -114,6 +133,13 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, break; } + drm_dbg_dp(adev_to_drm(adev), "DP AUX transfer fail:%d\n", operation_result); + } + + if (payload.reply[0]) + drm_dbg_dp(adev_to_drm(adev), "AUX reply command not ACK: 0x%02x.", + payload.reply[0]); + return result; } @@ -1713,16 +1739,17 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream, struct dc_dsc_bw_range *bw_range) { struct dc_dsc_policy dsc_policy = {0}; + bool is_dsc_possible; dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link)); - dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0], - stream->sink->ctx->dc->debug.dsc_min_slice_height_override, - dsc_policy.min_target_bpp * 16, - dsc_policy.max_target_bpp * 16, - &stream->sink->dsc_caps.dsc_dec_caps, - &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range); - - return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16; + is_dsc_possible = dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0], + stream->sink->ctx->dc->debug.dsc_min_slice_height_override, + dsc_policy.min_target_bpp * 16, + dsc_policy.max_target_bpp * 16, + &stream->sink->dsc_caps.dsc_dec_caps, + &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range); + + return is_dsc_possible; } #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index 3e0f45f1711c..eef51652ca35 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -92,9 +92,9 @@ enum dm_micro_swizzle { MICRO_SWIZZLE_R = 3 }; -const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd) +const struct drm_format_info *amdgpu_dm_plane_get_format_info(u32 pixel_format, u64 modifier) { - return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]); + return
amdgpu_lookup_format_info(pixel_format, modifier); } void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state, @@ -948,13 +948,13 @@ static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane, adev = amdgpu_ttm_adev(rbo->tbo.bdev); r = amdgpu_bo_reserve(rbo, true); if (r) { - dev_err(adev->dev, "fail to reserve bo (%d)\n", r); + drm_err(adev_to_drm(adev), "fail to reserve bo (%d)\n", r); return r; } r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1); if (r) { - dev_err(adev->dev, "reserving fence slot failed (%d)\n", r); + drm_err(adev_to_drm(adev), "reserving fence slot failed (%d)\n", r); goto error_unlock; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h index 615d2ab2b803..ea2619b507db 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h @@ -58,7 +58,7 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, unsigned long possible_crtcs, const struct dc_plane_cap *plane_cap); -const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd); +const struct drm_format_info *amdgpu_dm_plane_get_format_info(u32 pixel_format, u64 modifier); void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state, bool *per_pixel_alpha, bool *pre_multiplied_alpha, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index e140b7a04d72..ff7b867ae98b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -87,14 +87,6 @@ void amdgpu_dm_set_psr_caps(struct dc_link *link) link->psr_settings.psr_feature_enabled = true; } - - DRM_INFO("PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n", - link->psr_settings.psr_feature_enabled, - link->psr_settings.psr_version, - link->dpcd_caps.psr_info.psr_version, - link->dpcd_caps.psr_info.psr_dpcd_caps.raw, - link->dpcd_caps.psr_info.psr2_su_y_granularity_cap); - } /* @@ -127,8 +119,10 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) psr_config.allow_multi_disp_optimizations = (amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT); - if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config)) - return false; + if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) { + if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config)) + return false; + } ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c new file mode 100644 index 000000000000..1da07ebf9217 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <linux/dmi.h> + +#include "amdgpu.h" +#include "amdgpu_dm.h" + +struct amdgpu_dm_quirks { + bool aux_hpd_discon; + bool support_edp0_on_dp1; +}; + +static struct amdgpu_dm_quirks quirk_entries = { + .aux_hpd_discon = false, + .support_edp0_on_dp1 = false +}; + +static int edp0_on_dp1_callback(const struct dmi_system_id *id) +{ + quirk_entries.support_edp0_on_dp1 = true; + return 0; +} + +static int aux_hpd_discon_callback(const struct dmi_system_id *id) +{ + quirk_entries.aux_hpd_discon = true; + return 0; +} + +static const struct dmi_system_id dmi_quirk_table[] = { + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"), + }, + }, + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"), + }, + }, + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"), + }, + }, + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"), + }, + }, + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"), + }, + }, + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"), + }, + }, + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"), + }, + }, + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"), + }, + }, + { + .callback = aux_hpd_discon_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"), + }, + }, + { + .callback = edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"), + }, + }, + { + .callback = edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 645 14 inch G11 Notebook PC"), + }, + }, + { + .callback = 
edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"), + }, + }, + { + .callback = edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 445 14 inch G11 Notebook PC"), + }, + }, + { + .callback = edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 465 16 inch G11 Notebook PC"), + }, + }, + {} + /* TODO: refactor this from a fixed table to a dynamic option */ +}; + +void retrieve_dmi_info(struct amdgpu_display_manager *dm) +{ + struct drm_device *dev = dm->ddev; + int dmi_id; + + dm->aux_hpd_discon_quirk = false; + dm->edp0_on_dp1_quirk = false; + + dmi_id = dmi_check_system(dmi_quirk_table); + + if (!dmi_id) + return; + + if (quirk_entries.aux_hpd_discon) { + dm->aux_hpd_discon_quirk = true; + drm_info(dev, "aux_hpd_discon_quirk attached\n"); + } + if (quirk_entries.support_edp0_on_dp1) { + dm->edp0_on_dp1_quirk = true; + drm_info(dev, "support_edp0_on_dp1 attached\n"); + } +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h index 4686d4b0cbad..95f890fda8aa 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h @@ -726,6 +726,32 @@ TRACE_EVENT(dcn_optc_lock_unlock_state, ) ); +TRACE_EVENT(amdgpu_dm_brightness, + TP_PROTO(void *function, u32 user_brightness, u32 converted_brightness, bool aux, bool ac), + TP_ARGS(function, user_brightness, converted_brightness, aux, ac), + TP_STRUCT__entry( + __field(void *, function) + __field(u32, user_brightness) + __field(u32, converted_brightness) + __field(bool, aux) + __field(bool, ac) + ), + TP_fast_assign( + __entry->function = function; + __entry->user_brightness = user_brightness; + __entry->converted_brightness = converted_brightness; + __entry->aux = aux; + __entry->ac = ac; + ), + TP_printk("%ps: brightness requested=%u converted=%u aux=%s power=%s", + (void *)__entry->function, + (u32)__entry->user_brightness, + (u32)__entry->converted_brightness, + (__entry->aux) ? "true" : "false", + (__entry->ac) ? 
"AC" : "DC" + ) +); + #endif /* _AMDGPU_DM_TRACE_H_ */ #undef TRACE_INCLUDE_PATH diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c index 0d5fefb0f591..d9527c05fc87 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c @@ -102,13 +102,13 @@ static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector r = amdgpu_bo_reserve(rbo, true); if (r) { - dev_err(adev->dev, "fail to reserve bo (%d)\n", r); + drm_err(adev_to_drm(adev), "fail to reserve bo (%d)\n", r); return r; } r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1); if (r) { - dev_err(adev->dev, "reserving fence slot failed (%d)\n", r); + drm_err(adev_to_drm(adev), "reserving fence slot failed (%d)\n", r); goto error_unlock; } diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index 3e1f5b689718..3c9ecea7eebc 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -53,31 +53,30 @@ DC_LIBS += hdcp ifdef CONFIG_DRM_AMD_DC_FP DC_LIBS += sspl -DC_SPL_TRANS += dc_spl_translate.o +AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/, dc_spl_translate.o) endif AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS))) include $(AMD_DC) -DISPLAY_CORE = dc.o dc_stat.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ -dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o dc_state.o +FILES = +FILES += dc_dmub_srv.o +FILES += dc_edid_parser.o +FILES += dc_fused_io.o +FILES += dc_helper.o +FILES += core/dc.o +FILES += core/dc_debug.o +FILES += core/dc_hw_sequencer.o +FILES += core/dc_link_enc_cfg.o +FILES += core/dc_link_exports.o +FILES += core/dc_resource.o +FILES += core/dc_sink.o +FILES += core/dc_stat.o +FILES += core/dc_state.o +FILES += core/dc_stream.o +FILES += core/dc_surface.o +FILES += core/dc_vm_helper.o + +AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/, $(FILES)) -DISPLAY_CORE += dc_vm_helper.o - -AMD_DISPLAY_CORE = $(addprefix $(AMDDALPATH)/dc/core/,$(DISPLAY_CORE)) - -AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o) - -AMD_DC_SPL_TRANS = $(addprefix $(AMDDALPATH)/dc/,$(DC_SPL_TRANS)) - -AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE) -AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE) - -DC_DMUB += dc_dmub_srv.o -DC_EDID += dc_edid_parser.o -AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB)) -AMD_DISPLAY_EDID = $(addprefix $(AMDDALPATH)/dc/,$(DC_EDID)) -AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) $(AMD_DISPLAY_EDID) - -AMD_DISPLAY_FILES += $(AMD_DC_SPL_TRANS) diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c index 681799468487..d897f8a30ede 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c @@ -1393,7 +1393,7 @@ static void calculate_bandwidth( if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) { /*determine the minimum dram clock change margin for each set of clock frequencies*/ data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin); - /*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/ + /*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/ 
data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k])))); if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) { data->display_pstate_change_enable[k] = 1; @@ -1407,7 +1407,7 @@ static void calculate_bandwidth( if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) { /*determine the minimum dram clock change margin for each display pipe*/ data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin); - /*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/ + /*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/ data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k])))); if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) { data->display_pstate_change_enable[k] = 1; diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c index 88d3f9d7dd55..452206b5095e 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c +++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c @@ -51,8 +51,6 @@ static inline unsigned long long complete_integer_division_u64( { unsigned long long result; - ASSERT(divisor); - result = div64_u64_rem(dividend, divisor, remainder); return result; @@ -213,9 +211,6 @@ struct fixed31_32 dc_fixpt_recip(struct fixed31_32 arg) * @note * Good idea to use Newton's method */ - - ASSERT(arg.value); - return dc_fixpt_from_fraction( dc_fixpt_one.value, arg.value); diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 3bacf470f7c5..67f08495b7e6 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -2384,10 +2384,10 @@ static enum bp_result get_integrated_info_v8( } /* - * 
get_integrated_info_v8 + * get_integrated_info_v9 * * @brief - * Get V8 integrated BIOS information + * Get V9 integrated BIOS information * * @param * bios_parser *bp - [in]BIOS parser handler to get master data table diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index 2c645dffec18..f2b1720a6a66 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -396,6 +396,7 @@ static enum bp_result transmitter_control_v1_7( process_phy_transition_init_params.display_port_link_rate = link->cur_link_settings.link_rate; process_phy_transition_init_params.transition_bitmask = link->phy_transition_bitmask; } + dig_v1_7.skip_phy_ssc_reduction = link->wa_flags.skip_phy_ssc_reduction; } // Handle PRE_OFF_TO_ON: Process ACPI PHY Transition Interlock diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile index d9955c5d2e5e..60021671b386 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile @@ -112,7 +112,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21) ############################################################################### # DCN30 ############################################################################### -CLK_MGR_DCN30 = dcn30_clk_mgr.o dcn30_clk_mgr_smu_msg.o +CLK_MGR_DCN30 = dcn30_clk_mgr.o dcn30_clk_mgr_smu_msg.o dcn30m_clk_mgr.o dcn30m_clk_mgr_smu_msg.o AMD_DAL_CLK_MGR_DCN30 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn30/,$(CLK_MGR_DCN30)) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 4c3e58c730b1..33b9d36619ff 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -67,7 +67,7 @@ int clk_mgr_helper_get_active_display_cnt( if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM) continue; - if (!stream->dpms_off || (stream_status && stream_status->plane_count)) + if (!stream->dpms_off || dc->is_switch_in_progress_dest || (stream_status && stream_status->plane_count)) display_count++; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c index 19897fa52e7e..d82a52319088 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c @@ -142,17 +142,3 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di return actual_dispclk_set_mhz * 1000; } - -int rv1_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr) -{ - int actual_dprefclk_set_mhz = -1; - - actual_dprefclk_set_mhz = rv1_vbios_smu_send_msg_with_param( - clk_mgr, - VBIOSSMC_MSG_SetDprefclkFreq, - khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz)); - - /* TODO: add code for programing DP DTO, currently this is down by command table */ - - return actual_dprefclk_set_mhz * 1000; -} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h index 083cb3158859..81d7c912549c 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h @@ -27,6 +27,5 @@ #define DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_ int rv1_vbios_smu_set_dispclk(struct 
clk_mgr_internal *clk_mgr, int requested_dispclk_khz); -int rv1_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr); #endif /* DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c index 23b390245b5d..5a633333dbb5 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c @@ -164,20 +164,6 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis return actual_dispclk_set_mhz * 1000; } -int rn_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr) -{ - int actual_dprefclk_set_mhz = -1; - - actual_dprefclk_set_mhz = rn_vbios_smu_send_msg_with_param( - clk_mgr, - VBIOSSMC_MSG_SetDprefclkFreq, - khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz)); - - /* TODO: add code for programing DP DTO, currently this is down by command table */ - - return actual_dprefclk_set_mhz * 1000; -} - int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz) { int actual_dcfclk_set_mhz = -1; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h index 1ce19d875358..f76fad87f0e1 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h @@ -30,7 +30,6 @@ enum dcn_pwr_state; int rn_vbios_smu_get_smu_version(struct clk_mgr_internal *clk_mgr); int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz); -int rn_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr); int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz); int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz); void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h index fa09c594fd36..06da34676965 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h @@ -56,6 +56,7 @@ #define DALSMC_MSG_SetDisplayRefreshFromMall 0xF #define DALSMC_MSG_SetExternalClientDfCstateAllow 0x10 #define DALSMC_MSG_BacoAudioD3PME 0x11 -#define DALSMC_Message_Count 0x12 +#define DALSMC_MSG_SmartAccess 0x12 +#define DALSMC_Message_Count 0x13 #endif diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index 8083a553c60e..ef77fcd164ed 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -30,6 +30,7 @@ #include "dce100/dce_clk_mgr.h" #include "dcn30/dcn30_clk_mgr.h" #include "dml/dcn30/dcn30_fpu.h" +#include "dcn30/dcn30m_clk_mgr.h" #include "reg_helper.h" #include "core_types.h" #include "dm_helpers.h" @@ -498,7 +499,8 @@ static struct clk_mgr_funcs dcn3_funcs = { .are_clock_states_equal = dcn3_are_clock_states_equal, .enable_pme_wa = dcn3_enable_pme_wa, .notify_link_rate_change = dcn30_notify_link_rate_change, - .is_smu_present = dcn3_is_smu_present + .is_smu_present = dcn3_is_smu_present, + .set_smartmux_switch = dcn30m_set_smartmux_switch }; static void 
dcn3_init_clocks_fpga(struct clk_mgr *clk_mgr) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c new file mode 100644 index 000000000000..8e8a11c7437e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c @@ -0,0 +1,36 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "clk_mgr_internal.h" +#include "dcn30/dcn30m_clk_mgr.h" +#include "dcn30m_clk_mgr_smu_msg.h" + + +uint32_t dcn30m_set_smartmux_switch(struct clk_mgr *clk_mgr_base, uint32_t pins_to_set) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + return dcn30m_smu_set_smart_mux_switch(clk_mgr, pins_to_set); +} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h new file mode 100644 index 000000000000..757985b2eadc --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h @@ -0,0 +1,31 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DCN30M_CLK_MGR_H__ +#define __DCN30M_CLK_MGR_H__ + +uint32_t dcn30m_set_smartmux_switch(struct clk_mgr *clk_mgr_base, uint32_t pins_to_set); + +#endif //__DCN30M_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c new file mode 100644 index 000000000000..0dd0583ff21e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c @@ -0,0 +1,118 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dcn30m_clk_mgr_smu_msg.h" + +#include "clk_mgr_internal.h" +#include "reg_helper.h" +#include "dm_helpers.h" + +#include "dalsmc.h" + +#define mmDAL_MSG_REG 0x1628A +#define mmDAL_ARG_REG 0x16273 +#define mmDAL_RESP_REG 0x16274 + +#define REG(reg_name) \ + mm ## reg_name + +#include "logger_types.h" +#undef DC_LOGGER +#define DC_LOGGER \ + CTX->logger +#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); } + + +/* + * Function to be used instead of REG_WAIT macro because the wait ends when + * the register is NOT EQUAL to zero, and because the translation in msg_if.h + * won't work with REG_WAIT. + */ +static uint32_t dcn30m_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, + unsigned int delay_us, unsigned int max_retries) +{ + uint32_t reg = 0; + + do { + reg = REG_READ(DAL_RESP_REG); + if (reg) + break; + + if (delay_us >= 1000) + msleep(delay_us/1000); + else if (delay_us > 0) + udelay(delay_us); + } while (max_retries--); + + /* handle DALSMC_Result_CmdRejectedBusy? */ + + /* Log? 
*/ + + return reg; +} + +static bool dcn30m_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, + uint32_t msg_id, uint32_t param_in, uint32_t *param_out) +{ + uint32_t result; + /* Wait for response register to be ready */ + dcn30m_smu_wait_for_response(clk_mgr, 10, 200000); + + /* Clear response register */ + REG_WRITE(DAL_RESP_REG, 0); + + /* Set the parameter register for the SMU message */ + REG_WRITE(DAL_ARG_REG, param_in); + + /* Trigger the message transaction by writing the message ID */ + REG_WRITE(DAL_MSG_REG, msg_id); + + result = dcn30m_smu_wait_for_response(clk_mgr, 10, 200000); + + if (IS_SMU_TIMEOUT(result)) + dm_helpers_smu_timeout(CTX, msg_id, param_in, 10 * 200000); + + /* Wait for response */ + if (result == DALSMC_Result_OK) { + if (param_out) + *param_out = REG_READ(DAL_ARG_REG); + + return true; + } + + return false; +} + +uint32_t dcn30m_smu_set_smart_mux_switch(struct clk_mgr_internal *clk_mgr, uint32_t pins_to_set) +{ + uint32_t response = 0; + + smu_print("SMU Set SmartMux Switch: switch_dgpu = %d\n", pins_to_set); + + dcn30m_smu_send_msg_with_param(clk_mgr, + DALSMC_MSG_SmartAccess, pins_to_set, &response); + + return response; +} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h new file mode 100644 index 000000000000..8a59a473fc5e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h @@ -0,0 +1,34 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_ +#define DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_ + +#include "core_types.h" + +struct clk_mgr_internal; + +uint32_t dcn30m_smu_set_smart_mux_switch(struct clk_mgr_internal *clk_mgr, uint32_t pins_to_set); +#endif /* DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c index 2d14346b680e..478b4d6a3544 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c @@ -49,12 +49,9 @@ static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0x00DC0000, 0x00E0000 { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0x0241B000, 0x04040000 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } } } }; + +#define CTX clk_mgr->base.ctx +#define IND_REG(offset) offset #define regBIF_BX_PF2_RSMU_INDEX 0x0000 #define regBIF_BX_PF2_RSMU_INDEX_BASE_IDX 1 @@ -67,9 +64,6 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D #define FN(reg_name, field) \ FD(reg_name##__##field) -#define REG_NBIO(reg_name) \ - (NBIO_BASE.instance[0].segment[regBIF_BX_PF2_ ## reg_name ## _BASE_IDX] + regBIF_BX_PF2_ ## reg_name) - #undef DC_LOGGER #define DC_LOGGER \ CTX->logger @@ -77,6 +71,13 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D #define mmMP1_C2PMSG_3 0x3B1050C +#define reg__MP1_C2PMSG_3_MASK (0xFFFFFFFF) +#define reg__MP1_C2PMSG_3__SHIFT (0) + + +#define data_reg_name__MP1_C2PMSG_3_MASK (0xFFFFFFFF) +#define data_reg_name__MP1_C2PMSG_3__SHIFT (0) + #define VBIOSSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. 
Requirement specified by PMFW team #define VBIOSSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version #define VBIOSSMC_MSG_Spare0 0x03 ///< Spare0 @@ -153,12 +154,10 @@ static int dcn315_smu_send_msg_with_param( for (i = 0; i < SMU_REGISTER_WRITE_RETRY_COUNT; i++) { /* Trigger the message transaction by writing the message ID */ - generic_write_indirect_reg(CTX, - REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA), - mmMP1_C2PMSG_3, msg_id); - read_back_data = generic_read_indirect_reg(CTX, - REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA), - mmMP1_C2PMSG_3); + IX_REG_SET_SYNC(mmMP1_C2PMSG_3, 0, + MP1_C2PMSG_3, msg_id); + IX_REG_GET_SYNC(mmMP1_C2PMSG_3, + MP1_C2PMSG_3, &read_back_data); if (read_back_data == msg_id) break; udelay(2); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c index 6a6ae618650b..4607eff07253 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c @@ -65,6 +65,7 @@ #define mmCLK1_CLK5_ALLOW_DS 0x16EB1 #define mmCLK5_spll_field_8 0x1B04B +#define mmCLK6_spll_field_8 0x1B24B #define mmDENTIST_DISPCLK_CNTL 0x0124 #define regDENTIST_DISPCLK_CNTL 0x0064 #define regDENTIST_DISPCLK_CNTL_BASE_IDX 1 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index 142de8938d7c..bb1ac12a2b09 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -90,6 +90,7 @@ #define mmCLK1_CLK5_ALLOW_DS 0x16EB1 #define mmCLK5_spll_field_8 0x1B24B +#define mmCLK6_spll_field_8 0x1B24B #define mmDENTIST_DISPCLK_CNTL 0x0124 #define regDENTIST_DISPCLK_CNTL 0x0064 #define regDENTIST_DISPCLK_CNTL_BASE_IDX 1 @@ -116,6 +117,7 @@ #define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L #define CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L +#define CLK6_spll_field_8__spll_ssc_en_MASK 0x00002000L #define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0 #undef FN @@ -596,7 +598,11 @@ static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base) uint32_t ssc_enable; - ssc_enable = REG_READ(CLK5_spll_field_8) & CLK5_spll_field_8__spll_ssc_en_MASK; + if (clk_mgr_base->ctx->dce_version == DCN_VERSION_3_51) { + ssc_enable = REG_READ(CLK6_spll_field_8) & CLK6_spll_field_8__spll_ssc_en_MASK; + } else { + ssc_enable = REG_READ(CLK5_spll_field_8) & CLK5_spll_field_8__spll_ssc_en_MASK; + } return ssc_enable != 0; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c index f6f0e6a33001..604d256cb47a 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c @@ -84,8 +84,8 @@ #define VBIOSSMC_MSG_AllowZstatesEntry 0x15 #define VBIOSSMC_MSG_DisallowZstatesEntry 0x16 #define VBIOSSMC_MSG_SetDtbClk 0x17 -#define VBIOSSMC_MSG_DispPsrEntry 0x18 ///< Display PSR entry, DMU -#define VBIOSSMC_MSG_DispPsrExit 0x19 ///< Display PSR exit, DMU +#define VBIOSSMC_MSG_DispIPS2Entry 0x18 ///< Display IPS2 entry, DMU +#define VBIOSSMC_MSG_DispIPS2Exit 0x19 ///< Display IPS2 exit, DMU #define VBIOSSMC_MSG_DisableLSdma 0x1A ///< Disable LSDMA; only sent by VBIOS #define VBIOSSMC_MSG_DpControllerPhyStatus 0x1B ///< Inform PMFW about the pre conditions for turning SLDO2 on/off . 
bit[0]==1 precondition is met, bit[1-2] are for DPPHY number #define VBIOSSMC_MSG_QueryIPS2Support 0x1C ///< Return 1: support; else not supported @@ -475,7 +475,7 @@ int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr) retv = dcn35_smu_send_msg_with_param( clk_mgr, - VBIOSSMC_MSG_DispPsrExit, + VBIOSSMC_MSG_DispIPS2Exit, 0); smu_print("%s: smu_exit_low_power_state return = %d\n", __func__, retv); return retv; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c index a3b8e3d4a429..b59703467128 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c @@ -22,8 +22,6 @@ #include "dcn/dcn_4_1_0_offset.h" #include "dcn/dcn_4_1_0_sh_mask.h" -#include "dml/dcn401/dcn401_fpu.h" - #define DCN_BASE__INST0_SEG1 0x000000C0 #define mmCLK01_CLK0_CLK_PLL_REQ 0x16E37 @@ -183,43 +181,36 @@ static void dcn401_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e c static void dcn401_build_wm_range_table(struct clk_mgr *clk_mgr) { - /* legacy */ - DC_FP_START(); - dcn401_build_wm_range_table_fpu(clk_mgr); - DC_FP_END(); - - if (clk_mgr->ctx->dc->debug.using_dml21) { - /* For min clocks use as reported by PM FW and report those as min */ - uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz; - uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; - - /* Set A - Normal - default values */ - clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF; - - /* Set B - Unused on dcn4 */ - clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = false; - - /* Set 1A - Dummy P-State - P-State latency set to "dummy p-state" value */ - /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */ - if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) { - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = true; - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE; - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_dcfclk = 0xFFFF; - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_uclk = min_uclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_uclk = 0xFFFF; - } else { - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = false; - } - - /* Set 1B - Unused on dcn4 */ - clk_mgr->bw_params->wm_table.nv_entries[WM_1B].valid = false; + /* For min clocks use as reported by PM FW and report those as min */ + uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz; + uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; + + /* Set A - Normal - default values */ + clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true; + clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE; + 
clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; + clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF; + clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz; + clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF; + + /* Set B - Unused on dcn4 */ + clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = false; + + /* Set 1A - Dummy P-State - P-State latency set to "dummy p-state" value */ + /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */ + if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) { + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = true; + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE; + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_dcfclk = 0xFFFF; + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_uclk = min_uclk_mhz; + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_uclk = 0xFFFF; + } else { + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = false; } + + /* Set 1B - Unused on dcn4 */ + clk_mgr->bw_params->wm_table.nv_entries[WM_1B].valid = false; } void dcn401_init_clocks(struct clk_mgr *clk_mgr_base) @@ -320,6 +311,25 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base) dcn401_build_wm_range_table(clk_mgr_base); } +bool dcn401_is_dc_mode_present(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + return clk_mgr->smu_present && clk_mgr->dpm_present && + ((clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels && + clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz) || + (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels && + clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz) || + (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels && + clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz) || + (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_fclk_levels && + clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz) || + (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels && + clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz) || + (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_socclk_levels && + clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz)); +} + static void dcn401_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info) { @@ -1490,6 +1500,35 @@ static int dcn401_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base) return 0; } +unsigned int dcn401_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + unsigned int num_clk_levels; + + switch (clk_type) { + case CLK_TYPE_DISPCLK: + num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; + return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DISPCLK) ? 
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 : + clk_mgr->base.boot_snapshot.dispclk; + case CLK_TYPE_DPPCLK: + num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dppclk_levels; + return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DPPCLK) ? + clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dppclk_mhz * 1000 : + clk_mgr->base.boot_snapshot.dppclk; + case CLK_TYPE_DSCCLK: + num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; + return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DISPCLK) ? + clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 / 3 : + clk_mgr->base.boot_snapshot.dispclk / 3; + default: + break; + } + + return 0; +} + static struct clk_mgr_funcs dcn401_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, .get_dtb_ref_clk_frequency = dcn401_get_dtb_ref_freq_khz, @@ -1505,6 +1544,8 @@ static struct clk_mgr_funcs dcn401_funcs = { .get_dispclk_from_dentist = dcn401_get_dispclk_from_dentist, .get_hard_min_memclk = dcn401_get_hard_min_memclk, .get_hard_min_fclk = dcn401_get_hard_min_fclk, + .is_dc_mode_present = dcn401_is_dc_mode_present, + .get_max_clock_khz = dcn401_get_max_clock_khz, }; struct clk_mgr_internal *dcn401_clk_mgr_construct( @@ -1565,7 +1606,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct( clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL); if (!clk_mgr->base.bw_params) { BREAK_TO_DEBUGGER(); - kfree(clk_mgr); + kfree(clk_mgr401); return NULL; } @@ -1576,6 +1617,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct( if (!clk_mgr->wm_range_table) { BREAK_TO_DEBUGGER(); kfree(clk_mgr->base.bw_params); + kfree(clk_mgr401); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h index 6c9ae5ca2c7e..97a1ce1e8a9e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h @@ -105,10 +105,13 @@ struct dcn401_clk_mgr { }; void dcn401_init_clocks(struct clk_mgr *clk_mgr_base); +bool dcn401_is_dc_mode_present(struct clk_mgr *clk_mgr_base); struct clk_mgr_internal *dcn401_clk_mgr_construct(struct dc_context *ctx, struct dccg *dccg); void dcn401_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr); +unsigned int dcn401_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type); + #endif /* __DCN401_CLK_MGR_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 28d1353f403d..28aca7017f0f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -36,7 +36,9 @@ #include "resource.h" #include "dc_state.h" #include "dc_state_priv.h" +#include "dc_plane.h" #include "dc_plane_priv.h" +#include "dc_stream_priv.h" #include "gpio_service_interface.h" #include "clk_mgr.h" @@ -239,6 +241,7 @@ static bool create_links( DC_LOG_DC("BIOS object table - end"); /* Create a link for each usb4 dpia port */ + dc->lowest_dpia_link_index = MAX_LINKS; for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) { struct link_init_data link_init_params = {0}; struct dc_link *link; @@ -251,6 +254,9 @@ static bool create_links( link = dc->link_srv->create_link(&link_init_params); if (link) { + if (dc->lowest_dpia_link_index > dc->link_count) + dc->lowest_dpia_link_index = dc->link_count; + dc->links[dc->link_count] = link; link->dc = 
dc; ++dc->link_count; @@ -439,9 +445,12 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, * Don't adjust DRR while there's bandwidth optimizations pending to * avoid conflicting with firmware updates. */ - if (dc->ctx->dce_version > DCE_VERSION_MAX) - if (dc->optimized_required || dc->wm_optimized_required) + if (dc->ctx->dce_version > DCE_VERSION_MAX) { + if (dc->optimized_required || dc->wm_optimized_required) { + stream->adjust.timing_adjust_pending = true; return false; + } + } dc_exit_ips_for_hw_access(dc); @@ -967,6 +976,8 @@ static bool dc_construct_ctx(struct dc *dc, if (!dc_ctx) return false; + dc_stream_init_rmcm_3dlut(dc); + dc_ctx->cgs_device = init_params->cgs_device; dc_ctx->driver_context = init_params->driver; dc_ctx->dc = dc; @@ -1192,6 +1203,12 @@ static void apply_ctx_interdependent_lock(struct dc *dc, static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) { + if (dc->debug.visual_confirm & VISUAL_CONFIRM_EXPLICIT) { + memcpy(&pipe_ctx->visual_confirm_color, &pipe_ctx->plane_state->visual_confirm_color, + sizeof(pipe_ctx->visual_confirm_color)); + return; + } + if (dc->ctx->dce_version >= DCN_VERSION_1_0) { memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color)); @@ -1225,6 +1242,51 @@ static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *conte } } +void dc_get_visual_confirm_for_stream( + struct dc *dc, + struct dc_stream_state *stream_state, + struct tg_color *color) +{ + struct dc_stream_status *stream_status = dc_stream_get_status(stream_state); + struct pipe_ctx *pipe_ctx; + int i; + struct dc_plane_state *plane_state = NULL; + + if (!stream_status) + return; + + switch (dc->debug.visual_confirm) { + case VISUAL_CONFIRM_DISABLE: + return; + case VISUAL_CONFIRM_PSR: + case VISUAL_CONFIRM_FAMS: + pipe_ctx = dc_stream_get_pipe_ctx(stream_state); + if (!pipe_ctx) + return; + dc_dmub_srv_get_visual_confirm_color_cmd(dc, pipe_ctx); + memcpy(color, &dc->ctx->dmub_srv->dmub->visual_confirm_color, sizeof(struct tg_color)); + return; + + default: + /* find plane with highest layer_index */ + for (i = 0; i < stream_status->plane_count; i++) { + if (stream_status->plane_states[i]->visible) + plane_state = stream_status->plane_states[i]; + } + if (!plane_state) + return; + /* find pipe that contains plane with highest layer index */ + for (i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe->plane_state == plane_state) { + memcpy(color, &pipe->visual_confirm_color, sizeof(struct tg_color)); + return; + } + } + } +} + static void disable_dangling_plane(struct dc *dc, struct dc_state *context) { int i, j; @@ -2053,6 +2115,18 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c dc->hwss.enable_accelerated_mode(dc, context); } + if (dc->hwseq->funcs.wait_for_pipe_update_if_needed) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &context->res_ctx.pipe_ctx[i]; + //Only delay otg master for a given config + if (resource_is_pipe_type(pipe, OTG_MASTER)) { + //dc_commit_state_no_check is always a full update + dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, pipe, false); + break; + } + } + } + if (context->stream_count > get_seamless_boot_stream_count(context) || context->stream_count == 0) dc->hwss.prepare_bandwidth(dc, context); @@ -2117,6 +2191,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c if (dc->hwss.program_front_end_for_ctx) { 
dc->hwss.interdependent_update_lock(dc, context, true); dc->hwss.program_front_end_for_ctx(dc, context); + + if (dc->hwseq->funcs.set_wait_for_update_needed_for_pipe) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &context->res_ctx.pipe_ctx[i]; + dc->hwseq->funcs.set_wait_for_update_needed_for_pipe(dc, pipe); + } + } + dc->hwss.interdependent_update_lock(dc, context, false); dc->hwss.post_unlock_program_front_end(dc, context); } @@ -2258,11 +2340,15 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params for (i = 0; i < params->stream_count; i++) { struct dc_stream_state *stream = params->streams[i]; struct dc_stream_status *status = dc_stream_get_status(stream); + struct dc_sink *sink = stream->sink; /* revalidate streams */ - res = dc_validate_stream(dc, stream); - if (res != DC_OK) - return res; + if (!dc_is_virtual_signal(sink->sink_signal)) { + res = dc_validate_stream(dc, stream); + if (res != DC_OK) + return res; + } + dc_stream_log(dc, stream); @@ -2297,7 +2383,7 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params context->power_source = params->power_source; - res = dc_validate_with_context(dc, set, params->stream_count, context, false); + res = dc_validate_with_context(dc, set, params->stream_count, context, DC_VALIDATE_MODE_AND_PROGRAMMING); /* * Only update link encoder to stream assignment after bandwidth validation passed. @@ -2815,7 +2901,7 @@ static enum surface_update_type check_update_surfaces_for_stream( int i; enum surface_update_type overall_type = UPDATE_TYPE_FAST; - if (dc->idle_optimizations_allowed) + if (dc->idle_optimizations_allowed || dc_can_clear_cursor_limit(dc)) overall_type = UPDATE_TYPE_FULL; if (stream_status == NULL || stream_status->plane_count != surface_count) @@ -3168,7 +3254,8 @@ static void copy_stream_update_to_stream(struct dc *dc, if (update->crtc_timing_adjust) { if (stream->adjust.v_total_min != update->crtc_timing_adjust->v_total_min || - stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max) + stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max || + stream->adjust.timing_adjust_pending) update->crtc_timing_adjust->timing_adjust_pending = true; stream->adjust = *update->crtc_timing_adjust; update->crtc_timing_adjust->timing_adjust_pending = false; @@ -3219,7 +3306,8 @@ static void copy_stream_update_to_stream(struct dc *dc, if (dsc_validate_context) { stream->timing.dsc_cfg = *update->dsc_config; stream->timing.flags.DSC = enable_dsc; - if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) { + if (dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, + DC_VALIDATE_MODE_ONLY) != DC_OK) { stream->timing.dsc_cfg = old_dsc_cfg; stream->timing.flags.DSC = old_dsc_enabled; update->dsc_config = NULL; @@ -3248,7 +3336,7 @@ static void backup_planes_and_stream_state( return; for (i = 0; i < status->plane_count; i++) { - scratch->plane_states[i] = *status->plane_states[i]; + dc_plane_copy_config(&scratch->plane_states[i], status->plane_states[i]); } scratch->stream_state = *stream; } @@ -3264,10 +3352,7 @@ static void restore_planes_and_stream_state( return; for (i = 0; i < status->plane_count; i++) { - /* refcount will always be valid, restore everything else */ - struct kref refcount = status->plane_states[i]->refcount; - *status->plane_states[i] = scratch->plane_states[i]; - status->plane_states[i]->refcount = refcount; + dc_plane_copy_config(status->plane_states[i], &scratch->plane_states[i]); } *stream = 
scratch->stream_state; } @@ -3444,7 +3529,7 @@ static bool update_planes_and_stream_state(struct dc *dc, } if (update_type == UPDATE_TYPE_FULL) { - if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { + if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) { BREAK_TO_DEBUGGER(); goto fail; } @@ -3998,6 +4083,7 @@ static void commit_planes_for_stream(struct dc *dc, &context->res_ctx, stream); ASSERT(top_pipe_to_program != NULL); + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -4048,6 +4134,9 @@ static void commit_planes_for_stream(struct dc *dc, dc->hwss.wait_for_dcc_meta_propagation(dc, top_pipe_to_program); } + if (dc->hwseq->funcs.wait_for_pipe_update_if_needed) + dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, top_pipe_to_program, update_type == UPDATE_TYPE_FAST); + if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use); @@ -4168,12 +4257,6 @@ static void commit_planes_for_stream(struct dc *dc, if (update_type == UPDATE_TYPE_FAST) continue; - ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); - if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { - /*turn off triple buffer for full update*/ - dc->hwss.program_triplebuffer( - dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); - } stream_status = stream_get_status(context, pipe_ctx->stream); @@ -4182,8 +4265,37 @@ static void commit_planes_for_stream(struct dc *dc, dc, pipe_ctx->stream, stream_status->plane_count, context); } } + + for (j = 0; j < dc->res_pool->pipe_count; j++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; + + if (!pipe_ctx->plane_state) + continue; + + /* Full fe update*/ + if (update_type == UPDATE_TYPE_FAST) + continue; + + ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); + if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { + /*turn off triple buffer for full update*/ + dc->hwss.program_triplebuffer( + dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); + } + } + if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) { dc->hwss.program_front_end_for_ctx(dc, context); + + //Pipe busy until some frame and line # + if (dc->hwseq->funcs.set_wait_for_update_needed_for_pipe && update_type == UPDATE_TYPE_FULL) { + for (j = 0; j < dc->res_pool->pipe_count; j++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; + + dc->hwseq->funcs.set_wait_for_update_needed_for_pipe(dc, pipe_ctx); + } + } + if (dc->debug.validate_dml_output) { for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i]; @@ -4523,7 +4635,8 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc, backup_and_set_minimal_pipe_split_policy(dc, base_context, policy); /* commit minimal state */ - if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) { + if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, + DC_VALIDATE_MODE_AND_PROGRAMMING) == DC_OK) { /* prevent underflow and corruption when reconfiguring pipes */ force_vsync_flip_in_minimal_transition_context(minimal_transition_context); } else { @@ -4958,6 +5071,9 @@ static bool full_update_required(struct dc *dc, if (dc->idle_optimizations_allowed) return true; + if (dc_can_clear_cursor_limit(dc)) + return 
true; + return false; } @@ -5043,7 +5159,7 @@ static bool update_planes_and_stream_v1(struct dc *dc, copy_stream_update_to_stream(dc, context, stream, stream_update); if (update_type >= UPDATE_TYPE_FULL) { - if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { + if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) { DC_ERROR("Mode validation failed for stream update!\n"); dc_state_release(context); return false; @@ -5327,8 +5443,8 @@ bool dc_update_planes_and_stream(struct dc *dc, else ret = update_planes_and_stream_v2(dc, srf_updates, surface_count, stream, stream_update); - - if (ret) + if (ret && (dc->ctx->dce_version >= DCN_VERSION_3_2 || + dc->ctx->dce_version == DCN_VERSION_3_01)) clear_update_flags(srf_updates, surface_count, stream); return ret; @@ -5359,7 +5475,7 @@ void dc_commit_updates_for_stream(struct dc *dc, ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream, stream_update, state); - if (ret) + if (ret && dc->ctx->dce_version >= DCN_VERSION_3_2) clear_update_flags(srf_updates, surface_count, stream); } @@ -5432,6 +5548,15 @@ void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state) dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config); } break; + case DC_ACPI_CM_POWER_STATE_D3: + if (dc->caps.ips_support) + dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3); + + if (dc->caps.ips_v2_support) { + if (dc->clk_mgr->funcs->set_low_power_state) + dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr); + } + break; default: ASSERT(dc->current_state->stream_count == 0); dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state); @@ -6187,15 +6312,22 @@ bool dc_abm_save_restore( void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties) { unsigned int i; - bool subvp_sw_cursor_req = false; + unsigned int max_cursor_size = dc->caps.max_cursor_size; + unsigned int stream_cursor_size; - for (i = 0; i < dc->current_state->stream_count; i++) { - if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i]) && !dc->current_state->streams[i]->hw_cursor_req) { - subvp_sw_cursor_req = true; - break; + if (dc->debug.allow_sw_cursor_fallback && dc->res_pool->funcs->get_max_hw_cursor_size) { + for (i = 0; i < dc->current_state->stream_count; i++) { + stream_cursor_size = dc->res_pool->funcs->get_max_hw_cursor_size(dc, + dc->current_state, + dc->current_state->streams[i]); + + if (stream_cursor_size < max_cursor_size) { + max_cursor_size = stream_cursor_size; + } } } - properties->cursor_size_limit = subvp_sw_cursor_req ? 64 : dc->caps.max_cursor_size; + + properties->cursor_size_limit = max_cursor_size; } /** @@ -6222,13 +6354,14 @@ void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link, edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn); } -/* - ***************************************************************************** +/** * dc_get_power_profile_for_dc_state() - extracts power profile from dc state * * Called when DM wants to make power policy decisions based on dc_state * - ***************************************************************************** + * @context: Pointer to the dc_state from which the power profile is extracted. + * + * Return: The power profile structure containing the power level information. 
*/ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context) { @@ -6244,13 +6377,14 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state return profile; } -/* - ********************************************************************************** +/** * dc_get_det_buffer_size_from_state() - extracts detile buffer size from dc state * - * Called when DM wants to log detile buffer size from dc_state + * This function is called to log the detile buffer size from the dc_state. * - ********************************************************************************** + * @context: a pointer to the dc_state from which the detile buffer size is extracted. + * + * Return: the size of the detile buffer, or 0 if not available. */ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context) { @@ -6261,3 +6395,57 @@ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context) return 0; } + +/** + * dc_get_host_router_index: Get index of host router from a dpia link + * + * This function returns the host router index of the target link if the target link is a DPIA link. + * + * @link: Pointer to the target link (input) + * @host_router_index: Pointer to store the host router index of the target link (output). + * + * Return: true if the host router index is found and valid. + * + */ +bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index) +{ + struct dc *dc; + + if (!link || !host_router_index || link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) + return false; + + dc = link->ctx->dc; + + if (link->link_index < dc->lowest_dpia_link_index) + return false; + + *host_router_index = (link->link_index - dc->lowest_dpia_link_index) / dc->caps.num_of_dpias_per_host_router; + if (*host_router_index < dc->caps.num_of_host_routers) + return true; + else + return false; +} + +bool dc_is_cursor_limit_pending(struct dc *dc) +{ + uint32_t i; + + for (i = 0; i < dc->current_state->stream_count; i++) { + if (dc_stream_is_cursor_limit_pending(dc, dc->current_state->streams[i])) + return true; + } + + return false; +} + +bool dc_can_clear_cursor_limit(struct dc *dc) +{ + uint32_t i; + + for (i = 0; i < dc->current_state->stream_count; i++) { + if (dc_state_can_clear_stream_cursor_subvp_limit(dc->current_state->streams[i], dc->current_state)) + return true; + } + + return false; +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c index 650e89825968..bbce751b485f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c @@ -266,6 +266,10 @@ char *dc_status_to_str(enum dc_status status) return "Fail dp payload allocation"; case DC_FAIL_DP_LINK_BANDWIDTH: return "Insufficient DP link bandwidth"; + case DC_FAIL_HW_CURSOR_SUPPORT: + return "HW Cursor not supported"; + case DC_FAIL_DP_TUNNEL_BW_VALIDATE: + return "Fail DP Tunnel BW validation"; case DC_ERROR_UNEXPECTED: return "Unexpected error"; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 55b32dfbfdd6..ec4e80e5b6eb 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -427,6 +427,32 @@ void get_hdr_visual_confirm_color( } } +/* Visual Confirm color definition for Smart Mux */ +void get_smartmux_visual_confirm_color( + struct dc *dc, + struct tg_color *color) +{ + uint32_t color_value
= MAX_TG_COLOR_VALUE; + + const struct tg_color sm_ver_colors[5] = { + {0, 0, 0}, /* SMUX_MUXCONTROL_UNSUPPORTED - Black */ + {0, MAX_TG_COLOR_VALUE, 0}, /* SMUX_MUXCONTROL_v10 - Green */ + {0, MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE}, /* SMUX_MUXCONTROL_v15 - Cyan */ + {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, 0}, /* SMUX_MUXCONTROL_MDM - Yellow */ + {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, /* SMUX_MUXCONTROL_vUNKNOWN - Magenta*/ + }; + + if (dc->caps.is_apu) { + /* APU driving the eDP */ + *color = sm_ver_colors[dc->config.smart_mux_version]; + } else { + /* dGPU driving the eDP - red */ + color->color_r_cr = color_value; + color->color_g_y = 0; + color->color_b_cb = 0; + } +} + /* Visual Confirm color definition for VABC */ void get_vabc_visual_confirm_color( struct pipe_ctx *pipe_ctx, @@ -697,7 +723,7 @@ void get_fams2_visual_confirm_color( void hwss_build_fast_sequence(struct dc *dc, struct dc_dmub_cmd *dc_dmub_cmd, unsigned int dmub_cmd_count, - struct block_sequence block_sequence[], + struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE], unsigned int *num_steps, struct pipe_ctx *pipe_ctx, struct dc_stream_status *stream_status, @@ -896,7 +922,7 @@ void hwss_build_fast_sequence(struct dc *dc, } void hwss_execute_sequence(struct dc *dc, - struct block_sequence block_sequence[], + struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE], int num_steps) { unsigned int i; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c index 71e15da4bb69..130455f2802a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c @@ -515,7 +515,8 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable) link->dc->link_srv->enable_hpd_filter(link, enable); } -bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count) +enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, const struct dc_state *new_ctx) { - return dc->link_srv->validate_dpia_bandwidth(streams, count); + return dc->link_srv->validate_dp_tunnel_bandwidth(dc, new_ctx); } + diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 313a32248cd7..4d6181e7c612 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1342,32 +1342,6 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx) data->viewport_c.y += src.y / vpc_div; } -static bool is_subvp_high_refresh_candidate(struct dc_stream_state *stream) -{ - uint32_t refresh_rate; - struct dc *dc = stream->ctx->dc; - - refresh_rate = (stream->timing.pix_clk_100hz * (uint64_t)100 + - stream->timing.v_total * stream->timing.h_total - (uint64_t)1); - refresh_rate = div_u64(refresh_rate, stream->timing.v_total); - refresh_rate = div_u64(refresh_rate, stream->timing.h_total); - - /* If there's any stream that fits the SubVP high refresh criteria, - * we must return true. This is because cursor updates are asynchronous - * with full updates, so we could transition into a SubVP config and - * remain in HW cursor mode if there's no cursor update which will - * then cause corruption. 
- */ - if ((refresh_rate >= 120 && refresh_rate <= 175 && - stream->timing.v_addressable >= 1080 && - stream->timing.v_addressable <= 2160) && - (dc->current_state->stream_count > 1 || - (dc->current_state->stream_count == 1 && !stream->allow_freesync))) - return true; - - return false; -} - static enum controller_dp_test_pattern convert_dp_to_controller_test_pattern( enum dp_test_pattern test_pattern) { @@ -3937,6 +3911,10 @@ enum dc_status resource_map_pool_resources( if (!dc->link_srv->dp_decide_link_settings(stream, &pipe_ctx->link_config.dp_link_settings)) return DC_FAIL_DP_LINK_BANDWIDTH; + + dc->link_srv->dp_decide_tunnel_settings(stream, + &pipe_ctx->link_config.dp_tunnel_settings); + if (dc->link_srv->dp_get_encoding_format( &pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) { pipe_ctx->stream_res.hpo_dp_stream_enc = @@ -3962,7 +3940,9 @@ enum dc_status resource_map_pool_resources( /* TODO: Add check if ASIC support and EDID audio */ if (!stream->converter_disable_audio && dc_is_audio_capable_signal(pipe_ctx->stream->signal) && - stream->audio_info.mode_count && stream->audio_info.flags.all) { + stream->audio_info.mode_count && + (stream->audio_info.flags.all || + (stream->sink && stream->sink->edid_caps.panel_patch.skip_audio_sab_check))) { pipe_ctx->stream_res.audio = find_first_free_audio( &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id, dc_ctx->dce_version); @@ -4075,7 +4055,7 @@ static bool add_all_planes_for_stream( * @set: An array of dc_validation_set with all the current streams reference * @set_count: Total of streams * @context: New context - * @fast_validate: Enable or disable fast validation + * @validate_mode: identify the validation mode * * This function updates the potential new stream in the context object. It * creates multiple lists for the add, remove, and unchanged streams. In @@ -4090,7 +4070,7 @@ enum dc_status dc_validate_with_context(struct dc *dc, const struct dc_validation_set set[], int set_count, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { struct dc_stream_state *unchanged_streams[MAX_PIPES] = { 0 }; struct dc_stream_state *del_streams[MAX_PIPES] = { 0 }; @@ -4259,7 +4239,12 @@ enum dc_status dc_validate_with_context(struct dc *dc, } } - res = dc_validate_global_state(dc, context, fast_validate); + /* clear subvp cursor limitations */ + for (i = 0; i < context->stream_count; i++) { + dc_state_set_stream_subvp_cursor_limit(context->streams[i], context, false); + } + + res = dc_validate_global_state(dc, context, validate_mode); /* calculate pixel rate divider after deciding pxiel clock & odm combine */ if ((dc->hwss.calculate_pix_rate_divider) && (res == DC_OK)) { @@ -4316,7 +4301,7 @@ static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx) * * @dc: dc struct for this driver * @new_ctx: state to be validated - * @fast_validate: set to true if only yes/no to support matters + * @validate_mode: identify the validation mode * * Checks hardware resource availability and bandwidth requirement. 
* @@ -4326,7 +4311,7 @@ static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx) enum dc_status dc_validate_global_state( struct dc *dc, struct dc_state *new_ctx, - bool fast_validate) + enum dc_validate_mode validate_mode) { enum dc_status result = DC_ERROR_UNEXPECTED; int i, j; @@ -4385,8 +4370,7 @@ enum dc_status dc_validate_global_state( result = resource_build_scaling_params_for_context(dc, new_ctx); if (result == DC_OK) - if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate)) - result = DC_FAIL_BANDWIDTH_VALIDATE; + result = dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, validate_mode); return result; } @@ -5538,23 +5522,17 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc, return DC_OK; } -bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream) +struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx) { - if (!dc->debug.disable_subvp_high_refresh && is_subvp_high_refresh_candidate(stream)) - return true; - if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 && - ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120) - return true; - else if (dc->current_state->stream_count > 1 && stream->timing.v_addressable >= 1080 && - ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120) - return true; - - return false; + return &pipe_ctx->plane_res.scl_data.dscl_prog_data; } -struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx) +static bool resource_allocate_mcache(struct dc_state *context, const struct dc_mcache_params *mcache_params) { - return &pipe_ctx->plane_res.scl_data.dscl_prog_data; + if (context->clk_mgr->ctx->dc->res_pool->funcs->program_mcache_pipe_config) + context->clk_mgr->ctx->dc->res_pool->funcs->program_mcache_pipe_config(context, mcache_params); + + return true; } void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options) @@ -5576,6 +5554,7 @@ void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuratio dml2_options->callbacks.get_stream_status = &dc_state_get_stream_status; dml2_options->callbacks.get_stream_from_id = &dc_state_get_stream_from_id; dml2_options->callbacks.get_max_flickerless_instant_vtotal_increase = &dc_stream_get_max_flickerless_instant_vtotal_increase; + dml2_options->callbacks.allocate_mcache = &resource_allocate_mcache; dml2_options->svp_pstate.callbacks.dc = dc; dml2_options->svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index 1b2cce127981..883054bb18e7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -22,6 +22,7 @@ * Authors: AMD * */ +#include "dc_types.h" #include "core_types.h" #include "core_status.h" #include "dc_state.h" @@ -193,11 +194,6 @@ static void init_state(struct dc *dc, struct dc_state *state) struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params) { struct dc_state *state; -#ifdef CONFIG_DRM_AMD_DC_FP - struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp; - - memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options)); -#endif state = kvzalloc(sizeof(struct dc_state), GFP_KERNEL); @@ -210,14 +206,12 @@ struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *p 
#ifdef CONFIG_DRM_AMD_DC_FP if (dc->debug.using_dml2) { - dml2_opt->use_clock_dc_limits = false; - if (!dml2_create(dc, dml2_opt, &state->bw_ctx.dml2)) { + if (!dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2)) { dc_state_release(state); return NULL; } - dml2_opt->use_clock_dc_limits = true; - if (!dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source)) { + if (!dml2_create(dc, &dc->dml2_dc_power_options, &state->bw_ctx.dml2_dc_power_source)) { dc_state_release(state); return NULL; } @@ -433,6 +427,8 @@ enum dc_status dc_state_remove_stream( return DC_ERROR_UNEXPECTED; } + dc_stream_release_3dlut_for_stream(dc, stream); + dc_stream_release(state->streams[i]); state->stream_count--; @@ -812,8 +808,12 @@ enum dc_status dc_state_add_phantom_stream(const struct dc *dc, if (phantom_stream_status) { phantom_stream_status->mall_stream_config.type = SUBVP_PHANTOM; phantom_stream_status->mall_stream_config.paired_stream = main_stream; + phantom_stream_status->mall_stream_config.subvp_limit_cursor_size = false; + phantom_stream_status->mall_stream_config.cursor_size_limit_subvp = false; } + dc_state_set_stream_subvp_cursor_limit(main_stream, state, true); + return res; } @@ -939,13 +939,20 @@ void dc_state_release_phantom_streams_and_planes( const struct dc *dc, struct dc_state *state) { + unsigned int phantom_count; + struct dc_stream_state *phantom_streams[MAX_PHANTOM_PIPES]; + struct dc_plane_state *phantom_planes[MAX_PHANTOM_PIPES]; int i; - for (i = 0; i < state->phantom_stream_count; i++) - dc_state_release_phantom_stream(dc, state, state->phantom_streams[i]); + phantom_count = state->phantom_stream_count; + memcpy(phantom_streams, state->phantom_streams, sizeof(struct dc_stream_state *) * MAX_PHANTOM_PIPES); + for (i = 0; i < phantom_count; i++) + dc_state_release_phantom_stream(dc, state, phantom_streams[i]); - for (i = 0; i < state->phantom_plane_count; i++) - dc_state_release_phantom_plane(dc, state, state->phantom_planes[i]); + phantom_count = state->phantom_plane_count; + memcpy(phantom_planes, state->phantom_planes, sizeof(struct dc_plane_state *) * MAX_PHANTOM_PIPES); + for (i = 0; i < phantom_count; i++) + dc_state_release_phantom_plane(dc, state, phantom_planes[i]); } struct dc_stream_state *dc_state_get_stream_from_id(const struct dc_state *state, unsigned int id) @@ -977,3 +984,94 @@ bool dc_state_is_fams2_in_use( return is_fams2_in_use; } + +void dc_state_set_stream_subvp_cursor_limit(const struct dc_stream_state *stream, + struct dc_state *state, + bool limit) +{ + struct dc_stream_status *stream_status; + + stream_status = dc_state_get_stream_status(state, stream); + + if (stream_status) { + stream_status->mall_stream_config.subvp_limit_cursor_size = limit; + } +} + +bool dc_state_get_stream_subvp_cursor_limit(const struct dc_stream_state *stream, + struct dc_state *state) +{ + bool limit = false; + + struct dc_stream_status *stream_status; + + stream_status = dc_state_get_stream_status(state, stream); + + if (stream_status) { + limit = stream_status->mall_stream_config.subvp_limit_cursor_size; + } + + return limit; +} + +void dc_state_set_stream_cursor_subvp_limit(const struct dc_stream_state *stream, + struct dc_state *state, + bool limit) +{ + struct dc_stream_status *stream_status; + + stream_status = dc_state_get_stream_status(state, stream); + + if (stream_status) { + stream_status->mall_stream_config.cursor_size_limit_subvp = limit; + } +} + +bool dc_state_get_stream_cursor_subvp_limit(const struct dc_stream_state *stream, + struct dc_state *state) +{ + 
bool limit = false; + + struct dc_stream_status *stream_status; + + stream_status = dc_state_get_stream_status(state, stream); + + if (stream_status) { + limit = stream_status->mall_stream_config.cursor_size_limit_subvp; + } + + return limit; +} + +bool dc_state_can_clear_stream_cursor_subvp_limit(const struct dc_stream_state *stream, + struct dc_state *state) +{ + bool can_clear_limit = false; + + struct dc_stream_status *stream_status; + + stream_status = dc_state_get_stream_status(state, stream); + + if (stream_status) { + can_clear_limit = dc_state_get_stream_cursor_subvp_limit(stream, state) && + (stream_status->mall_stream_config.type == SUBVP_PHANTOM || + stream->hw_cursor_req || + !stream_status->mall_stream_config.subvp_limit_cursor_size || + !stream->cursor_position.enable || + dc_stream_check_cursor_attributes(stream, state, &stream->cursor_attributes)); + } + + return can_clear_limit; +} + +bool dc_state_is_subvp_in_use(struct dc_state *state) +{ + uint32_t i; + + for (i = 0; i < state->stream_count; i++) { + if (dc_state_get_stream_subvp_type(state, state->streams[i]) != SUBVP_NONE) + return true; + } + + return false; +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 0478dd856d8c..4d6bc9fd4faa 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -265,13 +265,16 @@ void program_cursor_attributes( } /* - * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address + * dc_stream_check_cursor_attributes() - Check validity of cursor attributes and surface address */ -bool dc_stream_set_cursor_attributes( - struct dc_stream_state *stream, +bool dc_stream_check_cursor_attributes( + const struct dc_stream_state *stream, + struct dc_state *state, const struct dc_cursor_attributes *attributes) { - struct dc *dc; + const struct dc *dc; + + unsigned int max_cursor_size; if (NULL == stream) { dm_error("DC: dc_stream is NULL!\n"); @@ -289,24 +292,38 @@ bool dc_stream_set_cursor_attributes( dc = stream->ctx->dc; - /* SubVP is not compatible with HW cursor larger than 64 x 64 x 4. - * Therefore, if cursor is greater than 64 x 64 x 4, fallback to SW cursor in the following case: - * 1. If the config is a candidate for SubVP high refresh (both single an dual display configs) - * 2. If not subvp high refresh, for single display cases, if resolution is >= 5K and refresh rate < 120hz - * 3. If not subvp high refresh, for multi display cases, if resolution is >= 4K and refresh rate < 120hz + /* SubVP is not compatible with HW cursor larger than what can fit in cursor SRAM. + * Therefore, if the cursor is greater than this, fall back to SW cursor. 
*/ - if (dc->debug.allow_sw_cursor_fallback && - attributes->height * attributes->width * 4 > 16384 && - !stream->hw_cursor_req) { - if (check_subvp_sw_cursor_fallback_req(dc, stream)) + if (dc->debug.allow_sw_cursor_fallback && dc->res_pool->funcs->get_max_hw_cursor_size) { + max_cursor_size = dc->res_pool->funcs->get_max_hw_cursor_size(dc, state, stream); + max_cursor_size = max_cursor_size * max_cursor_size * 4; + + if (attributes->height * attributes->width * 4 > max_cursor_size) { return false; + } } - stream->cursor_attributes = *attributes; - return true; } +/* + * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address + */ +bool dc_stream_set_cursor_attributes( + struct dc_stream_state *stream, + const struct dc_cursor_attributes *attributes) +{ + bool result = false; + + if (dc_stream_check_cursor_attributes(stream, stream->ctx->dc->current_state, attributes)) { + stream->cursor_attributes = *attributes; + result = true; + } + + return result; +} + bool dc_stream_program_cursor_attributes( struct dc_stream_state *stream, const struct dc_cursor_attributes *attributes) @@ -552,6 +569,14 @@ bool dc_stream_fc_disable_writeback(struct dc *dc, return true; } +/** + * dc_stream_remove_writeback() - Disables writeback and removes writeback info. + * @dc: Display core control structure. + * @stream: Display core stream state. + * @dwb_pipe_inst: Display writeback pipe. + * + * Return: true on success, false otherwise. + */ bool dc_stream_remove_writeback(struct dc *dc, struct dc_stream_state *stream, uint32_t dwb_pipe_inst) @@ -832,6 +857,73 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream) } /* +* dc_stream_get_3dlut_for_stream() +* Requirements: +* 1. If the stream already owns an RMCM instance, return it. +* 2. If it doesn't and we don't need to allocate, return NULL. +* 3. If there's a free RMCM instance, assign to stream and return it. +* 4. If no free RMCM instances, return NULL. 
+*/ + +struct dc_rmcm_3dlut *dc_stream_get_3dlut_for_stream( + const struct dc *dc, + const struct dc_stream_state *stream, + bool allocate_one) +{ + unsigned int num_rmcm = dc->caps.color.mpc.num_rmcm_3dluts; + + // see if one is allocated for this stream + for (int i = 0; i < num_rmcm; i++) { + if (dc->res_pool->rmcm_3dlut[i].isInUse && + dc->res_pool->rmcm_3dlut[i].stream == stream) + return &dc->res_pool->rmcm_3dlut[i]; + } + + // case: none found, and we don't need to allocate + if (!allocate_one) + return NULL; + + // see if there is an unused 3DLUT to allocate + for (int i = 0; i < num_rmcm; i++) { + if (!dc->res_pool->rmcm_3dlut[i].isInUse) { + dc->res_pool->rmcm_3dlut[i].isInUse = true; + dc->res_pool->rmcm_3dlut[i].stream = stream; + return &dc->res_pool->rmcm_3dlut[i]; + } + } + + // don't have a free 3DLUT + return NULL; +} + + +void dc_stream_release_3dlut_for_stream( + const struct dc *dc, + const struct dc_stream_state *stream) +{ + struct dc_rmcm_3dlut *rmcm_3dlut = + dc_stream_get_3dlut_for_stream(dc, stream, false); + + if (rmcm_3dlut) { + rmcm_3dlut->isInUse = false; + rmcm_3dlut->stream = NULL; + rmcm_3dlut->protection_bits = 0; + } +} + + +void dc_stream_init_rmcm_3dlut(struct dc *dc) +{ + unsigned int num_rmcm = dc->caps.color.mpc.num_rmcm_3dluts; + + for (int i = 0; i < num_rmcm; i++) { + dc->res_pool->rmcm_3dlut[i].isInUse = false; + dc->res_pool->rmcm_3dlut[i].stream = NULL; + dc->res_pool->rmcm_3dlut[i].protection_bits = 0; + } +} + +/* * Finds the greatest index in refresh_rate_hz that contains a value <= refresh */ static int dc_stream_get_nearest_smallest_index(struct dc_stream_state *stream, int refresh)
@@ -1109,3 +1201,26 @@ unsigned int dc_stream_get_max_flickerless_instant_vtotal_increase(struct dc_str return dc_stream_get_max_flickerless_instant_vtotal_delta(stream, is_gaming, false); } + +bool dc_stream_is_cursor_limit_pending(struct dc *dc, struct dc_stream_state *stream) +{ + bool is_limit_pending = false; + + if (dc->current_state) + is_limit_pending = dc_state_get_stream_cursor_subvp_limit(stream, dc->current_state); + + return is_limit_pending; +} + +bool dc_stream_can_clear_cursor_limit(struct dc *dc, struct dc_stream_state *stream) +{ + bool can_clear_limit = false; + + if (dc->current_state) + can_clear_limit = dc_state_get_stream_cursor_subvp_limit(stream, dc->current_state) && + (stream->hw_cursor_req || + !stream->cursor_position.enable || + dc_stream_check_cursor_attributes(stream, dc->current_state, &stream->cursor_attributes)); + + return can_clear_limit; +}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index e6fcc21bb9bc..922f23557f5d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -109,7 +109,8 @@ struct dc_plane_state *dc_create_plane_state(const struct dc *dc) ***************************************************************************** */ const struct dc_plane_status *dc_plane_get_status( - const struct dc_plane_state *plane_state) + const struct dc_plane_state *plane_state, + union dc_plane_status_update_flags flags) { const struct dc_plane_status *plane_status; struct dc *dc;
@@ -136,7 +137,7 @@ const struct dc_plane_status *dc_plane_get_status( if (pipe_ctx->plane_state != plane_state) continue; - if (pipe_ctx->plane_state) + if (pipe_ctx->plane_state && flags.bits.address) pipe_ctx->plane_state->status.is_flip_pending = false; break;
@@ -151,7 +152,8 @@ const struct dc_plane_status *dc_plane_get_status( if
(pipe_ctx->plane_state != plane_state) continue; - dc->hwss.update_pending_status(pipe_ctx); + if (flags.bits.address) + dc->hwss.update_pending_status(pipe_ctx); } return plane_status; }
@@ -294,3 +296,17 @@ void dc_plane_force_dcc_and_tiling_disable(struct dc_plane_state *plane_state, dc->hwss.clear_surface_dcc_and_tiling(pipe_ctx, plane_state, clear_tiling); } } + +void dc_plane_copy_config(struct dc_plane_state *dst, const struct dc_plane_state *src) +{ + struct kref temp_refcount; + + /* backup persistent info */ + memcpy(&temp_refcount, &dst->refcount, sizeof(struct kref)); + + /* copy all configuration information */ + memcpy(dst, src, sizeof(struct dc_plane_state)); + + /* restore persistent info */ + memcpy(&dst->refcount, &temp_refcount, sizeof(struct kref)); +}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 7c2ee0526926..59c07756130d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -46,6 +46,8 @@ #include "dmub/inc/dmub_cmd.h" +#include "sspl/dc_spl_types.h" + struct abm_save_restore; /* forward declaration */
@@ -53,7 +55,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.325" +#define DC_VER "3.2.340" /** * MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
@@ -66,7 +68,11 @@ struct dmub_notification; #define MAX_STREAMS 6 #define MIN_VIEWPORT_SIZE 12 #define MAX_NUM_EDP 2 -#define MAX_HOST_ROUTERS_NUM 2 +#define MAX_SUPPORTED_FORMATS 7 + +#define MAX_HOST_ROUTERS_NUM 3 +#define MAX_DPIA_PER_HOST_ROUTER 3 +#define MAX_DPIA_NUM (MAX_HOST_ROUTERS_NUM * MAX_DPIA_PER_HOST_ROUTER) /* Display Core Interfaces */ struct dc_versions {
@@ -192,6 +198,34 @@ struct dpp_color_caps { struct rom_curve_caps ogam_rom_caps; }; +/* Describes the HW support for 3D LUT memory layout; extends the supported + range to match what the OS can handle in the roadmap */ +struct lut3d_caps { + uint32_t dma_3d_lut : 1; /*< DMA mode support for 3D LUT */ + struct { + uint32_t swizzle_3d_rgb : 1; + uint32_t swizzle_3d_bgr : 1; + uint32_t linear_1d : 1; + } mem_layout_support; + struct { + uint32_t unorm_12msb : 1; + uint32_t unorm_12lsb : 1; + uint32_t float_fp1_5_10 : 1; + } mem_format_support; + struct { + uint32_t order_rgba : 1; + uint32_t order_bgra : 1; + } mem_pixel_order_support; + /*< size options are 9, 17, 33, 45, 65 */ + struct { + uint32_t dim_9 : 1; /* 3D LUT support for 9x9x9 */ + uint32_t dim_17 : 1; /* 3D LUT support for 17x17x17 */ + uint32_t dim_33 : 1; /* 3D LUT support for 33x33x33 */ + uint32_t dim_45 : 1; /* 3D LUT support for 45x45x45 */ + uint32_t dim_65 : 1; /* 3D LUT support for 65x65x65 */ + } lut_dim_caps; +}; + /** * struct mpc_color_caps - color pipeline capabilities for multiple pipe and * plane combined blocks
@@ -203,14 +237,21 @@ struct dpp_color_caps { * @shared_3d_lut: shared 3D LUT flag.
Can be either DPP or MPC, but single * instance * @ogam_rom_caps: pre-defined curve caps for regamma 1D LUT + * @mcm_3d_lut_caps: HW support cap for MCM LUT memory + * @rmcm_3d_lut_caps: HW support cap for RMCM LUT memory + * @preblend: whether color manager supports preblend with MPC */ struct mpc_color_caps { uint16_t gamut_remap : 1; uint16_t ogam_ram : 1; uint16_t ocsc : 1; uint16_t num_3dluts : 3; + uint16_t num_rmcm_3dluts : 3; uint16_t shared_3d_lut:1; struct rom_curve_caps ogam_rom_caps; + struct lut3d_caps mcm_3d_lut_caps; + struct lut3d_caps rmcm_3d_lut_caps; + bool preblend; }; /**
@@ -249,6 +290,7 @@ struct dc_caps { uint32_t i2c_speed_in_khz_hdcp; uint32_t dmdata_alloc_size; unsigned int max_cursor_size; + unsigned int max_buffered_cursor_size; unsigned int max_video_width; /* * max video plane width that can be safely assumed to be always
@@ -269,6 +311,7 @@ struct dc_caps { bool dmcub_support; bool zstate_support; bool ips_support; + bool ips_v2_support; uint32_t num_of_internal_disp; enum dp_protocol_version max_dp_protocol_version; unsigned int mall_size_per_mem_channel;
@@ -282,6 +325,7 @@ struct dc_caps { bool edp_dsc_support; bool vbios_lttpr_aware; bool vbios_lttpr_enable; + bool fused_io_supported; uint32_t max_otg_num; uint32_t max_cab_allocation_bytes; uint32_t cache_line_size;
@@ -303,6 +347,10 @@ struct dc_caps { /* Conservative limit for DCC cases which require ODM4:1 to support*/ uint32_t dcc_plane_width_limit; struct dc_scl_caps scl_caps; + uint8_t num_of_host_routers; + uint8_t num_of_dpias_per_host_router; + /* limit of the ODM only, could be limited by other factors (like pipe count) */ + uint8_t max_odm_combine_factor; }; struct dc_bug_wa {
@@ -447,6 +495,7 @@ struct dc_config { bool enable_windowed_mpo_odm; bool forceHBR2CP2520; // Used for switching between test patterns TPS4 and CP2520 uint32_t allow_edp_hotplug_detection; + bool skip_riommu_prefetch_wa; bool clamp_min_dcfclk; uint64_t vblank_alignment_dto_params; uint8_t vblank_alignment_max_frame_time_diff;
@@ -456,6 +505,7 @@ struct dc_config { bool use_spl; bool prefer_easf; bool use_pipe_ctx_sync_logic; + int smart_mux_version; bool ignore_dpref_ss; bool enable_mipi_converter_optimization; bool use_default_clock_table;
@@ -466,6 +516,7 @@ struct dc_config { bool EnableMinDispClkODM; bool enable_auto_dpm_test_logs; unsigned int disable_ips; + unsigned int disable_ips_rcg; unsigned int disable_ips_in_vpb; bool disable_ips_in_dpms_off; bool usb4_bw_alloc_support;
@@ -478,6 +529,8 @@ struct dc_config { bool set_pipe_unlock_order; bool enable_dpia_pre_training; bool unify_link_enc_assignment; + struct spl_sharpness_range dcn_sharpness_range; + struct spl_sharpness_range dcn_override_sharpness_range; }; enum visual_confirm {
@@ -489,6 +542,7 @@ enum visual_confirm { VISUAL_CONFIRM_SWAPCHAIN = 6, VISUAL_CONFIRM_FAMS = 7, VISUAL_CONFIRM_SWIZZLE = 9, + VISUAL_CONFIRM_SMARTMUX_DGPU = 10, VISUAL_CONFIRM_REPLAY = 12, VISUAL_CONFIRM_SUBVP = 14, VISUAL_CONFIRM_MCLK_SWITCH = 16,
@@ -496,6 +550,7 @@ enum visual_confirm { VISUAL_CONFIRM_HW_CURSOR = 20, VISUAL_CONFIRM_VABC = 21, VISUAL_CONFIRM_DCC = 22, + VISUAL_CONFIRM_EXPLICIT = 0x80000000, }; enum dc_psr_power_opts {
@@ -766,6 +821,7 @@ enum pg_hw_resources { PG_DCHVM, PG_DWB, PG_HPO, + PG_DCOH, PG_HW_RESOURCES_NUM_ELEMENT };
@@ -782,10 +838,8 @@ union dpia_debug_options { uint32_t disable_mst_dsc_work_around:1; /* bit 3 */ uint32_t enable_force_tbt3_work_around:1; /* bit 4 */ uint32_t disable_usb4_pm_support:1; /* bit 5 */ - uint32_t
enable_consolidated_dpia_dp_lt:1; /* bit 6 */ - uint32_t enable_dpia_pre_training:1; /* bit 7 */ - uint32_t unify_link_enc_assignment:1; /* bit 8 */ - uint32_t reserved:24; + uint32_t enable_usb4_bw_zero_alloc_patch:1; /* bit 6 */ + uint32_t reserved:25; } bits; uint32_t raw; };
@@ -902,12 +956,18 @@ struct dc_debug_options { bool voltage_align_fclk; bool disable_min_fclk; + bool hdcp_lc_force_fw_enable; + bool hdcp_lc_enable_sw_fallback; + bool disable_dfs_bypass; bool disable_dpp_power_gate; bool disable_hubp_power_gate; bool disable_dsc_power_gate; bool disable_optc_power_gate; bool disable_hpo_power_gate; + bool disable_io_clk_power_gate; + bool disable_mem_power_gate; + bool disable_dio_power_gate; int dsc_min_slice_height_override; int dsc_bpp_increment_div; bool disable_pplib_wm_range;
@@ -1144,7 +1204,7 @@ struct dc_init_data { uint32_t *dcn_reg_offsets; uint32_t *nbio_reg_offsets; uint32_t *clk_reg_offsets; - struct dml2_soc_bb *bb_from_dmub; + void *bb_from_dmub; }; struct dc_callback_init {
@@ -1245,6 +1305,12 @@ union dc_3dlut_state { }; +struct dc_rmcm_3dlut { + bool isInUse; + const struct dc_stream_state *stream; + uint8_t protection_bits; +}; + struct dc_3dlut { struct kref refcount; struct tetrahedral_params lut_3d;
@@ -1382,6 +1448,8 @@ struct dc_plane_state { int sharpness_level; enum linear_light_scaling linear_light_scaling; unsigned int sdr_white_level_nits; + struct spl_sharpness_range sharpness_range; + enum sharpness_range_source sharpness_source; }; struct dc_plane_info {
@@ -1418,6 +1486,174 @@ struct dc_scratch_space { struct dc_stream_state stream_state; }; +/* + * A link contains one or more sinks and their connected status. + * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported. + */ + struct dc_link { + struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK]; + unsigned int sink_count; + struct dc_sink *local_sink; + unsigned int link_index; + enum dc_connection_type type; + enum signal_type connector_signal; + enum dc_irq_source irq_source_hpd; + enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */ + enum dc_irq_source irq_source_read_request;/* Read Request */ + + bool is_hpd_filter_disabled; + bool dp_ss_off; + + /** + * @link_state_valid: + * + * If there is no link and local sink, this variable should be set to + * false. Otherwise, it should be set to true; usually, the function + * core_link_enable_stream sets this field to true. + */ + bool link_state_valid; + bool aux_access_disabled; + bool sync_lt_in_progress; + bool skip_stream_reenable; + bool is_internal_display; + /** @todo Rename. Flag an endpoint as having a programmable mapping to a DIG encoder. */ + bool is_dig_mapping_flexible; + bool hpd_status; /* HPD status of link without physical HPD pin. */ + bool is_hpd_pending; /* Indicates a new received hpd */ + + /* USB4 DPIA links skip verifying link cap, instead performing the fallback method + * for every link training. This is incompatible with DP LL compliance automation, + * which expects the same link settings to be used every retry on a link loss. + * This flag is used to skip the fallback when link loss occurs during automation. + */ + bool skip_fallback_on_link_loss; + + bool edp_sink_present; + + struct dp_trace dp_trace; + + /* caps is the same as reported_link_cap. link_training uses + * reported_link_cap. Will clean up.
TODO + */ + struct dc_link_settings reported_link_cap; + struct dc_link_settings verified_link_cap; + struct dc_link_settings cur_link_settings; + struct dc_lane_settings cur_lane_setting[LANE_COUNT_DP_MAX]; + struct dc_link_settings preferred_link_setting; + /* preferred_training_settings are override values that + * come from DM. DM is responsible for the memory + * management of the override pointers. + */ + struct dc_link_training_overrides preferred_training_settings; + struct dp_audio_test_data audio_test_data; + + uint8_t ddc_hw_inst; + + uint8_t hpd_src; + + uint8_t link_enc_hw_inst; + /* DIG link encoder ID. Used as index in link encoder resource pool. + * For links with fixed mapping to DIG, this is not changed after dc_link + * object creation. + */ + enum engine_id eng_id; + enum engine_id dpia_preferred_eng_id; + + bool test_pattern_enabled; + /* Pending/Current test pattern are only used to perform and track + * FIXED_VS retimer test pattern/lane adjustment override state. + * Pending allows link HWSS to differentiate PHY vs non-PHY pattern, + * to perform specific lane adjust overrides before setting certain + * PHY test patterns. In cases when lane adjust and set test pattern + * calls are not performed atomically (i.e. performing link training), + * pending_test_pattern will be invalid or contain a non-PHY test pattern + * and current_test_pattern will contain required context for any future + * set pattern/set lane adjust to transition between override state(s). + * */ + enum dp_test_pattern current_test_pattern; + enum dp_test_pattern pending_test_pattern; + + union compliance_test_state compliance_test_state; + + void *priv; + + struct ddc_service *ddc; + + enum dp_panel_mode panel_mode; + bool aux_mode; + + /* Private to DC core */ + + const struct dc *dc; + + struct dc_context *ctx; + + struct panel_cntl *panel_cntl; + struct link_encoder *link_enc; + struct graphics_object_id link_id; + /* Endpoint type distinguishes display endpoints which do not have entries + * in the BIOS connector table from those that do. Helps when tracking link + * encoder to display endpoint assignments. + */ + enum display_endpoint_type ep_type; + union ddi_channel_mapping ddi_channel_mapping; + struct connector_device_tag_info device_tag; + struct dpcd_caps dpcd_caps; + uint32_t dongle_max_pix_clk; + unsigned short chip_caps; + unsigned int dpcd_sink_count; + struct hdcp_caps hdcp_caps; + enum edp_revision edp_revision; + union dpcd_sink_ext_caps dpcd_sink_ext_caps; + + struct psr_settings psr_settings; + struct replay_settings replay_settings; + + /* Drive settings read from integrated info table */ + struct dc_lane_settings bios_forced_drive_settings; + + /* Vendor specific LTTPR workaround variables */ + uint8_t vendor_specific_lttpr_link_rate_wa; + bool apply_vendor_specific_lttpr_link_rate_wa; + + /* MST record stream using this link */ + struct link_flags { + bool dp_keep_receiver_powered; + bool dp_skip_DID2; + bool dp_skip_reset_segment; + bool dp_skip_fs_144hz; + bool dp_mot_reset_segment; + /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */ + bool dpia_mst_dsc_always_on; + /* Forced DPIA into TBT3 compatibility mode. 
*/ + bool dpia_forced_tbt3_mode; + bool dongle_mode_timing_override; + bool blank_stream_on_ocs_change; + bool read_dpcd204h_on_irq_hpd; + bool force_dp_ffe_preset; + bool skip_phy_ssc_reduction; + } wa_flags; + union dc_dp_ffe_preset forced_dp_ffe_preset; + struct link_mst_stream_allocation_table mst_stream_alloc_table; + + struct dc_link_status link_status; + struct dprx_states dprx_states; + + struct gpio *hpd_gpio; + enum dc_link_fec_state fec_state; + bool is_dds; + bool is_display_mux_present; + bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly + + struct dc_panel_config panel_config; + struct phy_state phy_state; + uint32_t phy_transition_bitmask; + // BW ALLOCATION USB4 ONLY + struct dc_dpia_bw_alloc dpia_bw_alloc_config; + bool skip_implict_edp_power_control; + enum backlight_control_type backlight_control_type; +}; + struct dc { struct dc_debug_options debug; struct dc_versions versions;
@@ -1431,6 +1667,7 @@ struct dc { uint8_t link_count; struct dc_link *links[MAX_LINKS]; + uint8_t lowest_dpia_link_index; struct link_service *link_srv; struct dc_state *current_state;
@@ -1460,6 +1697,10 @@ struct dc { /* Require to maintain clocks and bandwidth for UEFI enabled HW */ + /* For eDP to know the switching state of SmartMux */ + bool is_switch_in_progress_orig; + bool is_switch_in_progress_dest; + /* FBC compressor */ struct compressor *fbc_compressor;
@@ -1485,11 +1726,12 @@ struct dc { struct dc_scratch_space current_state; struct dc_scratch_space new_state; struct dc_stream_state temp_stream; // Used so we don't need to allocate stream on the stack + struct dc_link temp_link; bool pipes_to_unlock_first[MAX_PIPES]; /* Any of the pipes indicated here should be unlocked first */ } scratch; struct dml2_configuration_options dml2_options; - struct dml2_configuration_options dml2_tmp; + struct dml2_configuration_options dml2_dc_power_options; enum dc_acpi_cm_power_state power_state; };
@@ -1594,19 +1836,15 @@ enum dc_status dc_validate_with_context(struct dc *dc, const struct dc_validation_set set[], int set_count, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); bool dc_set_generic_gpio_for_stereo(bool enable, struct gpio_service *gpio_service); -/* - * fast_validate: we return after determining if we can support the new state, - * but before we populate the programming info - */ enum dc_status dc_validate_global_state( struct dc *dc, struct dc_state *new_ctx, - bool fast_validate); + enum dc_validate_mode validate_mode); bool dc_acquire_release_mpc_3dlut( struct dc *dc, bool acquire,
@@ -1651,170 +1889,6 @@ uint32_t dc_bandwidth_in_kbps_from_timing( const enum dc_link_encoding_format link_encoding); /* Link Interfaces */ -/* - * A link contains one or more sinks and their connected status. - * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported. - */ -struct dc_link { - struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK]; - unsigned int sink_count; - struct dc_sink *local_sink; - unsigned int link_index; - enum dc_connection_type type; - enum signal_type connector_signal; - enum dc_irq_source irq_source_hpd; - enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */ - - bool is_hpd_filter_disabled; - bool dp_ss_off; - - /** - * @link_state_valid: - * - * If there is no link and local sink, this variable should be set to - * false. Otherwise, it should be set to true; usually, the function - * core_link_enable_stream sets this field to true.
- */ - bool link_state_valid; - bool aux_access_disabled; - bool sync_lt_in_progress; - bool skip_stream_reenable; - bool is_internal_display; - /** @todo Rename. Flag an endpoint as having a programmable mapping to a DIG encoder. */ - bool is_dig_mapping_flexible; - bool hpd_status; /* HPD status of link without physical HPD pin. */ - bool is_hpd_pending; /* Indicates a new received hpd */ - - /* USB4 DPIA links skip verifying link cap, instead performing the fallback method - * for every link training. This is incompatible with DP LL compliance automation, - * which expects the same link settings to be used every retry on a link loss. - * This flag is used to skip the fallback when link loss occurs during automation. - */ - bool skip_fallback_on_link_loss; - - bool edp_sink_present; - - struct dp_trace dp_trace; - - /* caps is the same as reported_link_cap. link_traing use - * reported_link_cap. Will clean up. TODO - */ - struct dc_link_settings reported_link_cap; - struct dc_link_settings verified_link_cap; - struct dc_link_settings cur_link_settings; - struct dc_lane_settings cur_lane_setting[LANE_COUNT_DP_MAX]; - struct dc_link_settings preferred_link_setting; - /* preferred_training_settings are override values that - * come from DM. DM is responsible for the memory - * management of the override pointers. - */ - struct dc_link_training_overrides preferred_training_settings; - struct dp_audio_test_data audio_test_data; - - uint8_t ddc_hw_inst; - - uint8_t hpd_src; - - uint8_t link_enc_hw_inst; - /* DIG link encoder ID. Used as index in link encoder resource pool. - * For links with fixed mapping to DIG, this is not changed after dc_link - * object creation. - */ - enum engine_id eng_id; - enum engine_id dpia_preferred_eng_id; - - bool test_pattern_enabled; - /* Pending/Current test pattern are only used to perform and track - * FIXED_VS retimer test pattern/lane adjustment override state. - * Pending allows link HWSS to differentiate PHY vs non-PHY pattern, - * to perform specific lane adjust overrides before setting certain - * PHY test patterns. In cases when lane adjust and set test pattern - * calls are not performed atomically (i.e. performing link training), - * pending_test_pattern will be invalid or contain a non-PHY test pattern - * and current_test_pattern will contain required context for any future - * set pattern/set lane adjust to transition between override state(s). - * */ - enum dp_test_pattern current_test_pattern; - enum dp_test_pattern pending_test_pattern; - - union compliance_test_state compliance_test_state; - - void *priv; - - struct ddc_service *ddc; - - enum dp_panel_mode panel_mode; - bool aux_mode; - - /* Private to DC core */ - - const struct dc *dc; - - struct dc_context *ctx; - - struct panel_cntl *panel_cntl; - struct link_encoder *link_enc; - struct graphics_object_id link_id; - /* Endpoint type distinguishes display endpoints which do not have entries - * in the BIOS connector table from those that do. Helps when tracking link - * encoder to display endpoint assignments. 
- */ - enum display_endpoint_type ep_type; - union ddi_channel_mapping ddi_channel_mapping; - struct connector_device_tag_info device_tag; - struct dpcd_caps dpcd_caps; - uint32_t dongle_max_pix_clk; - unsigned short chip_caps; - unsigned int dpcd_sink_count; - struct hdcp_caps hdcp_caps; - enum edp_revision edp_revision; - union dpcd_sink_ext_caps dpcd_sink_ext_caps; - - struct psr_settings psr_settings; - struct replay_settings replay_settings; - - /* Drive settings read from integrated info table */ - struct dc_lane_settings bios_forced_drive_settings; - - /* Vendor specific LTTPR workaround variables */ - uint8_t vendor_specific_lttpr_link_rate_wa; - bool apply_vendor_specific_lttpr_link_rate_wa; - - /* MST record stream using this link */ - struct link_flags { - bool dp_keep_receiver_powered; - bool dp_skip_DID2; - bool dp_skip_reset_segment; - bool dp_skip_fs_144hz; - bool dp_mot_reset_segment; - /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */ - bool dpia_mst_dsc_always_on; - /* Forced DPIA into TBT3 compatibility mode. */ - bool dpia_forced_tbt3_mode; - bool dongle_mode_timing_override; - bool blank_stream_on_ocs_change; - bool read_dpcd204h_on_irq_hpd; - bool force_dp_ffe_preset; - } wa_flags; - union dc_dp_ffe_preset forced_dp_ffe_preset; - struct link_mst_stream_allocation_table mst_stream_alloc_table; - - struct dc_link_status link_status; - struct dprx_states dprx_states; - - struct gpio *hpd_gpio; - enum dc_link_fec_state fec_state; - bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly - - struct dc_panel_config panel_config; - struct phy_state phy_state; - uint32_t phy_transition_bitmask; - // BW ALLOCATON USB4 ONLY - struct dc_dpia_bw_alloc dpia_bw_alloc_config; - bool skip_implict_edp_power_control; - enum backlight_control_type backlight_control_type; -}; - /* Return an enumerated dc_link. * dc_link order is constant and determined at * boot time. They cannot be created or destroyed. 
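/* Editorial sketch, not part of this patch: the limits added earlier in this header (MAX_HOST_ROUTERS_NUM = 3, MAX_DPIA_PER_HOST_ROUTER = 3) make MAX_DPIA_NUM their product, 9. Assuming DPIA links are grouped contiguously per host router, the relationship can be illustrated as: unsigned int hr_index = dpia_index / MAX_DPIA_PER_HOST_ROUTER; unsigned int hr_slot = dpia_index % MAX_DPIA_PER_HOST_ROUTER; the contiguity is an assumption for illustration only, and the real lookup is dc_get_host_router_index(), declared further below. */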
@@ -2366,17 +2440,12 @@ void dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link( struct dc_link *link, int peak_bw); /* - * Validate the BW of all the valid DPIA links to make sure it doesn't exceed - * available BW for each host router + * Calculates the DP tunneling bandwidth required for the stream timing + * and aggregates the stream bandwidth for the respective DP tunneling link * - * @dc: pointer to dc struct - * @stream: pointer to all possible streams - * @count: number of valid DPIA streams - * - * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE + * return: dc_status */ -bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, - const unsigned int count); +enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, const struct dc_state *new_ctx); /* Sink Interfaces - A sink corresponds to a display output device */ @@ -2586,13 +2655,23 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context); +bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index); + /* DSC Interfaces */ #include "dc_dsc.h" +void dc_get_visual_confirm_for_stream( + struct dc *dc, + struct dc_stream_state *stream_state, + struct tg_color *color); + /* Disable acc mode Interfaces */ void dc_disable_accelerated_mode(struct dc *dc); bool dc_is_timing_changed(struct dc_stream_state *cur_stream, struct dc_stream_state *new_stream); +bool dc_is_cursor_limit_pending(struct dc *dc); +bool dc_can_clear_cursor_limit(struct dc *dc); + #endif /* DC_INTERFACE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 614e03bfd598..f5ef1a07078e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -39,6 +39,7 @@ #define CTX dc_dmub_srv->ctx #define DC_LOGGER CTX->logger +#define GPINT_RETRY_NUM 20 static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc, struct dmub_srv *dmub) @@ -70,20 +71,28 @@ void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv) } } -void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv) +bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv) { - struct dmub_srv *dmub = dc_dmub_srv->dmub; - struct dc_context *dc_ctx = dc_dmub_srv->ctx; + struct dmub_srv *dmub; + struct dc_context *dc_ctx; enum dmub_status status; + if (!dc_dmub_srv || !dc_dmub_srv->dmub) + return false; + + dc_ctx = dc_dmub_srv->ctx; + dmub = dc_dmub_srv->dmub; + do { - status = dmub_srv_wait_for_idle(dmub, 100000); + status = dmub_srv_wait_for_pending(dmub, 100000); } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK); if (status != DMUB_STATUS_OK) { DC_ERROR("Error waiting for DMUB idle: status=%d\n", status); dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); } + + return status == DMUB_STATUS_OK; } void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv) @@ -126,7 +135,49 @@ void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dc_dmub_srv, } } -bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, +static bool dc_dmub_srv_reg_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, + unsigned int count, + union dmub_rb_cmd *cmd_list) +{ + struct dc_context *dc_ctx; + struct dmub_srv *dmub; + enum dmub_status status = DMUB_STATUS_OK; + int i; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) + return false; + + dc_ctx = 
dc_dmub_srv->ctx; + dmub = dc_dmub_srv->dmub; + + for (i = 0 ; i < count; i++) { + /* confirm no messages pending */ + do { + status = dmub_srv_wait_for_idle(dmub, 100000); + } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK); + + /* queue command */ + if (status == DMUB_STATUS_OK) + status = dmub_srv_reg_cmd_execute(dmub, &cmd_list[i]); + + /* check for errors */ + if (status != DMUB_STATUS_OK) { + break; + } + } + + if (status != DMUB_STATUS_OK) { + if (status != DMUB_STATUS_POWER_STATE_D3) { + DC_ERROR("Error starting DMUB execution: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + } + return false; + } + + return true; +} + +static bool dc_dmub_srv_fb_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list) { @@ -143,20 +194,25 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, for (i = 0 ; i < count; i++) { // Queue command - status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); + if (!cmd_list[i].cmd_common.header.multi_cmd_pending || + dmub_rb_num_free(&dmub->inbox1.rb) >= count - i) { + status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]); + } else { + status = DMUB_STATUS_QUEUE_FULL; + } if (status == DMUB_STATUS_QUEUE_FULL) { /* Execute and wait for queue to become empty again. */ - status = dmub_srv_cmd_execute(dmub); + status = dmub_srv_fb_cmd_execute(dmub); if (status == DMUB_STATUS_POWER_STATE_D3) return false; do { - status = dmub_srv_wait_for_idle(dmub, 100000); + status = dmub_srv_wait_for_inbox_free(dmub, 100000, count - i); } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK); /* Requeue the command. */ - status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); + status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]); } if (status != DMUB_STATUS_OK) { @@ -168,7 +224,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, } } - status = dmub_srv_cmd_execute(dmub); + status = dmub_srv_fb_cmd_execute(dmub); if (status != DMUB_STATUS_OK) { if (status != DMUB_STATUS_POWER_STATE_D3) { DC_ERROR("Error starting DMUB execution: status=%d\n", status); @@ -180,6 +236,26 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, return true; } +bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, + unsigned int count, + union dmub_rb_cmd *cmd_list) +{ + bool res = false; + + if (dc_dmub_srv && dc_dmub_srv->dmub) { + if (dc_dmub_srv->dmub->inbox_type == DMUB_CMD_INTERFACE_REG) { + res = dc_dmub_srv_reg_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list); + } else { + res = dc_dmub_srv_fb_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list); + } + + if (res) + res = dmub_srv_update_inbox_status(dc_dmub_srv->dmub) == DMUB_STATUS_OK; + } + + return res; +} + bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv, enum dm_dmub_wait_type wait_type, union dmub_rb_cmd *cmd_list) @@ -202,7 +278,8 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv, DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); if (!dmub->debug.timeout_info.timeout_occured) { dmub->debug.timeout_info.timeout_occured = true; - dmub->debug.timeout_info.timeout_cmd = *cmd_list; + if (cmd_list) + dmub->debug.timeout_info.timeout_cmd = *cmd_list; dmub->debug.timeout_info.timestamp = dm_get_timestamp(dc_dmub_srv->ctx); } dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); @@ -210,8 +287,9 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv, } // Copy data back from ring buffer into command 
- if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) - dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list); + if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY && cmd_list) { + dmub_srv_cmd_get_response(dc_dmub_srv->dmub, cmd_list); + } } return true; @@ -224,74 +302,10 @@ bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type) { - struct dc_context *dc_ctx; - struct dmub_srv *dmub; - enum dmub_status status; - int i; - - if (!dc_dmub_srv || !dc_dmub_srv->dmub) + if (!dc_dmub_srv_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list)) return false; - dc_ctx = dc_dmub_srv->ctx; - dmub = dc_dmub_srv->dmub; - - for (i = 0 ; i < count; i++) { - // Queue command - status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); - - if (status == DMUB_STATUS_QUEUE_FULL) { - /* Execute and wait for queue to become empty again. */ - status = dmub_srv_cmd_execute(dmub); - if (status == DMUB_STATUS_POWER_STATE_D3) - return false; - - status = dmub_srv_wait_for_idle(dmub, 100000); - if (status != DMUB_STATUS_OK) - return false; - - /* Requeue the command. */ - status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); - } - - if (status != DMUB_STATUS_OK) { - if (status != DMUB_STATUS_POWER_STATE_D3) { - DC_ERROR("Error queueing DMUB command: status=%d\n", status); - dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); - } - return false; - } - } - - status = dmub_srv_cmd_execute(dmub); - if (status != DMUB_STATUS_OK) { - if (status != DMUB_STATUS_POWER_STATE_D3) { - DC_ERROR("Error starting DMUB execution: status=%d\n", status); - dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); - } - return false; - } - - // Wait for DMUB to process command - if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) { - if (dc_dmub_srv->ctx->dc->debug.disable_timeout) { - do { - status = dmub_srv_wait_for_idle(dmub, 100000); - } while (status != DMUB_STATUS_OK); - } else - status = dmub_srv_wait_for_idle(dmub, 100000); - - if (status != DMUB_STATUS_OK) { - DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); - dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); - return false; - } - - // Copy data back from ring buffer into command - if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) - dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list); - } - - return true; + return dc_dmub_srv_wait_for_idle(dc_dmub_srv, wait_type, cmd_list); } bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv) @@ -1243,7 +1257,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + dc_dmub_srv_wait_for_idle(dc->ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL); memset(&new_signals, 0, sizeof(new_signals)); @@ -1255,12 +1269,16 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) new_signals.bits.allow_ips1 = 1; new_signals.bits.allow_ips2 = 1; new_signals.bits.allow_z10 = 1; + // New in IPSv2.0 + new_signals.bits.allow_ips1z8 = 1; } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) { new_signals.bits.allow_ips1 = 1; } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) { + // IPSv1.0 only new_signals.bits.allow_pg = 1; new_signals.bits.allow_ips1 = 1; } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) { + // IPSv1.0 only new_signals.bits.allow_pg = 1; new_signals.bits.allow_ips1 = 1; new_signals.bits.allow_ips2 = 1; @@ 
-1272,6 +1290,8 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) new_signals.bits.allow_ips1 = 1; new_signals.bits.allow_ips2 = 1; new_signals.bits.allow_z10 = 1; + // New in IPSv2.0 + new_signals.bits.allow_ips1z8 = 1; } else { /* RCG only */ new_signals.bits.allow_pg = 0;
@@ -1279,8 +1299,28 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) new_signals.bits.allow_ips1 = 0; new_signals.bits.allow_ips2 = 0; new_signals.bits.allow_z10 = 0; } + } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_Z8_RETENTION) { + new_signals.bits.allow_pg = 1; + new_signals.bits.allow_ips1 = 1; + new_signals.bits.allow_ips2 = 1; + new_signals.bits.allow_z10 = 1; + } + // Setting RCG allow bits (IPSv2.0) + if (dc->config.disable_ips_rcg == DMUB_IPS_RCG_ENABLE) { + new_signals.bits.allow_ips0_rcg = 1; + new_signals.bits.allow_ips1_rcg = 1; + } else if (dc->config.disable_ips_rcg == DMUB_IPS0_RCG_DISABLE) { + new_signals.bits.allow_ips1_rcg = 1; + } else if (dc->config.disable_ips_rcg == DMUB_IPS1_RCG_DISABLE) { + new_signals.bits.allow_ips0_rcg = 1; + } + // IPS dynamic allow bits (IPSv2 change, vpb use case) + if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_IPS1_AND_RCG) { + new_signals.bits.allow_dynamic_ips1 = 1; + } else if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_ALL) { + new_signals.bits.allow_dynamic_ips1 = 1; + new_signals.bits.allow_dynamic_ips1_z8 = 1; } - ips_driver->signals = new_signals; dc_dmub_srv->driver_signals = ips_driver->signals; }
@@ -1304,7 +1344,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) { struct dc_dmub_srv *dc_dmub_srv; - uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0; + uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0, ips1z8_exit_count = 0; if (dc->debug.dmcub_emulation) return;
@@ -1324,45 +1364,49 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) rcg_exit_count = ips_fw->rcg_exit_count; ips1_exit_count = ips_fw->ips1_exit_count; ips2_exit_count = ips_fw->ips2_exit_count; + ips1z8_exit_count = ips_fw->ips1_z8ret_exit_count; ips_driver->signals.all = 0; dc_dmub_srv->driver_signals = ips_driver->signals; DC_LOG_IPS( - "%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u) (count rcg=%u ips1=%u ips2=%u)", + "%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u ips1z8=%u) (count rcg=%u ips1=%u ips2=%u ips1_z8=%u)", __func__, ips_driver->signals.bits.allow_ips1, ips_driver->signals.bits.allow_ips2, ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit, + ips_fw->signals.bits.ips1z8_commit, ips_fw->rcg_entry_count, ips_fw->ips1_entry_count, - ips_fw->ips2_entry_count); + ips_fw->ips2_entry_count, + ips_fw->ips1_z8ret_entry_count); /* Note: register access has technically not resumed for DCN here, but we * need to message PMFW through our standard register interface.
*/ dc_dmub_srv->needs_idle_wake = false; - if ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) && + if (!dc->caps.ips_v2_support && ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) && (!dc->debug.optimize_ips_handshake || - ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) { + ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle))) { DC_LOG_IPS( - "wait IPS2 eval (ips1_commit=%u ips2_commit=%u)", + "wait IPS2 eval (ips1_commit=%u ips2_commit=%u )", ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); if (!dc->debug.optimize_ips_handshake || !ips_fw->signals.bits.ips2_commit) udelay(dc->debug.ips2_eval_delay_us); - if (ips_fw->signals.bits.ips2_commit) { - DC_LOG_IPS( - "exit IPS2 #1 (ips1_commit=%u ips2_commit=%u)", - ips_fw->signals.bits.ips1_commit, - ips_fw->signals.bits.ips2_commit); + DC_LOG_IPS( + "exit IPS2 #1 (ips1_commit=%u ips2_commit=%u)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); - // Tell PMFW to exit low power state - dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); + // Tell PMFW to exit low power state + dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); + + if (ips_fw->signals.bits.ips2_commit) { DC_LOG_IPS( "wait IPS2 entry delay (ips1_commit=%u ips2_commit=%u)", @@ -1400,35 +1444,38 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); - dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub); + dmub_srv_sync_inboxes(dc->ctx->dmub_srv->dmub); } } dc_dmub_srv_notify_idle(dc, false); if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) { DC_LOG_IPS( - "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)", + "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u ips1z8=%u)", ips_fw->signals.bits.ips1_commit, - ips_fw->signals.bits.ips2_commit); + ips_fw->signals.bits.ips2_commit, + ips_fw->signals.bits.ips1z8_commit); while (ips_fw->signals.bits.ips1_commit) udelay(1); DC_LOG_IPS( - "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u)", + "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u ips1z8=%u)", ips_fw->signals.bits.ips1_commit, - ips_fw->signals.bits.ips2_commit); + ips_fw->signals.bits.ips2_commit, + ips_fw->signals.bits.ips1z8_commit); } } if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) ASSERT(0); - DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u)", + DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u ips1z8=%u)", __func__, rcg_exit_count, ips1_exit_count, - ips2_exit_count); + ips2_exit_count, + ips1z8_exit_count); } void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state) @@ -1641,7 +1688,7 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com return result; } -void dc_dmub_srv_fams2_update_config(struct dc *dc, +static void dc_dmub_srv_rb_based_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable) { @@ -1654,7 +1701,8 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc, /* fill in generic command header */ global_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG; - global_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header); + global_cmd->header.payload_bytes = + sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header); if (enable) { /* send global configuration parameters */ @@ -1673,11 
+1721,13 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc, /* configure command header */ stream_base_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; stream_base_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG; - stream_base_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header); + stream_base_cmd->header.payload_bytes = + sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header); stream_base_cmd->header.multi_cmd_pending = 1; stream_sub_state_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; stream_sub_state_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG; - stream_sub_state_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header); + stream_sub_state_cmd->header.payload_bytes = + sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header); stream_sub_state_cmd->header.multi_cmd_pending = 1; /* copy stream static base state */ memcpy(&stream_base_cmd->config, @@ -1704,6 +1754,63 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc, dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT); } +static void dc_dmub_srv_ib_based_fams2_update_config(struct dc *dc, + struct dc_state *context, + bool enable) +{ + struct dmub_fams2_config_v2 *config = (struct dmub_fams2_config_v2 *)dc->ctx->dmub_srv->dmub->ib_mem_gart.cpu_addr; + union dmub_rb_cmd cmd; + uint32_t i; + + memset(config, 0, sizeof(*config)); + memset(&cmd, 0, sizeof(cmd)); + + cmd.ib_fams2_config.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; + cmd.ib_fams2_config.header.sub_type = DMUB_CMD__FAMS2_IB_CONFIG; + + cmd.ib_fams2_config.ib_data.src.quad_part = dc->ctx->dmub_srv->dmub->ib_mem_gart.gpu_addr; + cmd.ib_fams2_config.ib_data.size = sizeof(*config); + + if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) { + /* copy static feature configuration overrides */ + config->global.features.bits.enable_stall_recovery = dc->debug.fams2_config.bits.enable_stall_recovery; + config->global.features.bits.enable_offload_flip = dc->debug.fams2_config.bits.enable_offload_flip; + config->global.features.bits.enable_debug = dc->debug.fams2_config.bits.enable_debug; + + /* send global configuration parameters */ + memcpy(&config->global, &context->bw_ctx.bw.dcn.fams2_global_config, + sizeof(struct dmub_cmd_fams2_global_config)); + + /* construct per-stream configs */ + for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) { + /* copy stream static base state */ + memcpy(&config->stream_v1[i].base, + &context->bw_ctx.bw.dcn.fams2_stream_base_params[i], + sizeof(config->stream_v1[i].base)); + + /* copy stream static sub-state */ + memcpy(&config->stream_v1[i].sub_state, + &context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2[i], + sizeof(config->stream_v1[i].sub_state)); + } + } + + config->global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2; + config->global.features.bits.enable = enable; + + dm_execute_dmub_cmd_list(dc->ctx, 1, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + +void dc_dmub_srv_fams2_update_config(struct dc *dc, + struct dc_state *context, + bool enable) +{ + if (dc->debug.fams_version.major == 2) + dc_dmub_srv_rb_based_fams2_update_config(dc, context, enable); + if (dc->debug.fams_version.major == 3) + dc_dmub_srv_ib_based_fams2_update_config(dc, context, enable); +} + void dc_dmub_srv_fams2_drr_update(struct dc *dc, uint32_t tg_inst, uint32_t vtotal_min, @@ -1723,7 +1830,8 @@ void dc_dmub_srv_fams2_drr_update(struct dc *dc, 
cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid_frame_num = vtotal_mid_frame_num; cmd.fams2_drr_update.dmub_optc_state_req.program_manual_trigger = program_manual_trigger; - cmd.fams2_drr_update.header.payload_bytes = sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header); + cmd.fams2_drr_update.header.payload_bytes = + sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header); dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } @@ -1759,7 +1867,8 @@ void dc_dmub_srv_fams2_passthrough_flip( /* build command header */ cmds[num_cmds].fams2_flip.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; cmds[num_cmds].fams2_flip.header.sub_type = DMUB_CMD__FAMS2_FLIP; - cmds[num_cmds].fams2_flip.header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2_flip); + cmds[num_cmds].fams2_flip.header.payload_bytes = + sizeof(struct dmub_rb_cmd_fams2_flip) - sizeof(struct dmub_cmd_header); /* for chaining multiple commands, all but last command should set to 1 */ cmds[num_cmds].fams2_flip.header.multi_cmd_pending = 1; @@ -1827,80 +1936,267 @@ void dc_dmub_srv_fams2_passthrough_flip( } } -bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement) + +bool dc_dmub_srv_ips_residency_cntl(const struct dc_context *ctx, uint8_t panel_inst, bool start_measurement) { - bool result; + union dmub_rb_cmd cmd; - if (!dc_dmub_srv || !dc_dmub_srv->dmub) + memset(&cmd, 0, sizeof(cmd)); + + cmd.ips_residency_cntl.header.type = DMUB_CMD__IPS; + cmd.ips_residency_cntl.header.sub_type = DMUB_CMD__IPS_RESIDENCY_CNTL; + cmd.ips_residency_cntl.header.payload_bytes = sizeof(struct dmub_cmd_ips_residency_cntl_data); + + // only panel_inst=0 is supported at the moment + cmd.ips_residency_cntl.cntl_data.panel_inst = panel_inst; + cmd.ips_residency_cntl.cntl_data.start_measurement = start_measurement; + + if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + return false; + + return true; +} + +bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t panel_inst, struct dmub_ips_residency_info *driver_info, + enum ips_residency_mode ips_mode) +{ + union dmub_rb_cmd cmd; + uint32_t bytes = sizeof(struct dmub_ips_residency_info); + + dmub_flush_buffer_mem(&ctx->dmub_srv->dmub->scratch_mem_fb); + memset(&cmd, 0, sizeof(cmd)); + + cmd.ips_query_residency_info.header.type = DMUB_CMD__IPS; + cmd.ips_query_residency_info.header.sub_type = DMUB_CMD__IPS_QUERY_RESIDENCY_INFO; + cmd.ips_query_residency_info.header.payload_bytes = sizeof(struct dmub_cmd_ips_query_residency_info_data); + + cmd.ips_query_residency_info.info_data.dest.quad_part = ctx->dmub_srv->dmub->scratch_mem_fb.gpu_addr; + cmd.ips_query_residency_info.info_data.size = bytes; + cmd.ips_query_residency_info.info_data.panel_inst = panel_inst; + cmd.ips_query_residency_info.info_data.ips_mode = (uint32_t)ips_mode; + + if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) || + cmd.ips_query_residency_info.header.ret_status == 0) return false; - result = dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IPS_RESIDENCY, - start_measurement, NULL, DM_DMUB_WAIT_TYPE_WAIT); + // copy the result to the output since ret_status != 0 means the command returned data + memcpy(driver_info, ctx->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes); + + return true; +} + +bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv) +{ + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct 
dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_INIT_CONFIG; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + + lsdma_data->u.init_data.gpu_addr_base.quad_part = dc_ctx->dmub_srv->dmub->lsdma_rb_fb.gpu_addr; + lsdma_data->u.init_data.ring_size = dc_ctx->dmub_srv->dmub->lsdma_rb_fb.size; + + result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); + + if (!result) + DC_ERROR("LSDMA Init failed in DMUB"); return result; } -void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output) +bool dmub_lsdma_send_linear_copy_packet( + struct dc_dmub_srv *dc_dmub_srv, + uint64_t src_addr, + uint64_t dst_addr, + uint32_t count) { - uint32_t i; - enum dmub_gpint_command command_code; + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; - if (!dc_dmub_srv || !dc_dmub_srv->dmub) - return; + memset(&cmd, 0, sizeof(cmd)); - switch (output->ips_mode) { - case DMUB_IPS_MODE_IPS1_MAX: - command_code = DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER; - break; - case DMUB_IPS_MODE_IPS2: - command_code = DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER; - break; - case DMUB_IPS_MODE_IPS1_RCG: - command_code = DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER; - break; - case DMUB_IPS_MODE_IPS1_ONO2_ON: - command_code = DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER; - break; - default: - command_code = DMUB_GPINT__INVALID_COMMAND; - break; - } + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_LINEAR_COPY; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; - if (command_code == DMUB_GPINT__INVALID_COMMAND) + lsdma_data->u.linear_copy_data.count = count - 1; // LSDMA controller expects bytes to copy -1 + lsdma_data->u.linear_copy_data.src_lo = src_addr & 0xFFFFFFFF; + lsdma_data->u.linear_copy_data.src_hi = (src_addr >> 32) & 0xFFFFFFFF; + lsdma_data->u.linear_copy_data.dst_lo = dst_addr & 0xFFFFFFFF; + lsdma_data->u.linear_copy_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF; + + result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); + + if (!result) + DC_ERROR("LSDMA Linear Copy failed in DMUB"); + + return result; +} + +bool dmub_lsdma_send_tiled_to_tiled_copy_command( + struct dc_dmub_srv *dc_dmub_srv, + struct lsdma_send_tiled_to_tiled_copy_command_params params) +{ + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_TILED_TO_TILED_COPY; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + + lsdma_data->u.tiled_copy_data.src_addr_lo = params.src_addr & 0xFFFFFFFF; + lsdma_data->u.tiled_copy_data.src_addr_hi = (params.src_addr >> 32) & 0xFFFFFFFF; + lsdma_data->u.tiled_copy_data.dst_addr_lo = params.dst_addr & 0xFFFFFFFF; + lsdma_data->u.tiled_copy_data.dst_addr_hi = (params.dst_addr >> 32) & 0xFFFFFFFF; + lsdma_data->u.tiled_copy_data.src_x = params.src_x; + lsdma_data->u.tiled_copy_data.src_y = params.src_y; + lsdma_data->u.tiled_copy_data.dst_x = params.dst_x; + lsdma_data->u.tiled_copy_data.dst_y = params.dst_y; + lsdma_data->u.tiled_copy_data.src_width = params.src_width - 1; // LSDMA 
controller expects width -1 + lsdma_data->u.tiled_copy_data.dst_width = params.dst_width - 1; // LSDMA controller expects width -1 + lsdma_data->u.tiled_copy_data.src_swizzle_mode = params.swizzle_mode; + lsdma_data->u.tiled_copy_data.dst_swizzle_mode = params.swizzle_mode; + lsdma_data->u.tiled_copy_data.src_element_size = params.element_size; + lsdma_data->u.tiled_copy_data.dst_element_size = params.element_size; + lsdma_data->u.tiled_copy_data.rect_x = params.rect_x; + lsdma_data->u.tiled_copy_data.rect_y = params.rect_y; + lsdma_data->u.tiled_copy_data.dcc = params.dcc; + lsdma_data->u.tiled_copy_data.tmz = params.tmz; + lsdma_data->u.tiled_copy_data.read_compress = params.read_compress; + lsdma_data->u.tiled_copy_data.write_compress = params.write_compress; + lsdma_data->u.tiled_copy_data.src_height = params.src_height - 1; // LSDMA controller expects height -1 + lsdma_data->u.tiled_copy_data.dst_height = params.dst_height - 1; // LSDMA controller expects height -1 + lsdma_data->u.tiled_copy_data.data_format = params.data_format; + lsdma_data->u.tiled_copy_data.max_com = params.max_com; + lsdma_data->u.tiled_copy_data.max_uncom = params.max_uncom; + + result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); + + if (!result) + DC_ERROR("LSDMA Tiled to Tiled Copy failed in DMUB"); + + return result; +} + +bool dmub_lsdma_send_pio_copy_command( + struct dc_dmub_srv *dc_dmub_srv, + uint64_t src_addr, + uint64_t dst_addr, + uint32_t byte_count, + uint32_t overlap_disable) +{ + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_PIO_COPY; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + + lsdma_data->u.pio_copy_data.packet.fields.byte_count = byte_count; + lsdma_data->u.pio_copy_data.packet.fields.overlap_disable = overlap_disable; + lsdma_data->u.pio_copy_data.src_lo = src_addr & 0xFFFFFFFF; + lsdma_data->u.pio_copy_data.src_hi = (src_addr >> 32) & 0xFFFFFFFF; + lsdma_data->u.pio_copy_data.dst_lo = dst_addr & 0xFFFFFFFF; + lsdma_data->u.pio_copy_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF; + + result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); + + if (!result) + DC_ERROR("LSDMA PIO Copy failed in DMUB"); + + return result; +} + +bool dmub_lsdma_send_pio_constfill_command( + struct dc_dmub_srv *dc_dmub_srv, + uint64_t dst_addr, + uint32_t byte_count, + uint32_t data) +{ + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_PIO_CONSTFILL; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + + lsdma_data->u.pio_constfill_data.packet.fields.constant_fill = 1; + lsdma_data->u.pio_constfill_data.packet.fields.byte_count = byte_count; + lsdma_data->u.pio_constfill_data.dst_lo = dst_addr & 0xFFFFFFFF; + lsdma_data->u.pio_constfill_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF; + lsdma_data->u.pio_constfill_data.data = data; + + result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); + + if (!result) + DC_ERROR("LSDMA PIO Constfill failed in DMUB"); + + return result; +} + +bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, 
uint32_t reg_addr, uint32_t reg_data) +{ + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_POLL_REG_WRITE; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + + lsdma_data->u.reg_write_data.reg_addr = reg_addr; + lsdma_data->u.reg_write_data.reg_data = reg_data; + + result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); + + if (!result) + DC_ERROR("LSDMA Poll Reg failed in DMUB"); + + return result; +} + +void dc_dmub_srv_release_hw(const struct dc *dc) +{ + struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv; + union dmub_rb_cmd cmd = {0}; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) return; - // send gpint commands and wait for ack - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT, - (uint16_t)(output->ips_mode), - &output->residency_percent, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->residency_percent = 0; - - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER, - (uint16_t)(output->ips_mode), - &output->entry_counter, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->entry_counter = 0; - - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO, - (uint16_t)(output->ips_mode), - &output->total_active_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->total_active_time_us[0] = 0; - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_HI, - (uint16_t)(output->ips_mode), - &output->total_active_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->total_active_time_us[1] = 0; - - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO, - (uint16_t)(output->ips_mode), - &output->total_inactive_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->total_inactive_time_us[0] = 0; - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI, - (uint16_t)(output->ips_mode), - &output->total_inactive_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->total_inactive_time_us[1] = 0; - - // NUM_IPS_HISTOGRAM_BUCKETS = 16 - for (i = 0; i < 16; i++) - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, command_code, i, &output->histogram[i], - DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->histogram[i] = 0; + memset(&cmd, 0, sizeof(cmd)); + cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT; + cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_RELEASE_HW; + cmd.idle_opt_notify_idle.header.payload_bytes = + sizeof(cmd.idle_opt_notify_idle) - + sizeof(cmd.idle_opt_notify_idle.header); + + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index a636f4c3f01d..8ea320f21269 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -58,7 +58,7 @@ struct dc_dmub_srv { bool needs_idle_wake; }; -void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv); +bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv); bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv); @@ -210,6 +210,60 @@ void dc_dmub_srv_fams2_passthrough_flip( struct dc_surface_update *srf_updates, int 
surface_count); +bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv); +bool dmub_lsdma_send_linear_copy_packet( + struct dc_dmub_srv *dc_dmub_srv, + uint64_t src_addr, + uint64_t dst_addr, + uint32_t count); +bool dmub_lsdma_send_pio_copy_command( + struct dc_dmub_srv *dc_dmub_srv, + uint64_t src_addr, + uint64_t dst_addr, + uint32_t byte_count, + uint32_t overlap_disable); +bool dmub_lsdma_send_pio_constfill_command( + struct dc_dmub_srv *dc_dmub_srv, + uint64_t dst_addr, + uint32_t byte_count, + uint32_t data); + +struct lsdma_send_tiled_to_tiled_copy_command_params { + uint64_t src_addr; + uint64_t dst_addr; + + uint32_t src_x : 16; + uint32_t src_y : 16; + + uint32_t dst_x : 16; + uint32_t dst_y : 16; + + uint32_t src_width : 16; + uint32_t dst_width : 16; + + uint32_t rect_x : 16; + uint32_t rect_y : 16; + + uint32_t src_height : 16; + uint32_t dst_height : 16; + + uint32_t data_format : 6; + uint32_t swizzle_mode : 5; + uint32_t element_size : 3; + uint32_t dcc : 1; + uint32_t tmz : 1; + uint32_t read_compress : 2; + uint32_t write_compress : 2; + uint32_t max_com : 2; + uint32_t max_uncom : 1; + uint32_t padding : 9; +}; + +bool dmub_lsdma_send_tiled_to_tiled_copy_command( + struct dc_dmub_srv *dc_dmub_srv, + struct lsdma_send_tiled_to_tiled_copy_command_params params); +bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uint32_t reg_addr, uint32_t reg_data); + /** * struct ips_residency_info - struct containing info from dmub_ips_residency_stats * @@ -223,7 +277,7 @@ void dc_dmub_srv_fams2_passthrough_flip( * @histogram: Histogram of given IPS state durations - bucket definitions in dmub_ips.c */ struct ips_residency_info { - enum dmub_ips_mode ips_mode; + enum ips_residency_mode ips_mode; unsigned int residency_percent; unsigned int entry_counter; unsigned int total_active_time_us[2]; @@ -231,21 +285,16 @@ struct ips_residency_info { unsigned int histogram[16]; }; -/** - * bool dc_dmub_srv_ips_residency_cntl() - Controls IPS residency measurement status - * - * @dc_dmub_srv: The DC DMUB service pointer - * @start_measurement: Describes whether to start or stop measurement - * - * Return: true if GPINT was sent successfully, false otherwise - */ -bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement); +bool dc_dmub_srv_ips_residency_cntl(const struct dc_context *ctx, uint8_t panel_inst, bool start_measurement); + +bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t panel_inst, + struct dmub_ips_residency_info *driver_info, + enum ips_residency_mode ips_mode); /** - * bool dc_dmub_srv_ips_query_residency_info() - Queries DMCUB for residency info + * dc_dmub_srv_release_hw() - Notifies DMUB service that HW access is no longer required. 
* - * @dc_dmub_srv: The DC DMUB service pointer - * @output: Output struct to copy the the residency info to + * @dc - pointer to DC object */ -void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output); +void dc_dmub_srv_release_hw(const struct dc *dc); #endif /* _DMUB_DC_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 77c87ad57220..5ce1be362534 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -159,6 +159,16 @@ struct dc_link_settings { uint8_t link_rate_set; }; +struct dc_tunnel_settings { + bool should_enable_dp_tunneling; + bool should_use_dp_bw_allocation; + uint8_t cm_id; + uint8_t group_id; + uint32_t bw_granularity; + uint32_t estimated_bw; + uint32_t allocated_bw; +}; + union dc_dp_ffe_preset { struct { uint8_t level : 4; @@ -943,10 +953,30 @@ union dpia_info { uint8_t raw; }; +/* DPCD[0xE0020] USB4_DRIVER_BW_CAPABILITY register. */ +union usb4_driver_bw_cap { + struct { + uint8_t rsvd :7; + uint8_t driver_bw_alloc_support :1; + } bits; + uint8_t raw; +}; + +/* DPCD[0xE0021] DP_IN_ADAPTER_TUNNEL_INFORMATION register. */ +union dpia_tunnel_info { + struct { + uint8_t group_id :3; + uint8_t rsvd :5; + } bits; + uint8_t raw; +}; + /* DP Tunneling over USB4 */ struct dpcd_usb4_dp_tunneling_info { union dp_tun_cap_support dp_tun_cap; union dpia_info dpia_info; + union usb4_driver_bw_cap driver_bw_cap; + union dpia_tunnel_info dpia_tunnel_info; uint8_t usb4_driver_id; uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN]; }; @@ -1157,8 +1187,8 @@ struct dc_lttpr_caps { union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates; union dp_alpm_lttpr_cap alpm; uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1]; - uint8_t lttpr_ieee_oui[3]; - uint8_t lttpr_device_id[6]; + uint8_t lttpr_ieee_oui[3]; // Always read from closest LTTPR to host + uint8_t lttpr_device_id[6]; // Always read from closest LTTPR to host }; struct dc_dongle_dfp_cap_ext { @@ -1486,5 +1516,11 @@ struct dp_trace { # ifndef DP_TUNNELING_BW_ALLOC_CAP_CHANGED # define DP_TUNNELING_BW_ALLOC_CAP_CHANGED (1 << 3) # endif +# ifndef DPTX_BW_ALLOC_UNMASK_IRQ +# define DPTX_BW_ALLOC_UNMASK_IRQ (1 << 6) +# endif +# ifndef DPTX_BW_ALLOC_MODE_ENABLE +# define DPTX_BW_ALLOC_MODE_ENABLE (1 << 7) +# endif #endif /* DC_DP_TYPES_H */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_fused_io.c b/drivers/gpu/drm/amd/display/dc/dc_fused_io.c new file mode 100644 index 000000000000..fee69642fb93 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_fused_io.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: MIT +// +// Copyright 2025 Advanced Micro Devices, Inc. 
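The new file below adds a fused write-poll-read primitive: three DMUB commands are chained so the firmware performs the write, the poll, and the read as one transaction, without the driver re-arbitrating the bus in between (HDCP Ri-style checks are the motivating use). A minimal caller sketch, assuming only the mod_hdcp_atomic_op_i2c fields that op_i2c_convert() below actually reads (address, offset, size); every address, offset, and mask value here is illustrative, not taken from the patch:

static bool hdcp_i2c_write_poll_read_sketch(struct dc_link *link)
{
	struct mod_hdcp_atomic_op_i2c write = { 0 };
	struct mod_hdcp_atomic_op_i2c poll = { 0 };
	struct mod_hdcp_atomic_op_i2c read = { 0 };

	write.address = 0x3a;	/* hypothetical I2C slave */
	write.offset = 0x70;	/* hypothetical control register */
	write.size = 1;

	poll.address = 0x3a;	/* poll the same register... */
	poll.offset = 0x70;	/* ...until the masked MSB settles */
	poll.size = 1;

	read.address = 0x3a;
	read.offset = 0x8b;	/* hypothetical result register */
	read.size = 2;

	/* 10 ms poll budget; 0x01 is the MSB mask the firmware waits on. */
	return dm_atomic_write_poll_read_i2c(link, &write, &poll, &read,
					     10000, 0x01);
}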
+ +#include "dc_fused_io.h" + +#include "dm_helpers.h" +#include "gpio.h" + +static bool op_i2c_convert( + union dmub_rb_cmd *cmd, + const struct mod_hdcp_atomic_op_i2c *op, + enum dmub_cmd_fused_request_type type, + uint32_t ddc_line, + bool over_aux +) +{ + struct dmub_cmd_fused_request *req = &cmd->fused_io.request; + struct dmub_cmd_fused_request_location_i2c *loc = &req->u.i2c; + + if (!op || op->size > sizeof(req->buffer)) + return false; + + req->type = type; + loc->is_aux = false; + loc->ddc_line = ddc_line; + loc->over_aux = over_aux; + loc->address = op->address; + loc->offset = op->offset; + loc->length = op->size; + memcpy(req->buffer, op->data, op->size); + + return true; +} + +static bool op_aux_convert( + union dmub_rb_cmd *cmd, + const struct mod_hdcp_atomic_op_aux *op, + enum dmub_cmd_fused_request_type type, + uint32_t ddc_line +) +{ + struct dmub_cmd_fused_request *req = &cmd->fused_io.request; + struct dmub_cmd_fused_request_location_aux *loc = &req->u.aux; + + if (!op || op->size > sizeof(req->buffer)) + return false; + + req->type = type; + loc->is_aux = true; + loc->ddc_line = ddc_line; + loc->address = op->address; + loc->length = op->size; + memcpy(req->buffer, op->data, op->size); + + return true; +} + +static bool atomic_write_poll_read( + struct dc_link *link, + union dmub_rb_cmd commands[3], + uint32_t poll_timeout_us, + uint8_t poll_mask_msb +) +{ + const uint8_t count = 3; + const uint32_t timeout_per_request_us = 10000; + const uint32_t timeout_per_aux_transaction_us = 10000; + uint64_t timeout_us = 0; + + commands[1].fused_io.request.poll_mask_msb = poll_mask_msb; + commands[1].fused_io.request.timeout_us = poll_timeout_us; + + for (uint8_t i = 0; i < count; i++) { + struct dmub_rb_cmd_fused_io *io = &commands[i].fused_io; + + io->header.type = DMUB_CMD__FUSED_IO; + io->header.sub_type = DMUB_CMD__FUSED_IO_EXECUTE; + io->header.multi_cmd_pending = i != count - 1; + io->header.payload_bytes = sizeof(commands[i].fused_io) - sizeof(io->header); + + timeout_us += timeout_per_request_us + io->request.timeout_us; + if (!io->request.timeout_us && io->request.u.aux.is_aux) + timeout_us += timeout_per_aux_transaction_us * (io->request.u.aux.length / 16); + } + + if (!dm_helpers_execute_fused_io(link->ctx, link, commands, count, timeout_us)) + return false; + + return commands[0].fused_io.request.status == FUSED_REQUEST_STATUS_SUCCESS; +} + +bool dm_atomic_write_poll_read_i2c( + struct dc_link *link, + const struct mod_hdcp_atomic_op_i2c *write, + const struct mod_hdcp_atomic_op_i2c *poll, + struct mod_hdcp_atomic_op_i2c *read, + uint32_t poll_timeout_us, + uint8_t poll_mask_msb +) +{ + if (!link) + return false; + + const bool over_aux = false; + const uint32_t ddc_line = link->ddc->ddc_pin->pin_data->en; + + union dmub_rb_cmd commands[3] = { 0 }; + const bool converted = op_i2c_convert(&commands[0], write, FUSED_REQUEST_WRITE, ddc_line, over_aux) + && op_i2c_convert(&commands[1], poll, FUSED_REQUEST_POLL, ddc_line, over_aux) + && op_i2c_convert(&commands[2], read, FUSED_REQUEST_READ, ddc_line, over_aux); + + if (!converted) + return false; + + const bool result = atomic_write_poll_read(link, commands, poll_timeout_us, poll_mask_msb); + + memcpy(read->data, commands[0].fused_io.request.buffer, read->size); + return result; +} + +bool dm_atomic_write_poll_read_aux( + struct dc_link *link, + const struct mod_hdcp_atomic_op_aux *write, + const struct mod_hdcp_atomic_op_aux *poll, + struct mod_hdcp_atomic_op_aux *read, + uint32_t poll_timeout_us, + uint8_t poll_mask_msb 
+) +{ + if (!link) + return false; + + const uint32_t ddc_line = link->ddc->ddc_pin->pin_data->en; + union dmub_rb_cmd commands[3] = { 0 }; + const bool converted = op_aux_convert(&commands[0], write, FUSED_REQUEST_WRITE, ddc_line) + && op_aux_convert(&commands[1], poll, FUSED_REQUEST_POLL, ddc_line) + && op_aux_convert(&commands[2], read, FUSED_REQUEST_READ, ddc_line); + + if (!converted) + return false; + + const bool result = atomic_write_poll_read(link, commands, poll_timeout_us, poll_mask_msb); + + memcpy(read->data, commands[0].fused_io.request.buffer, read->size); + return result; +} + diff --git a/drivers/gpu/drm/amd/display/dc/dc_fused_io.h b/drivers/gpu/drm/amd/display/dc/dc_fused_io.h new file mode 100644 index 000000000000..c74917240985 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_fused_io.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + */ + +#ifndef __DC_FUSED_IO_H__ +#define __DC_FUSED_IO_H__ + +#include "dc.h" +#include "mod_hdcp.h" + +bool dm_atomic_write_poll_read_i2c( + struct dc_link *link, + const struct mod_hdcp_atomic_op_i2c *write, + const struct mod_hdcp_atomic_op_i2c *poll, + struct mod_hdcp_atomic_op_i2c *read, + uint32_t poll_timeout_us, + uint8_t poll_mask_msb +); + +bool dm_atomic_write_poll_read_aux( + struct dc_link *link, + const struct mod_hdcp_atomic_op_aux *write, + const struct mod_hdcp_atomic_op_aux *poll, + struct mod_hdcp_atomic_op_aux *read, + uint32_t poll_timeout_us, + uint8_t poll_mask_msb +); + +#endif // __DC_FUSED_IO_H__ + diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index 8f077e15b4f0..7217de258851 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -682,13 +682,19 @@ void reg_sequence_wait_done(const struct dc_context *ctx) if (offload && ctx->dc->debug.dmub_offload_enabled && !ctx->dc->debug.dmcub_emulation) { - dc_dmub_srv_wait_idle(ctx->dmub_srv); + dc_dmub_srv_wait_for_idle(ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL); } } char *dce_version_to_string(const int version) { switch (version) { + case DCE_VERSION_6_0: + return "DCE 6.0"; + case DCE_VERSION_6_1: + return "DCE 6.1"; + case DCE_VERSION_6_4: + return "DCE 6.4"; case DCE_VERSION_8_0: return "DCE 8.0"; case DCE_VERSION_8_1: diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index d562ddeca512..667852517246 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -68,7 +68,7 @@ enum dc_plane_addr_type { struct dc_plane_address { enum dc_plane_addr_type type; - bool tmz_surface; + uint8_t tmz_surface; union { struct{ PHYSICAL_ADDRESS_LOC addr; @@ -974,6 +974,7 @@ struct dc_crtc_timing { uint32_t pix_clk_100hz; uint32_t min_refresh_in_uhz; + uint32_t max_refresh_in_uhz; uint32_t vic; uint32_t hdmi_vic; @@ -1103,7 +1104,8 @@ enum mpcc_gamut_remap_mode_select { enum mpcc_gamut_remap_id { MPCC_OGAM_GAMUT_REMAP, MPCC_MCM_FIRST_GAMUT_REMAP, - MPCC_MCM_SECOND_GAMUT_REMAP + MPCC_MCM_SECOND_GAMUT_REMAP, + MPCC_RMCM_GAMUT_REMAP, }; enum cursor_matrix_mode { diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h index e9413685ed4f..14feb843e694 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_plane.h +++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h @@ -28,13 +28,24 @@ #include "dc_hw_types.h" +union dc_plane_status_update_flags { + struct { + uint32_t address : 1; + } 
bits; + uint32_t raw; +}; + struct dc_plane_state *dc_create_plane_state(const struct dc *dc); const struct dc_plane_status *dc_plane_get_status( - const struct dc_plane_state *plane_state); + const struct dc_plane_state *plane_state, + union dc_plane_status_update_flags flags); void dc_plane_state_retain(struct dc_plane_state *plane_state); void dc_plane_state_release(struct dc_plane_state *plane_state); void dc_plane_force_dcc_and_tiling_disable(struct dc_plane_state *plane_state, bool clear_tiling); + +void dc_plane_copy_config(struct dc_plane_state *dst, const struct dc_plane_state *src); + #endif /* _DC_PLANE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c index e3a8283b4098..7f57661433eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c +++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c @@ -156,15 +156,16 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl spl_in->adaptive_sharpness.enable = true; spl_in->adaptive_sharpness.sharpness_level = 0; } else if (sharpness_setting == SHARPNESS_CUSTOM) { - spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_min = 0; - spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_max = 1750; - spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_mid = 750; - spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_min = 0; - spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_max = 3500; - spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_mid = 1500; - spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_min = 0; - spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_max = 2750; - spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_mid = 1500; + /* SAT: read sharpness_range from dc_plane_state */ + spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_min = plane_state->sharpness_range.sdr_rgb_min; + spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_max = plane_state->sharpness_range.sdr_rgb_max; + spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_mid = plane_state->sharpness_range.sdr_rgb_mid; + spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_min = plane_state->sharpness_range.sdr_yuv_min; + spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_max = plane_state->sharpness_range.sdr_yuv_max; + spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_mid = plane_state->sharpness_range.sdr_yuv_mid; + spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_min = plane_state->sharpness_range.hdr_rgb_min; + spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_max = plane_state->sharpness_range.hdr_rgb_max; + spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_mid = plane_state->sharpness_range.hdr_rgb_mid; if (force_sharpness_level > 0) { if (force_sharpness_level > 10) diff --git a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h index 1a12ef579ff4..1d9bae56ff6a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h @@ -105,4 +105,24 @@ bool dc_state_is_fams2_in_use( const struct dc *dc, const struct dc_state *state); + +void dc_state_set_stream_subvp_cursor_limit(const struct dc_stream_state *stream, + struct dc_state *state, + bool limit); + +bool dc_state_get_stream_subvp_cursor_limit(const struct dc_stream_state *stream, + struct dc_state *state); + +void dc_state_set_stream_cursor_subvp_limit(const struct dc_stream_state *stream, + struct dc_state *state, + bool limit); + +bool dc_state_get_stream_cursor_subvp_limit(const struct dc_stream_state *stream, + 
struct dc_state *state); + +bool dc_state_can_clear_stream_cursor_subvp_limit(const struct dc_stream_state *stream, + struct dc_state *state); + +bool dc_state_is_subvp_in_use(struct dc_state *state); + #endif /* _DC_STATE_PRIV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index e0bfddaa23e3..5fc6fea211de 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -44,6 +44,8 @@ struct mall_stream_config { */ enum mall_stream_type type; struct dc_stream_state *paired_stream; // master / slave stream + bool subvp_limit_cursor_size; /* stream has/is using subvp limiting hw cursor support */ + bool cursor_size_limit_subvp; /* stream is using hw cursor config preventing subvp */ }; struct dc_stream_status { @@ -503,6 +505,11 @@ void program_cursor_position( struct dc *dc, struct dc_stream_state *stream); +bool dc_stream_check_cursor_attributes( + const struct dc_stream_state *stream, + struct dc_state *state, + const struct dc_cursor_attributes *attributes); + bool dc_stream_set_cursor_attributes( struct dc_stream_state *stream, const struct dc_cursor_attributes *attributes); @@ -572,6 +579,17 @@ bool dc_stream_set_gamut_remap(struct dc *dc, bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream); +struct dc_rmcm_3dlut *dc_stream_get_3dlut_for_stream( + const struct dc *dc, + const struct dc_stream_state *stream, + bool allocate_one); + +void dc_stream_release_3dlut_for_stream( + const struct dc *dc, + const struct dc_stream_state *stream); + +void dc_stream_init_rmcm_3dlut(struct dc *dc); + struct pipe_ctx *dc_stream_get_pipe_ctx(struct dc_stream_state *stream); void dc_dmub_update_dirty_rect(struct dc *dc, @@ -579,4 +597,8 @@ void dc_dmub_update_dirty_rect(struct dc *dc, struct dc_stream_state *stream, struct dc_surface_update *srf_updates, struct dc_state *context); + +bool dc_stream_is_cursor_limit_pending(struct dc *dc, struct dc_stream_state *stream); +bool dc_stream_can_clear_cursor_limit(struct dc *dc, struct dc_stream_state *stream); + #endif /* DC_STREAM_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 83ffaae9f439..375ca2f13b7a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -175,6 +175,7 @@ struct dc_panel_patch { unsigned int embedded_tiled_slave; unsigned int disable_fams; unsigned int skip_avmute; + unsigned int skip_audio_sab_check; unsigned int mst_start_top_delay; unsigned int remove_sink_ext_caps; unsigned int disable_colorimetry; @@ -210,6 +211,7 @@ struct dc_edid_caps { bool edid_hdmi; bool hdr_supported; + bool rr_capable; struct dc_panel_patch panel_patch; }; @@ -262,6 +264,7 @@ enum dc_timing_source { TIMING_SOURCE_EDID_4BYTE, TIMING_SOURCE_EDID_CEA_DISPLAYID_VTDB, TIMING_SOURCE_EDID_CEA_RID, + TIMING_SOURCE_EDID_DISPLAYID_TYPE5, TIMING_SOURCE_VBIOS, TIMING_SOURCE_CV, TIMING_SOURCE_TV, @@ -1089,7 +1092,8 @@ union replay_low_refresh_rate_enable_options { struct { //BIT[0-3]: Replay Low Hz Support control unsigned int ENABLE_LOW_RR_SUPPORT :1; - unsigned int RESERVED_1_3 :3; + unsigned int SKIP_ASIC_CHECK :1; + unsigned int RESERVED_2_3 :2; //BIT[4-15]: Replay Low Hz Enable Scenarios unsigned int ENABLE_STATIC_SCREEN :1; unsigned int ENABLE_FULL_SCREEN_VIDEO :1; @@ -1129,6 +1133,10 @@ struct replay_config { union replay_low_refresh_rate_enable_options low_rr_enable_options; /* Replay coasting vtotal is within low refresh 
rate range. */ bool low_rr_activated; + /* Replay low refresh rate supported*/ + bool low_rr_supported; + /* Replay Video Conferencing Optimization Enabled */ + bool replay_video_conferencing_optimization_enabled; }; /* Replay feature flags*/ @@ -1270,7 +1278,7 @@ struct dc_cm2_gpu_mem_format_parameters { enum dc_cm2_gpu_mem_size { DC_CM2_GPU_MEM_SIZE_171717, - DC_CM2_GPU_MEM_SIZE_TRANSFORMED + DC_CM2_GPU_MEM_SIZE_TRANSFORMED, }; struct dc_cm2_gpu_mem_parameters { @@ -1279,6 +1287,7 @@ struct dc_cm2_gpu_mem_parameters { struct dc_cm2_gpu_mem_format_parameters format_params; enum dc_cm2_gpu_mem_pixel_component_order component_order; enum dc_cm2_gpu_mem_size size; + uint16_t bit_depth; }; enum dc_cm2_transfer_func_source { @@ -1302,6 +1311,11 @@ struct dc_cm2_func_luts { const struct dc_3dlut *lut3d_func; struct dc_cm2_gpu_mem_parameters gpu_mem_params; }; + bool rmcm_3dlut_shaper_select; + bool mpc_3dlut_enable; + bool rmcm_3dlut_enable; + bool mpc_mcm_post_blend; + uint8_t rmcm_tmz; } lut3d_data; const struct dc_transfer_func *lut1d_func; }; @@ -1359,4 +1373,19 @@ struct set_backlight_level_params { uint8_t aux_inst; }; +enum dc_validate_mode { + /* validate the mode and program HW */ + DC_VALIDATE_MODE_AND_PROGRAMMING = 0, + /* only validate the mode */ + DC_VALIDATE_MODE_ONLY = 1, + /* validate the mode and get the max state (voltage level) */ + DC_VALIDATE_MODE_AND_STATE_INDEX = 2, +}; + +struct dc_validation_dpia_set { + const struct dc_link *link; + const struct dc_tunnel_settings *tunnel_settings; + uint32_t required_bw; +}; + #endif /* DC_TYPES_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c index b363f5360818..58c84f555c0f 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c @@ -391,6 +391,7 @@ static void dccg35_set_dppclk_rcg(struct dccg *dccg, struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && enable) return; @@ -411,6 +412,8 @@ static void dccg35_set_dppclk_rcg(struct dccg *dccg, BREAK_TO_DEBUGGER(); break; } + //DC_LOG_DEBUG("%s: inst(%d) DPPCLK rcg_disable: %d\n", __func__, inst, enable ? 
0 : 1); + } static void dccg35_set_dpstreamclk_rcg( @@ -1035,6 +1038,7 @@ static void dccg35_enable_dpp_clk_new( DPPCLK0_DTO_MODULO, 0xFF); } + static void dccg35_disable_dpp_clk_new( struct dccg *dccg, int inst) @@ -1112,30 +1116,24 @@ static void dcn35_set_dppclk_enable(struct dccg *dccg, { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + switch (dpp_inst) { case 0: REG_UPDATE(DPPCLK_CTRL, DPPCLK0_EN, enable); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable); break; case 1: REG_UPDATE(DPPCLK_CTRL, DPPCLK1_EN, enable); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable); break; case 2: REG_UPDATE(DPPCLK_CTRL, DPPCLK2_EN, enable); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable); break; case 3: REG_UPDATE(DPPCLK_CTRL, DPPCLK3_EN, enable); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable); break; default: break; } + //DC_LOG_DEBUG("%s: dpp_inst(%d) DPPCLK_EN = %d\n", __func__, dpp_inst, enable); } @@ -1163,14 +1161,18 @@ static void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst, ASSERT(false); phase = 0xff; } + dccg35_set_dppclk_rcg(dccg, dpp_inst, false); REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, DPPCLK0_DTO_PHASE, phase, DPPCLK0_DTO_MODULO, modulo); dcn35_set_dppclk_enable(dccg, dpp_inst, true); - } else + } else { dcn35_set_dppclk_enable(dccg, dpp_inst, false); + /*we have this in hwss: disable_plane*/ + //dccg35_set_dppclk_rcg(dccg, dpp_inst, true); + } dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk; } @@ -1182,6 +1184,7 @@ static void dccg35_set_dppclk_root_clock_gating(struct dccg *dccg, if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) return; + switch (dpp_inst) { case 0: REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable); @@ -1198,6 +1201,8 @@ static void dccg35_set_dppclk_root_clock_gating(struct dccg *dccg, default: break; } + //DC_LOG_DEBUG("%s: dpp_inst(%d) rcg: %d\n", __func__, dpp_inst, enable); + } static void dccg35_get_pixel_rate_div( @@ -1521,28 +1526,30 @@ static void dccg35_set_physymclk_root_clock_gating( switch (phy_inst) { case 0: REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYASYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); + PHYASYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1); break; case 1: REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYBSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); + PHYBSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1); break; case 2: REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYCSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); + PHYCSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1); break; case 3: REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYDSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); + PHYDSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1); break; case 4: REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYESYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); + PHYESYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1); break; default: BREAK_TO_DEBUGGER(); return; } + //DC_LOG_DEBUG("%s: dpp_inst(%d) PHYESYMCLK_ROOT_GATE_DISABLE:\n", __func__, phy_inst, enable ? 
0 : 1); + } static void dccg35_set_physymclk( @@ -1643,6 +1650,8 @@ static void dccg35_dpp_root_clock_control( return; if (clock_on) { + dccg35_set_dppclk_rcg(dccg, dpp_inst, false); + /* turn off the DTO and leave phase/modulo at max */ dcn35_set_dppclk_enable(dccg, dpp_inst, 1); REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, @@ -1654,6 +1663,8 @@ static void dccg35_dpp_root_clock_control( REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, DPPCLK0_DTO_PHASE, 0, DPPCLK0_DTO_MODULO, 1); + /*we have this in hwss: disable_plane*/ + //dccg35_set_dppclk_rcg(dccg, dpp_inst, true); } dccg->dpp_clock_gated[dpp_inst] = !clock_on; @@ -1771,36 +1782,40 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst) //Disable DTO switch (inst) { case 0: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1); + REG_UPDATE_2(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_PHASE, 0, DSCCLK0_DTO_MODULO, 0); REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 1); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1); break; case 1: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1); + REG_UPDATE_2(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_PHASE, 0, DSCCLK1_DTO_MODULO, 0); REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 1); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1); break; case 2: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1); + REG_UPDATE_2(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_PHASE, 0, DSCCLK2_DTO_MODULO, 0); REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 1); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1); break; case 3: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1); + REG_UPDATE_2(DSCCLK3_DTO_PARAM, DSCCLK3_DTO_PHASE, 0, DSCCLK3_DTO_MODULO, 0); REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 1); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1); break; default: BREAK_TO_DEBUGGER(); @@ -1813,9 +1828,6 @@ static void dccg35_disable_dscclk(struct dccg *dccg, { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); - if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) - return; - switch (inst) { case 0: REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 0); diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c index ffd172231fdf..668ee2d405fd 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c @@ -727,7 +727,7 @@ void dccg401_init(struct dccg *dccg) } } -void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst) +void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst, uint32_t num_slices_h) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h index 55e8718aad22..5947a35363aa 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h @@ -209,7 +209,7 @@ void dccg401_disable_symclk32_le( struct dccg *dccg, int 
hpo_le_inst); void dccg401_disable_dpstreamclk(struct dccg *dccg, int dp_hpo_inst); -void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst); +void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst, uint32_t num_slices_h); void dccg401_set_ref_dscclk(struct dccg *dccg, uint32_t dsc_inst); void dccg401_set_src_sel( @@ -230,7 +230,6 @@ void dccg401_set_dp_dto( const struct dp_dto_params *params); void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst); void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst); -void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst); void dccg401_set_dtbclk_p_src( struct dccg *dccg, enum streamclk_source src, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index 077337698e0a..b4f5b4a6331a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -976,11 +976,12 @@ static bool dcn31_program_pix_clk( struct bp_pixel_clock_parameters bp_pc_params = {0}; enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24; - // Apply ssed(spread spectrum) dpref clock for edp only. - if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0 - && pix_clk_params->signal_type == SIGNAL_TYPE_EDP - && encoding == DP_8b_10b_ENCODING) + // Apply ssed(spread spectrum) dpref clock for edp and dp + if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0 && + dc_is_dp_signal(pix_clk_params->signal_type) && + encoding == DP_8b_10b_ENCODING) dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz; + // For these signal types Driver to program DP_DTO without calling VBIOS Command table if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) { if (e) { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h index 0721ae895ae9..94128f7a18b1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h @@ -257,7 +257,7 @@ bool dce110_clk_src_construct( struct dce110_clk_src *clk_src, struct dc_context *ctx, struct dc_bios *bios, - enum clock_source_id, + enum clock_source_id id, const struct dce110_clk_src_regs *regs, const struct dce110_clk_src_shift *cs_shift, const struct dce110_clk_src_mask *cs_mask); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index d28826c3ae5f..0421b267a0b5 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -292,9 +292,35 @@ static void set_speed( FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2); } +static bool acquire_engine(struct dce_i2c_hw *dce_i2c_hw) +{ + uint32_t arbitrate = 0; + + REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate); + switch (arbitrate) { + case DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW: + return true; + case DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW: + return false; + case DC_I2C_STATUS__DC_I2C_STATUS_IDLE: + default: + break; + } + + REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, true); + REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate); + if (arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW) + return false; + + return true; +} + static bool setup_engine( struct dce_i2c_hw *dce_i2c_hw) 
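acquire_engine() above makes the arbitration outcome explicit: already-owned-by-SW succeeds immediately, owned-by-HW fails, and only from the idle state does the driver raise DC_I2C_SW_USE_I2C_REG_REQ and then re-read the status to confirm ownership. The practical consequence is that setup_engine() below can now fail, and callers have to propagate that instead of assuming the bus was free. A hypothetical call-site shape (the real callers sit elsewhere in the DCE I2C code):

	if (!setup_engine(dce_i2c_hw)) {
		/* Engine held by HW/DMCU; back off instead of clobbering it. */
		return false;
	}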
{ + // Deassert soft reset to unblock I2C engine registers + REG_UPDATE(DC_I2C_CONTROL, DC_I2C_SOFT_RESET, false); + uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE; uint32_t reset_length = 0; @@ -309,8 +335,8 @@ static bool setup_engine( REG_UPDATE_N(SETUP, 1, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_EN), 1); - /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/ - REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1); + if (!acquire_engine(dce_i2c_hw)) + return false; /*set SW requested I2c speed to default, if API calls in it will be override later*/ set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz); @@ -319,9 +345,8 @@ static bool setup_engine( i2c_setup_limit = dce_i2c_hw->setup_limit; /* Program pin select */ - REG_UPDATE_6(DC_I2C_CONTROL, + REG_UPDATE_5(DC_I2C_CONTROL, DC_I2C_GO, 0, - DC_I2C_SOFT_RESET, 0, DC_I2C_SEND_RESET, 0, DC_I2C_SW_STATUS_RESET, 1, DC_I2C_TRANSACTION_COUNT, 0, @@ -351,6 +376,32 @@ static bool setup_engine( return true; } +/** + * cntl_stuck_hw_workaround - Workaround for I2C engine stuck state + * @dce_i2c_hw: Pointer to dce_i2c_hw structure + * + * If we boot without an HDMI display, the I2C engine does not get initialized + * correctly. One of its symptoms is that SW_USE_I2C does not get cleared after + * acquire. After setting SW_DONE_USING_I2C on release, the engine gets + * immediately reacquired by SW, preventing DMUB from using it. + * + * This function checks the I2C arbitration status and applies a release + * workaround if necessary. + */ +static void cntl_stuck_hw_workaround(struct dce_i2c_hw *dce_i2c_hw) +{ + uint32_t arbitrate = 0; + + REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate); + if (arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW) + return; + + // Still acquired after release, release again as a workaround + REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, true); + REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate); + ASSERT(arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW); +} + static void release_engine( struct dce_i2c_hw *dce_i2c_hw) { @@ -378,9 +429,9 @@ static void release_engine( /*for HW HDCP Ri polling failure w/a test*/ set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz_hdcp); - /* Release I2C after reset, so HW or DMCU could use it */ - REG_UPDATE_2(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1, - DC_I2C_SW_USE_I2C_REG_REQ, 0); + // Release I2C engine so it can be used by HW or DMCU, automatically clears SW_USE_I2C + REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, true); + cntl_stuck_hw_workaround(dce_i2c_hw); if (dce_i2c_hw->ctx->dc->debug.enable_mem_low_power.bits.i2c) { if (dce_i2c_hw->regs->DIO_MEM_PWR_CTRL) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c index ccc154b0281c..3b9011ef9b68 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c @@ -28,6 +28,8 @@ #include "dc.h" #include "core_types.h" #include "dmub_cmd.h" +#include "dc_dmub_srv.h" +#include "dmub/dmub_srv.h" #define TO_DMUB_ABM(abm)\ container_of(abm, struct dce_abm, base) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c index 0d7e7f3b81a1..a641ae04450c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c @@ -240,7 +240,8 @@ bool dmub_abm_save_restore( 
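This hunk, and the replay hunks further down, fix the same accounting bug: the DMUB command header's payload_bytes must count only the body that follows the header, so each full-command sizeof now has the header size subtracted. Stated generically (this helper macro is illustrative, not part of the patch):

	/* Illustrative only: DMUB payload excludes the leading command header. */
	#define DMUB_RB_CMD_PAYLOAD_BYTES(cmd_struct) \
		(sizeof(cmd_struct) - sizeof(struct dmub_cmd_header))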
cmd.abm_save_restore.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; cmd.abm_save_restore.abm_init_config_data.panel_mask = panel_mask; - cmd.abm_save_restore.header.payload_bytes = sizeof(struct dmub_rb_cmd_abm_save_restore); + cmd.abm_save_restore.header.payload_bytes = + sizeof(struct dmub_rb_cmd_abm_save_restore) - sizeof(struct dmub_cmd_header); dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c index c31e4f26a305..e7a318e26d38 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c @@ -4,6 +4,7 @@ #include "dc.h" #include "dc_dmub_srv.h" +#include "dc_dp_types.h" #include "dmub/dmub_srv.h" #include "core_types.h" #include "dmub_replay.h" @@ -43,21 +44,45 @@ static void dmub_replay_get_state(struct dmub_replay *dmub, enum replay_state *s /* * Enable/Disable Replay. */ -static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst) +static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst, + struct dc_link *link) { union dmub_rb_cmd cmd; struct dc_context *dc = dmub->ctx; uint32_t retry_count; enum replay_state state = REPLAY_STATE_0; + struct pipe_ctx *pipe_ctx = NULL; + struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx; + uint8_t i; memset(&cmd, 0, sizeof(cmd)); cmd.replay_enable.header.type = DMUB_CMD__REPLAY; cmd.replay_enable.data.panel_inst = panel_inst; cmd.replay_enable.header.sub_type = DMUB_CMD__REPLAY_ENABLE; - if (enable) + if (enable) { cmd.replay_enable.data.enable = REPLAY_ENABLE; - else + // hpo stream/link encoder assignments are not static, need to update every time we try to enable replay + if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) { + for (i = 0; i < MAX_PIPES; i++) { + if (res_ctx && + res_ctx->pipe_ctx[i].stream && + res_ctx->pipe_ctx[i].stream->link && + res_ctx->pipe_ctx[i].stream->link == link && + res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) { + pipe_ctx = &res_ctx->pipe_ctx[i]; + //TODO: refactor for multi edp support + break; + } + } + + if (!pipe_ctx) + return; + + cmd.replay_enable.data.hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; + cmd.replay_enable.data.hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst; + } + } else cmd.replay_enable.data.enable = REPLAY_DISABLE; cmd.replay_enable.header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_enable_data); @@ -149,6 +174,17 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub, copy_settings_data->digbe_inst = replay_context->digbe_inst; copy_settings_data->digfe_inst = replay_context->digfe_inst; + if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) { + if (pipe_ctx->stream_res.hpo_dp_stream_enc) + copy_settings_data->hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; + else + copy_settings_data->hpo_stream_enc_inst = 0; + if (pipe_ctx->link_res.hpo_dp_link_enc) + copy_settings_data->hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst; + else + copy_settings_data->hpo_link_enc_inst = 0; + } + if (pipe_ctx->plane_res.dpp) copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst; else @@ -211,6 +247,7 @@ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub, pCmd->header.type = DMUB_CMD__REPLAY; pCmd->header.sub_type = DMUB_CMD__REPLAY_SET_COASTING_VTOTAL; 
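The coasting vtotal is a 32-bit value carried in two 16-bit command fields, split just below with "& 0xFFFF" and ">> 16". The consumer would reassemble it along these lines (a sketch; the receiving-side names are assumed, not shown in the patch):

	uint32_t coasting_vtotal = ((uint32_t)data->coasting_vtotal_high << 16) |
				   data->coasting_vtotal;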
pCmd->header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data); + pCmd->replay_set_coasting_vtotal_data.panel_inst = panel_inst; pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF); pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16; @@ -280,7 +317,9 @@ static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dm memset(&cmd, 0, sizeof(cmd)); pCmd->header.type = DMUB_CMD__REPLAY; pCmd->header.sub_type = DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL; - pCmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal); + pCmd->header.payload_bytes = + sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal) - + sizeof(struct dmub_cmd_header); pCmd->replay_set_power_opt_data.power_opt = power_opt; pCmd->replay_set_power_opt_data.panel_inst = panel_inst; pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF); @@ -319,7 +358,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub, cmd.replay_set_timing_sync.header.sub_type = DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED; cmd.replay_set_timing_sync.header.payload_bytes = - sizeof(struct dmub_rb_cmd_replay_set_timing_sync); + sizeof(struct dmub_rb_cmd_replay_set_timing_sync) - + sizeof(struct dmub_cmd_header); //Cmd Body cmd.replay_set_timing_sync.replay_set_timing_sync_data.panel_inst = cmd_element->sync_data.panel_inst; @@ -331,7 +371,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub, cmd.replay_set_frameupdate_timer.header.sub_type = DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER; cmd.replay_set_frameupdate_timer.header.payload_bytes = - sizeof(struct dmub_rb_cmd_replay_set_frameupdate_timer); + sizeof(struct dmub_rb_cmd_replay_set_frameupdate_timer) - + sizeof(struct dmub_cmd_header); //Cmd Body cmd.replay_set_frameupdate_timer.data.panel_inst = cmd_element->panel_inst; @@ -345,7 +386,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub, cmd.replay_set_pseudo_vtotal.header.sub_type = DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL; cmd.replay_set_pseudo_vtotal.header.payload_bytes = - sizeof(struct dmub_rb_cmd_replay_set_pseudo_vtotal); + sizeof(struct dmub_rb_cmd_replay_set_pseudo_vtotal) - + sizeof(struct dmub_cmd_header); //Cmd Body cmd.replay_set_pseudo_vtotal.data.panel_inst = cmd_element->pseudo_vtotal_data.panel_inst; @@ -357,7 +399,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub, cmd.replay_disabled_adaptive_sync_sdp.header.sub_type = DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP; cmd.replay_disabled_adaptive_sync_sdp.header.payload_bytes = - sizeof(struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp); + sizeof(struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp) - + sizeof(struct dmub_cmd_header); //Cmd Body cmd.replay_disabled_adaptive_sync_sdp.data.panel_inst = cmd_element->disabled_adaptive_sync_sdp_data.panel_inst; @@ -369,7 +412,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub, cmd.replay_set_general_cmd.header.sub_type = DMUB_CMD__REPLAY_SET_GENERAL_CMD; cmd.replay_set_general_cmd.header.payload_bytes = - sizeof(struct dmub_rb_cmd_replay_set_general_cmd); + sizeof(struct dmub_rb_cmd_replay_set_general_cmd) - + sizeof(struct dmub_cmd_header); //Cmd Body cmd.replay_set_general_cmd.data.panel_inst = cmd_element->set_general_cmd_data.panel_inst; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h index e6346c0ffc0e..ccbe385e132c 100644 
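The header change below widens the replay_enable hook so the link is available at enable time, matching the implementation above that resolves the HPO stream/link encoder instances on every enable because those assignments are not static. Call sites go through the funcs table, roughly like this sketch (the NULL guard and local names are assumed):

	if (replay->funcs->replay_enable)
		replay->funcs->replay_enable(replay, enable, wait, panel_inst, link);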
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h @@ -19,7 +19,7 @@ struct dmub_replay_funcs { void (*replay_get_state)(struct dmub_replay *dmub, enum replay_state *state, uint8_t panel_inst); void (*replay_enable)(struct dmub_replay *dmub, bool enable, bool wait, - uint8_t panel_inst); + uint8_t panel_inst, struct dc_link *link); bool (*replay_copy_settings)(struct dmub_replay *dmub, struct dc_link *link, struct replay_context *replay_context, uint8_t panel_inst); void (*replay_set_power_opt)(struct dmub_replay *dmub, unsigned int power_opt, diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile index eede83ad91fa..824f73eb3326 100644 --- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile @@ -25,8 +25,7 @@ CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = -Wno-override-init -DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \ - dce60_resource.o +DCE60 = dce60_timing_generator.o AMD_DAL_DCE60 = $(addprefix $(AMDDALPATH)/dc/dce60/,$(DCE60)) diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c index 003a9330c286..88e7a1fc9a30 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c @@ -105,7 +105,7 @@ static void program_pix_dur(struct timing_generator *tg, uint32_t pix_clk_100hz) dm_write_reg(tg->ctx, addr, value); } -static void program_timing(struct timing_generator *tg, +static void dce80_timing_generator_program_timing(struct timing_generator *tg, const struct dc_crtc_timing *timing, int vready_offset, int vstartup_start, @@ -185,7 +185,7 @@ static void dce80_timing_generator_enable_advanced_request( static const struct timing_generator_funcs dce80_tg_funcs = { .validate_timing = dce110_tg_validate_timing, - .program_timing = program_timing, + .program_timing = dce80_timing_generator_program_timing, .enable_crtc = dce110_timing_generator_enable_crtc, .disable_crtc = dce110_timing_generator_disable_crtc, .is_counter_moving = dce110_timing_generator_is_counter_moving, diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index 5efddd48d5c5..9d160b39e8c5 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -153,6 +153,14 @@ bool dm_helpers_submit_i2c( const struct dc_link *link, struct i2c_command *cmd); +bool dm_helpers_execute_fused_io( + struct dc_context *ctx, + struct dc_link *link, + union dmub_rb_cmd *commands, + uint8_t count, + uint32_t timeout_us +); + bool dm_helpers_dp_write_dsc_enable( struct dc_context *ctx, const struct dc_stream_state *stream, diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index e1d500633dfa..b357683b4255 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -114,9 +114,6 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_rcflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn401/dcn401_fpu.o := $(dml_ccflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn401/dcn401_fpu.o := $(dml_rcflags) - ifdef CONFIG_DRM_AMD_DC_FP DML += display_mode_lib.o display_rq_dlg_helpers.o 
dml1_display_rq_dlg_calc.o DML += dcn10/dcn10_fpu.o @@ -137,7 +134,6 @@ DML += dcn303/dcn303_fpu.o DML += dcn314/dcn314_fpu.o DML += dcn35/dcn35_fpu.o DML += dcn351/dcn351_fpu.o -DML += dcn401/dcn401_fpu.o DML += dsc/rc_calc_fpu.o DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c index f1235bf9a596..74962791302f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c @@ -748,7 +748,7 @@ static unsigned int get_highest_allowed_voltage_level(bool is_vmin_only_asic) bool dcn_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { /* * we want a breakdown of the various stages of validation, which the @@ -1119,7 +1119,7 @@ bool dcn_validate_bandwidth( BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (v->voltage_level != number_of_states_plus_one && !fast_validate) { + if (v->voltage_level != number_of_states_plus_one && validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) { float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second; if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65) @@ -1286,7 +1286,7 @@ bool dcn_validate_bandwidth( } } else if (v->voltage_level == number_of_states_plus_one) { BW_VAL_TRACE_SKIP(fail); - } else if (fast_validate) { + } else if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index e9fea9c2162e..2a2eaf6adf26 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -1315,7 +1315,7 @@ static void swizzle_to_dml_params( int dcn20_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int pipe_cnt, i; bool synchronized_vblank = true; @@ -1733,7 +1733,7 @@ void dcn20_calculate_wm(struct dc *dc, struct dc_state *context, int *out_pipe_cnt, int *pipe_split_from, int vlevel, - bool fast_validate) + enum dc_validate_mode validate_mode) { int pipe_cnt, i, pipe_idx; @@ -1780,10 +1780,10 @@ void dcn20_calculate_wm(struct dc *dc, struct dc_state *context, if (pipe_cnt != pipe_idx) { if (dc->res_pool->funcs->populate_dml_pipes) pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, - context, pipes, fast_validate); + context, pipes, validate_mode); else pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, - context, pipes, fast_validate); + context, pipes, validate_mode); } *out_pipe_cnt = pipe_cnt; @@ -2027,7 +2027,7 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st } static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context, - bool fast_validate, display_e2e_pipe_params_st *pipes) + enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes) { bool out = false; @@ -2040,7 +2040,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co BW_VAL_TRACE_COUNT(); - out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate); + out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, validate_mode); if (pipe_cnt == 0) goto validate_out; @@ -2050,12 +2050,12 @@ static bool 
dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (fast_validate) { + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } - dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate); + dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, validate_mode); dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); BW_VAL_TRACE_END_WATERMARKS(); @@ -2077,7 +2077,7 @@ validate_out: } bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, - bool fast_validate, display_e2e_pipe_params_st *pipes) + enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes) { bool voltage_supported = false; bool full_pstate_supported = false; @@ -2095,12 +2095,11 @@ bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, /*Unsafe due to current pipe merge and split logic*/ ASSERT(context != dc->current_state); - if (fast_validate) { - return dcn20_validate_bandwidth_internal(dc, context, true, pipes); - } + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) + return dcn20_validate_bandwidth_internal(dc, context, validate_mode, pipes); // Best case, we support full UCLK switch latency - voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes); + voltage_supported = dcn20_validate_bandwidth_internal(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING, pipes); full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support; if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 || @@ -2113,7 +2112,7 @@ bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us; memset(pipes, 0, dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st)); - voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes); + voltage_supported = dcn20_validate_bandwidth_internal(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING, pipes); dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support; if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) { @@ -2156,14 +2155,14 @@ void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v, int dcn21_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { uint32_t pipe_cnt; int i; dc_assert_fp_enabled(); - pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); for (i = 0; i < pipe_cnt; i++) { @@ -2239,7 +2238,7 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context, int *out_pipe_cnt, int *pipe_split_from, int vlevel_req, - bool fast_validate) + enum dc_validate_mode validate_mode) { int pipe_cnt, i, pipe_idx; int vlevel, vlevel_max; @@ -2281,10 +2280,10 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context, if (pipe_cnt != pipe_idx) { if (dc->res_pool->funcs->populate_dml_pipes) pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, - context, pipes, fast_validate); + context, pipes, validate_mode); else pipe_cnt = dcn21_populate_dml_pipes_from_context(dc, - context, pipes, fast_validate); + context, pipes, validate_mode); } *out_pipe_cnt = pipe_cnt; @@ -2319,7 +2318,7 @@ static void dcn21_calculate_wm(struct dc *dc, 
struct dc_state *context, } bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, - bool fast_validate, display_e2e_pipe_params_st *pipes) + enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes) { bool out = false; @@ -2337,7 +2336,7 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, /*Unsafe due to current pipe merge and split logic*/ ASSERT(context != dc->current_state); - out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate); + out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, validate_mode); if (pipe_cnt == 0) goto validate_out; @@ -2347,12 +2346,12 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (fast_validate) { + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } - dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate); + dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, validate_mode); dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); BW_VAL_TRACE_END_WATERMARKS(); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h index b6c34198ddc8..aed00039ca62 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h @@ -44,14 +44,14 @@ void dcn20_calculate_dlg_params(struct dc *dc, int dcn20_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn20_calculate_wm(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int *out_pipe_cnt, int *pipe_split_from, int vlevel, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn20_cap_soc_clocks(struct _vcs_dpi_soc_bounding_box_st *bb, struct pp_smu_nv_clock_table max_clocks); void dcn20_update_bounding_box(struct dc *dc, @@ -62,7 +62,7 @@ void dcn20_update_bounding_box(struct dc *dc, void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb); bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, - bool fast_validate, display_e2e_pipe_params_st *pipes); + enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes); void dcn20_fpu_set_wm_ranges(int i, struct pp_smu_wm_range_sets *ranges, struct _vcs_dpi_soc_bounding_box_st *loaded_bb); @@ -75,9 +75,9 @@ void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v, int dcn21_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); -bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, bool - fast_validate, display_e2e_pipe_params_st *pipes); + enum dc_validate_mode validate_mode); +bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, enum + dc_validate_mode, display_e2e_pipe_params_st *pipes); void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); void dcn21_clk_mgr_set_bw_params_wm_table(struct clk_bw_params *bw_params); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c index 88789987bdbc..e5f5c0663750 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c +++ 
b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c @@ -339,7 +339,8 @@ void dcn30_fpu_calculate_wm_and_dlg( * newly found dummy_latency_index */ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; - dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true); + dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, + DC_VALIDATE_MODE_AND_PROGRAMMING, true); maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported; @@ -630,7 +631,8 @@ int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc, while (dummy_latency_index < max_latency_table_entries) { context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; - dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true); + dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, + DC_VALIDATE_MODE_AND_PROGRAMMING, true); if (context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank == dm_allow_self_refresh_and_mclk_switch) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index f1fe49401bc0..8d24763938ea 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -1002,6 +1002,7 @@ static bool CalculatePrefetchSchedule( dst_y_prefetch_equ = VStartup - (Tsetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime - (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal); + dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH Lsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC); Tsw_oto = Lsw_oto * LineTime; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h index d2ae43a82ba5..dfcc5d50071e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h @@ -55,5 +55,5 @@ int dcn_get_approx_det_segs_required_for_pstate( int dcn31x_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); #endif /* __DCN31_FPU_H__*/ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index f567a9023682..ed59c77bc6f6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -1105,6 +1105,7 @@ static bool CalculatePrefetchSchedule( Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0; dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto; dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime - (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal); + dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0; Tpre_rounded = dst_y_prefetch_equ * LineTime; diff --git 
a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c index 5ed117e11aa2..df9d50b9b57c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c @@ -306,7 +306,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing) int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; @@ -316,7 +316,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c dc_assert_fp_enabled(); - dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h index d32c5bb99f4c..362ac79184ea 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h @@ -35,6 +35,6 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params); int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c index 5865e8fa2d8e..9f3938a50240 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c @@ -1123,6 +1123,7 @@ static bool CalculatePrefetchSchedule( Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0; dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto; dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime - (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal); + dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0; Tpre_rounded = dst_y_prefetch_equ * LineTime; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 56dda686e299..6160952245b4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -290,7 +290,7 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc, vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support; context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; - dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); + dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, DC_VALIDATE_MODE_AND_PROGRAMMING); /* for subvp + DRR case, if subvp pipes are still present we support pstate */ if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported && @@ -627,6 +627,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc, */ if (pipe->plane_state && 
!pipe->top_pipe && !pipe->prev_odm_pipe && !dcn32_is_center_timing(pipe) && !pipe->stream->hw_cursor_req && + !dc_state_get_stream_cursor_subvp_limit(pipe->stream, context) && !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) && (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE && @@ -1478,7 +1479,7 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc, /* Conditions for setting up phantom pipes for SubVP: * 1. Not force disable SubVP - * 2. Full update (i.e. !fast_validate) + * 2. Full update (i.e. DC_VALIDATE_MODE_AND_PROGRAMMING) * 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?) * 4. Display configuration passes validation * 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch) @@ -1516,7 +1517,8 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc, dc->res_pool->funcs->add_phantom_pipes(dc, context, pipes, *pipe_cnt, dc_pipe_idx); - *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false); + *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, + DC_VALIDATE_MODE_AND_PROGRAMMING); // Populate dppclk to trigger a recalculate in dml_get_voltage_level // so the phantom pipe DLG params can be assigned correctly. pipes[0].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, *pipe_cnt, 0); @@ -1559,7 +1561,8 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc, dc_state_remove_phantom_streams_and_planes(dc, context); dc_state_release_phantom_streams_and_planes(dc, context); vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported; - *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false); + *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, + DC_VALIDATE_MODE_AND_PROGRAMMING); *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt); /* This may adjust vlevel and maxMpcComb */ @@ -2137,7 +2140,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, display_e2e_pipe_params_st *pipes, int *pipe_cnt_out, int *vlevel_out, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; bool repopulate_pipes = false; @@ -2161,7 +2164,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, for (i = 0; i < context->stream_count; i++) resource_update_pipes_for_stream_with_slice_count(context, dc->current_state, dc->res_pool, context->streams[i], 1); - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode); if (!pipe_cnt) { out = true; @@ -2171,13 +2174,13 @@ bool dcn32_internal_validate_bw(struct dc *dc, dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt); context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context); - if (!fast_validate) { + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) { if (!dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt, &repopulate_pipes)) goto validate_fail; } - if (fast_validate || + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING || (dc->debug.dml_disallow_alternate_prefetch_modes && (vlevel == context->bw_ctx.dml.soc.num_states || vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) { @@ -2194,7 +2197,7 @@ bool 
dcn32_internal_validate_bw(struct dc *dc, context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = dm_prefetch_support_none; - context->bw_ctx.dml.validate_max_state = fast_validate; + context->bw_ctx.dml.validate_max_state = (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING); vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); context->bw_ctx.dml.validate_max_state = false; @@ -2246,7 +2249,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, int flag_vlevel = vlevel; int i; - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode); if (!dc->config.enable_windowed_mpo_odm) dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes); @@ -2342,7 +2345,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, } context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; - dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); + dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, DC_VALIDATE_MODE_AND_PROGRAMMING); maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; if (is_subvp_p_drr) { context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp; @@ -2388,7 +2391,8 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; } - dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel_temp, false); + dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel_temp, + DC_VALIDATE_MODE_AND_PROGRAMMING); if (vlevel_temp < vlevel) { vlevel = vlevel_temp; maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; @@ -2409,7 +2413,8 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, stream_status->fpo_in_use = false; } context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us; - dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); + dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, + DC_VALIDATE_MODE_AND_PROGRAMMING); } } } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h index 276e90e4e0ce..273d2bd79d85 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h @@ -49,7 +49,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, display_e2e_pipe_params_st *pipes, int *pipe_cnt_out, int *vlevel_out, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index 92f0a099d089..5d73efa2f0c9 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -437,7 +437,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing) int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, 
pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; @@ -446,7 +446,7 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc, const unsigned int max_allowed_vblank_nom = 1023; dcn31_populate_dml_pipes_from_context(dc, context, pipes, - fast_validate); + validate_mode); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h index 067480fc3691..d121c5afce71 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h @@ -37,7 +37,7 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc, int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c index 17d0b4923b0c..6f516af82956 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c @@ -470,7 +470,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing) int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; @@ -479,7 +479,7 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc, const unsigned int max_allowed_vblank_nom = 1023; dcn31_populate_dml_pipes_from_context(dc, context, pipes, - fast_validate); + validate_mode); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h index f93efab9a668..f71d9d8d0759 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h @@ -12,7 +12,7 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc, int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c deleted file mode 100644 index 4fbecb5ff349..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c +++ /dev/null @@ -1,239 +0,0 @@ -// SPDX-License-Identifier: MIT -// -// Copyright 2024 Advanced Micro Devices, Inc. 
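The recurring change in the dcn20/dcn21/dcn30/dcn31/dcn314/dcn32/dcn35/dcn351 hunks above replaces the boolean fast_validate parameter with an enum dc_validate_mode. Only DC_VALIDATE_MODE_AND_PROGRAMMING is visible in these hunks, and every old `if (fast_validate)` test became `if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)`, which implies the enum carries more than two values. A minimal sketch of the mapping; the second enumerator name below is an assumption for illustration, not taken from this diff:

/*
 * Sketch only: DC_VALIDATE_MODE_AND_PROGRAMMING is the one enumerator
 * confirmed by the hunks above; DC_VALIDATE_MODE_ONLY is an assumed name
 * for the old fast_validate == true behaviour.
 */
enum dc_validate_mode {
	/* full validation: compute watermarks/DLG params and program state */
	DC_VALIDATE_MODE_AND_PROGRAMMING = 0,
	/* mode-support check only; skips the watermark pass */
	DC_VALIDATE_MODE_ONLY = 1,
};

/* How a legacy boolean caller would map onto the new parameter. */
static inline enum dc_validate_mode from_fast_validate(bool fast_validate)
{
	return fast_validate ? DC_VALIDATE_MODE_ONLY
			     : DC_VALIDATE_MODE_AND_PROGRAMMING;
}

Writing the tests as `!= DC_VALIDATE_MODE_AND_PROGRAMMING` rather than `== DC_VALIDATE_MODE_ONLY` keeps the fast path correct for any additional mode that does not program hardware state.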
- -#include "dcn401_fpu.h" -#include "dcn401/dcn401_resource.h" -// We need this includes for WATERMARKS_* defines -#include "clk_mgr/dcn401/dcn401_smu14_driver_if.h" -#include "link.h" - -#define DC_LOGGER_INIT(logger) - -void dcn401_build_wm_range_table_fpu(struct clk_mgr *clk_mgr) -{ - /* defaults */ - double pstate_latency_us = clk_mgr->ctx->dc->dml.soc.dram_clock_change_latency_us; - double fclk_change_latency_us = clk_mgr->ctx->dc->dml.soc.fclk_change_latency_us; - double sr_exit_time_us = clk_mgr->ctx->dc->dml.soc.sr_exit_time_us; - double sr_enter_plus_exit_time_us = clk_mgr->ctx->dc->dml.soc.sr_enter_plus_exit_time_us; - /* For min clocks use as reported by PM FW and report those as min */ - uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz; - uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; - uint16_t setb_min_uclk_mhz = min_uclk_mhz; - uint16_t dcfclk_mhz_for_the_second_state = clk_mgr->ctx->dc->dml.soc.clock_limits[2].dcfclk_mhz; - - dc_assert_fp_enabled(); - - /* For Set B ranges use min clocks state 2 when available, and report those to PM FW */ - if (dcfclk_mhz_for_the_second_state) - clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = dcfclk_mhz_for_the_second_state; - else - clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; - - if (clk_mgr->bw_params->clk_table.entries[2].memclk_mhz) - setb_min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[2].memclk_mhz; - - /* Set A - Normal - default values */ - clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us = pstate_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us = fclk_change_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us = sr_exit_time_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF; - - /* Set B - Performance - higher clocks, using DPM[2] DCFCLK and UCLK */ - clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = true; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us = pstate_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.fclk_change_latency_us = fclk_change_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us = sr_exit_time_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_dcfclk = 0xFFFF; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_uclk = setb_min_uclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_uclk = 0xFFFF; - - /* Set C - Dummy P-State - P-State latency set to "dummy p-state" value */ - /* 'DalDummyClockChangeLatencyNs' registry key option 
set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */ - if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) { - clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid = true; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 50; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us = fclk_change_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_dcfclk = 0xFFFF; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF; - clk_mgr->bw_params->dummy_pstate_table[0].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 16; - clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 50; - clk_mgr->bw_params->dummy_pstate_table[1].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[1].memclk_mhz * 16; - clk_mgr->bw_params->dummy_pstate_table[1].dummy_pstate_latency_us = 9; - clk_mgr->bw_params->dummy_pstate_table[2].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[2].memclk_mhz * 16; - clk_mgr->bw_params->dummy_pstate_table[2].dummy_pstate_latency_us = 8; - clk_mgr->bw_params->dummy_pstate_table[3].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[3].memclk_mhz * 16; - clk_mgr->bw_params->dummy_pstate_table[3].dummy_pstate_latency_us = 5; - } - /* Set D - MALL - SR enter and exit time specific to MALL, TBD after bringup or later phase for now use DRAM values / 2 */ - /* For MALL DRAM clock change latency is N/A, for watermak calculations use lowest value dummy P state latency */ - clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid = true; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = clk_mgr->bw_params->dummy_pstate_table[3].dummy_pstate_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.fclk_change_latency_us = fclk_change_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = sr_exit_time_us / 2; // TBD - clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us / 2; // TBD - clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF; -} - -/* - * dcn401_update_bw_bounding_box - * - * This would override some dcn4_01 ip_or_soc initial parameters hardcoded from - * spreadsheet with actual values as per dGPU SKU: - * - with passed few options from dc->config - * - with dentist_vco_frequency from Clk Mgr (currently hardcoded, but might - * need to get it from PM FW) - * - with passed latency values (passed in ns units) in dc-> bb override for - * debugging purposes - * - with passed latencies from VBIOS (in 100_ns units) if available 
for - * certain dGPU SKU - * - with number of DRAM channels from VBIOS (which differ for certain dGPU SKU - * of the same ASIC) - * - clocks levels with passed clk_table entries from Clk Mgr as reported by PM - * FW for different clocks (which might differ for certain dGPU SKU of the - * same ASIC) - */ -void dcn401_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params) -{ - dc_assert_fp_enabled(); - - /* Override from passed dc->bb_overrides if available*/ - if (dc->bb_overrides.sr_exit_time_ns) - dc->dml2_options.bbox_overrides.sr_exit_latency_us = - dc->bb_overrides.sr_exit_time_ns / 1000.0; - - if (dc->bb_overrides.sr_enter_plus_exit_time_ns) - dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us = - dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; - - if (dc->bb_overrides.urgent_latency_ns) - dc->dml2_options.bbox_overrides.urgent_latency_us = - dc->bb_overrides.urgent_latency_ns / 1000.0; - - if (dc->bb_overrides.dram_clock_change_latency_ns) - dc->dml2_options.bbox_overrides.dram_clock_change_latency_us = - dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; - - if (dc->bb_overrides.fclk_clock_change_latency_ns) - dc->dml2_options.bbox_overrides.fclk_change_latency_us = - dc->bb_overrides.fclk_clock_change_latency_ns / 1000; - - /* Override from VBIOS if VBIOS bb_info available */ - if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { - struct bp_soc_bb_info bb_info = {0}; - if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { - if (bb_info.dram_clock_change_latency_100ns > 0) - dc->dml2_options.bbox_overrides.dram_clock_change_latency_us = - bb_info.dram_clock_change_latency_100ns * 10; - - if (bb_info.dram_sr_enter_exit_latency_100ns > 0) - dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us = - bb_info.dram_sr_enter_exit_latency_100ns * 10; - - if (bb_info.dram_sr_exit_latency_100ns > 0) - dc->dml2_options.bbox_overrides.sr_exit_latency_us = - bb_info.dram_sr_exit_latency_100ns * 10; - } - } - - /* Override from VBIOS for num_chan */ - if (dc->ctx->dc_bios->vram_info.num_chans) { - dc->dml2_options.bbox_overrides.dram_num_chan = - dc->ctx->dc_bios->vram_info.num_chans; - - } - - if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) - dc->dml2_options.bbox_overrides.dram_chanel_width_bytes = - dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; - - dc->dml2_options.bbox_overrides.disp_pll_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; - dc->dml2_options.bbox_overrides.xtalclk_mhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000.0; - dc->dml2_options.bbox_overrides.dchub_refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; - dc->dml2_options.bbox_overrides.dprefclk_mhz = dc->clk_mgr->dprefclk_khz / 1000.0; - - if (dc->clk_mgr->bw_params->clk_table.num_entries > 1) { - unsigned int i = 0; - - dc->dml2_options.bbox_overrides.clks_table.num_states = dc->clk_mgr->bw_params->clk_table.num_entries; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels = - 
dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dppclk_levels; - - for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels; i++) { - if (dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz) - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz; - } - - for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels; i++) { - if (dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz) - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz; - } - - for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; i++) { - if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz) - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz; - } - - for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels; i++) { - if (dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz) - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz; - } - - for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels; i++) { - if (dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz) - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz; - } - - for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; i++) { - if (dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz) { - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz; - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz; - } - } - } -} - diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h deleted file mode 100644 index 329f1788843c..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h +++ /dev/null @@ -1,14 +0,0 @@ -// SPDX-License-Identifier: MIT -// -// Copyright 2024 Advanced Micro Devices, Inc. 
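Stepping back to the three display_mode_vba_30/31/314 hunks above: each adds `dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75)` because DST_Y_PREFETCH is a U6.2 fixed-point register field, i.e. 6 integer bits and 2 fractional bits. The largest encodable value is therefore (2^8 - 1) / 4 = 63.75 lines, in steps of 0.25, and the adjacent `dml_floor(4.0 * (x + 0.125), 1) / 4.0` line (visible in the dcn31/dcn314 hunks) rounds to that granularity. A stand-alone rendering of the clamp-and-round step; the helper itself is illustrative, not code from this diff:

/*
 * Clamp a prefetch line count to what a U6.2 register field can encode.
 * Assumes a non-negative input, as the callers above use.
 */
static double clamp_round_u6_2(double lines)
{
	const double u6_2_max = 63.75;	/* 0b111111.11 = 63 + 3/4 */

	if (lines > u6_2_max)
		lines = u6_2_max;
	/* round to the nearest 0.25: floor(4x + 0.5) / 4, as the VBA code does */
	return (double)(int)(4.0 * (lines + 0.125)) / 4.0;
}

Clamping before rounding matters: 63.75 + 0.125 rounds back down to 63.75, so the result always fits the field, whereas an unclamped value such as 64.0 would overflow the 8-bit encoding.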
- -#ifndef __DCN401_FPU_H__ -#define __DCN401_FPU_H__ - -#include "clk_mgr.h" - -void dcn401_build_wm_range_table_fpu(struct clk_mgr *clk_mgr); - -void dcn401_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params); - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile index 21fd466dba26..4c21ce42054c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile @@ -81,10 +81,11 @@ AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2/,$(DML2)) AMD_DISPLAY_FILES += $(AMD_DAL_DML2) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag) +CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag) +CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags) @@ -94,18 +95,16 @@ CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflag CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags) - - CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/inc/dml2_debug.o := $(dml2_ccflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags) @@ -117,12 +116,11 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_floa CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_rcflags) 
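The deleted dcn401_update_bw_bounding_box_fpu() above also documents the override convention used throughout these bounding-box paths: debug overrides live in dc->bb_overrides in nanoseconds, zero means "no override", and DML consumes microseconds, hence the `/ 1000.0` conversions. Distilled into one helper; the name and signature are illustrative, not from the driver:

/* Apply one ns-based debug override to a us-based DML field, if set. */
static void apply_ns_override(double *dml_field_us, int override_ns)
{
	if (override_ns)	/* 0 == keep the bounding-box default */
		*dml_field_us = override_ns / 1000.0;	/* ns -> us */
}

For example, apply_ns_override(&bbox->sr_exit_latency_us, dc->bb_overrides.sr_exit_time_ns) would reproduce the first override in the deleted function.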
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/inc/dml2_debug.o := $(dml2_rcflags) DML21 := src/dml2_top/dml2_top_interfaces.o DML21 += src/dml2_top/dml2_top_soc15.o -DML21 += src/inc/dml2_debug.o DML21 += src/dml2_core/dml2_core_dcn4.o +DML21 += src/dml2_core/dml2_core_utils.o DML21 += src/dml2_core/dml2_core_factory.o DML21 += src/dml2_core/dml2_core_dcn4_calcs.o DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c index 7ae9c0ba0c9e..715f9019a33e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c @@ -10189,7 +10189,7 @@ dml_uint_t dml_mode_support_ex(struct dml_mode_support_ex_params_st *in_out_para result = mode_support_pwr_states(&in_out_params->out_lowest_state_idx, in_out_params->mode_lib, in_out_params->in_display_cfg, - 0, + in_out_params->in_start_state_idx, in_out_params->mode_lib->states.num_states - 1); if (result) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h index 0670e4dc4fd9..dbeb08466092 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h @@ -1917,6 +1917,7 @@ struct display_mode_lib_st { struct dml_mode_support_ex_params_st { struct display_mode_lib_st *mode_lib; const struct dml_display_cfg_st *in_display_cfg; + dml_uint_t in_start_state_idx; dml_uint_t out_lowest_state_idx; struct dml_mode_support_info_st *out_evaluation_info; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c index 0c8ec30ea672..a06217a9eef6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. 
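Just above, dml_mode_support_ex() stops hardcoding 0 as the lowest state handed to mode_support_pwr_states() and instead forwards the new in_start_state_idx member of struct dml_mode_support_ex_params_st, so callers can begin the power-state search above states they already know are insufficient. A sketch of a caller; the surrounding function and consumer names are assumptions for illustration:

static void validate_from_state(struct display_mode_lib_st *mode_lib,
				const struct dml_display_cfg_st *display_cfg)
{
	struct dml_mode_support_ex_params_st params = {
		.mode_lib = mode_lib,
		.in_display_cfg = display_cfg,
		/* illustrative: skip the two lowest power states */
		.in_start_state_idx = 2,
	};

	if (dml_mode_support_ex(&params))
		program_state(params.out_lowest_state_idx); /* hypothetical consumer */
}

Callers that leave the new field zero-initialized keep the old behaviour, since the search previously always started at state 0.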
- #include "dml21_wrapper.h" #include "dml2_core_dcn4_calcs.h" #include "dml2_internal_shared_types.h" @@ -11,277 +10,263 @@ #include "dml21_translation_helper.h" #include "bounding_boxes/dcn4_soc_bb.h" -static void dml21_init_socbb_params(struct dml2_initialize_instance_in_out *dml_init, - const struct dml2_configuration_options *config, - const struct dc *in_dc) -{ - const struct dml2_soc_bb *soc_bb; - const struct dml2_soc_qos_parameters *qos_params; - - switch (in_dc->ctx->dce_version) { - case DCN_VERSION_4_01: - default: - if (config->bb_from_dmub) - soc_bb = config->bb_from_dmub; - else - soc_bb = &dml2_socbb_dcn401; - - qos_params = &dml_dcn4_variant_a_soc_qos_params; - } - - /* patch soc bb */ - memcpy(&dml_init->soc_bb, soc_bb, sizeof(struct dml2_soc_bb)); - - /* patch qos params */ - memcpy(&dml_init->soc_bb.qos_parameters, qos_params, sizeof(struct dml2_soc_qos_parameters)); -} - -static void dml21_external_socbb_params(struct dml2_initialize_instance_in_out *dml_init, - const struct dml2_configuration_options *config) -{ - memcpy(&dml_init->soc_bb, &config->external_socbb_ip_params->soc_bb, sizeof(struct dml2_soc_bb)); -} - -static void dml21_external_ip_params(struct dml2_initialize_instance_in_out *dml_init, +static void dml21_populate_pmo_options(struct dml2_pmo_options *pmo_options, + const struct dc *in_dc, const struct dml2_configuration_options *config) { - memcpy(&dml_init->ip_caps, &config->external_socbb_ip_params->ip_params, sizeof(struct dml2_ip_capabilities)); + bool disable_fams2 = !in_dc->debug.fams2_config.bits.enable; + + /* ODM options */ + pmo_options->disable_dyn_odm = !config->minimize_dispclk_using_odm; + pmo_options->disable_dyn_odm_for_multi_stream = true; + pmo_options->disable_dyn_odm_for_stream_with_svp = true; + + pmo_options->disable_vblank = ((in_dc->debug.dml21_disable_pstate_method_mask >> 1) & 1); + + /* NOTE: DRR and SubVP Require FAMS2 */ + pmo_options->disable_svp = ((in_dc->debug.dml21_disable_pstate_method_mask >> 2) & 1) || + in_dc->debug.force_disable_subvp || + disable_fams2; + pmo_options->disable_drr_clamped = ((in_dc->debug.dml21_disable_pstate_method_mask >> 3) & 1) || + disable_fams2; + pmo_options->disable_drr_var = ((in_dc->debug.dml21_disable_pstate_method_mask >> 4) & 1) || + disable_fams2; + pmo_options->disable_fams2 = disable_fams2; + + pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE || + in_dc->debug.disable_fams_gaming == INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY; + pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE; } -static void dml21_init_ip_params(struct dml2_initialize_instance_in_out *dml_init, +/* + * Populate dml_init based on default static values in soc bb. The default + * values are for reference and support at least minimal operation of current + * SoC and DCN hardware. The values could be modifed by subsequent override + * functions to reflect our true hardware capability. 
+ */ +static void populate_default_dml_init_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc) { - const struct dml2_ip_capabilities *ip_caps; - switch (in_dc->ctx->dce_version) { case DCN_VERSION_4_01: + dml_init->options.project_id = dml2_project_dcn4x_stage2_auto_drr_svp; + dml21_populate_pmo_options(&dml_init->options.pmo_options, in_dc, config); + dml_init->soc_bb = dml2_socbb_dcn401; + dml_init->soc_bb.qos_parameters = dml_dcn4_variant_a_soc_qos_params; + dml_init->ip_caps = dml2_dcn401_max_ip_caps; + break; default: - ip_caps = &dml2_dcn401_max_ip_caps; + memset(dml_init, 0, sizeof(*dml_init)); + DC_ERR("unsupported dcn version for DML21!"); + return; } - - memcpy(&dml_init->ip_caps, ip_caps, sizeof(struct dml2_ip_capabilities)); } -void dml21_initialize_soc_bb_params(struct dml2_initialize_instance_in_out *dml_init, +static void override_dml_init_with_values_from_hardware_default(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc) { - if (config->use_native_soc_bb_construction) - dml21_init_socbb_params(dml_init, config, in_dc); - else - dml21_external_socbb_params(dml_init, config); + dml_init->soc_bb.dchub_refclk_mhz = in_dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000; + dml_init->soc_bb.dprefclk_mhz = in_dc->clk_mgr->dprefclk_khz / 1000; + dml_init->soc_bb.dispclk_dppclk_vco_speed_mhz = in_dc->clk_mgr->dentist_vco_freq_khz / 1000.0; } -void dml21_initialize_ip_params(struct dml2_initialize_instance_in_out *dml_init, +/* + * SMU stands for System Management Unit. It is a power management processor. + * It owns the initialization of dc's clock table and programming of clock values + * based on dc's requests. + * Our clock values in the base soc bb are dummy placeholders. The real clock values + * are retrieved from SMU firmware into dc's clock table at runtime. + * This function overrides our dummy placeholder values with real values in dc's + * clock table. 
+ */ +static void override_dml_init_with_values_from_smu( + struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc) { - if (config->use_native_soc_bb_construction) - dml21_init_ip_params(dml_init, config, in_dc); - else - dml21_external_ip_params(dml_init, config); -} - -void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_init, - const struct dml2_configuration_options *config, const struct dc *in_dc) -{ int i; - const struct clk_bw_params *dc_bw_params = in_dc->clk_mgr->bw_params; const struct clk_limit_table *dc_clk_table = &dc_bw_params->clk_table; - struct dml2_soc_bb *dml_soc_bb = &dml_init->soc_bb; - struct dml2_soc_state_table *dml_clk_table = &dml_soc_bb->clk_table; - - /* override clocks if smu is present */ - if (in_dc->clk_mgr->funcs->is_smu_present && in_dc->clk_mgr->funcs->is_smu_present(in_dc->clk_mgr)) { - /* dcfclk */ - if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) { - dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->dcfclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz && - dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) { - dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000; - dml_clk_table->dcfclk.num_clk_values = i + 1; - } else { - dml_clk_table->dcfclk.clk_values_khz[i] = 0; - dml_clk_table->dcfclk.num_clk_values = i; - } + struct dml2_soc_state_table *dml_clk_table = &dml_init->soc_bb.clk_table; + + if (!in_dc->clk_mgr->funcs->is_smu_present || + !in_dc->clk_mgr->funcs->is_smu_present(in_dc->clk_mgr)) + /* skip if smu is not present */ + return; + + /* dcfclk */ + if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) { + dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->dcfclk.num_clk_values) { + if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz && + dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) { + dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000; + dml_clk_table->dcfclk.num_clk_values = i + 1; } else { - dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000; + dml_clk_table->dcfclk.clk_values_khz[i] = 0; + dml_clk_table->dcfclk.num_clk_values = i; } } else { - dml_clk_table->dcfclk.clk_values_khz[i] = 0; + dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000; } + } else { + dml_clk_table->dcfclk.clk_values_khz[i] = 0; } } + } - /* fclk */ - if (dc_clk_table->num_entries_per_clk.num_fclk_levels) { - dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->fclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz && - dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < 
dc_bw_params->dc_mode_limit.fclk_mhz) { - dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000; - dml_clk_table->fclk.num_clk_values = i + 1; - } else { - dml_clk_table->fclk.clk_values_khz[i] = 0; - dml_clk_table->fclk.num_clk_values = i; - } + /* fclk */ + if (dc_clk_table->num_entries_per_clk.num_fclk_levels) { + dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->fclk.num_clk_values) { + if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz && + dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < dc_bw_params->dc_mode_limit.fclk_mhz) { + dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000; + dml_clk_table->fclk.num_clk_values = i + 1; } else { - dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000; + dml_clk_table->fclk.clk_values_khz[i] = 0; + dml_clk_table->fclk.num_clk_values = i; } } else { - dml_clk_table->fclk.clk_values_khz[i] = 0; + dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000; } + } else { + dml_clk_table->fclk.clk_values_khz[i] = 0; } } + } - /* uclk */ - if (dc_clk_table->num_entries_per_clk.num_memclk_levels) { - dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->uclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.memclk_mhz && - dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) { - dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000; - dml_clk_table->uclk.num_clk_values = i + 1; - } else { - dml_clk_table->uclk.clk_values_khz[i] = 0; - dml_clk_table->uclk.num_clk_values = i; - } + /* uclk */ + if (dc_clk_table->num_entries_per_clk.num_memclk_levels) { + dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->uclk.num_clk_values) { + if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.memclk_mhz && + dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) { + dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000; + dml_clk_table->uclk.num_clk_values = i + 1; } else { - dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000; + dml_clk_table->uclk.clk_values_khz[i] = 0; + dml_clk_table->uclk.num_clk_values = i; } } else { - dml_clk_table->uclk.clk_values_khz[i] = 0; + dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000; } + } else { + dml_clk_table->uclk.clk_values_khz[i] = 0; } } + } - /* dispclk */ - if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) { - dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->dispclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dispclk_mhz && - 
dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) { - dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000; - dml_clk_table->dispclk.num_clk_values = i + 1; - } else { - dml_clk_table->dispclk.clk_values_khz[i] = 0; - dml_clk_table->dispclk.num_clk_values = i; - } + /* dispclk */ + if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) { + dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->dispclk.num_clk_values) { + if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dispclk_mhz && + dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) { + dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000; + dml_clk_table->dispclk.num_clk_values = i + 1; } else { - dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000; + dml_clk_table->dispclk.clk_values_khz[i] = 0; + dml_clk_table->dispclk.num_clk_values = i; } } else { - dml_clk_table->dispclk.clk_values_khz[i] = 0; + dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000; } + } else { + dml_clk_table->dispclk.clk_values_khz[i] = 0; } } + } - /* dppclk */ - if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) { - dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->dppclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz && - dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) { - dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000; - dml_clk_table->dppclk.num_clk_values = i + 1; - } else { - dml_clk_table->dppclk.clk_values_khz[i] = 0; - dml_clk_table->dppclk.num_clk_values = i; - } + /* dppclk */ + if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) { + dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->dppclk.num_clk_values) { + if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz && + dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) { + dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000; + dml_clk_table->dppclk.num_clk_values = i + 1; } else { - dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000; + dml_clk_table->dppclk.clk_values_khz[i] = 0; + dml_clk_table->dppclk.num_clk_values = i; } } else { - dml_clk_table->dppclk.clk_values_khz[i] = 0; + dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000; } + } else { + dml_clk_table->dppclk.clk_values_khz[i] = 0; } } + } - /* dtbclk */ - if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) { - dml_clk_table->dtbclk.num_clk_values = 
dc_clk_table->num_entries_per_clk.num_dtbclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->dtbclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz && - dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) { - dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000; - dml_clk_table->dtbclk.num_clk_values = i + 1; - } else { - dml_clk_table->dtbclk.clk_values_khz[i] = 0; - dml_clk_table->dtbclk.num_clk_values = i; - } + /* dtbclk */ + if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) { + dml_clk_table->dtbclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dtbclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->dtbclk.num_clk_values) { + if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz && + dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) { + dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000; + dml_clk_table->dtbclk.num_clk_values = i + 1; } else { - dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000; + dml_clk_table->dtbclk.clk_values_khz[i] = 0; + dml_clk_table->dtbclk.num_clk_values = i; } } else { - dml_clk_table->dtbclk.clk_values_khz[i] = 0; + dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000; } + } else { + dml_clk_table->dtbclk.clk_values_khz[i] = 0; } } + } - /* socclk */ - if (dc_clk_table->num_entries_per_clk.num_socclk_levels) { - dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->socclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz && - dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) { - dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000; - dml_clk_table->socclk.num_clk_values = i + 1; - } else { - dml_clk_table->socclk.clk_values_khz[i] = 0; - dml_clk_table->socclk.num_clk_values = i; - } + /* socclk */ + if (dc_clk_table->num_entries_per_clk.num_socclk_levels) { + dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->socclk.num_clk_values) { + if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz && + dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) { + dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000; + dml_clk_table->socclk.num_clk_values = i + 1; } else { - dml_clk_table->socclk.clk_values_khz[i] = dc_clk_table->entries[i].socclk_mhz * 1000; + dml_clk_table->socclk.clk_values_khz[i] = 0; + dml_clk_table->socclk.num_clk_values = i; } } else { - dml_clk_table->socclk.clk_values_khz[i] = 0; + dml_clk_table->socclk.clk_values_khz[i] = 
dc_clk_table->entries[i].socclk_mhz * 1000; } + } else { + dml_clk_table->socclk.clk_values_khz[i] = 0; } } - - /* do not override phyclks for now */ - /* phyclk */ - // dml_clk_table->phyclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_levels; - // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) { - // dml_clk_table->phyclk.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_mhz * 1000; - // } - - /* phyclk_d18 */ - // dml_clk_table->phyclk_d18.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_d18_levels; - // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) { - // dml_clk_table->phyclk_d18.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_d18_mhz * 1000; - // } - - /* phyclk_d32 */ - // dml_clk_table->phyclk_d32.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_d32_levels; - // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) { - // dml_clk_table->phyclk_d32.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_d32_mhz * 1000; - // } } +} - dml_soc_bb->dchub_refclk_mhz = in_dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000; - dml_soc_bb->dprefclk_mhz = in_dc->clk_mgr->dprefclk_khz / 1000; - dml_soc_bb->xtalclk_mhz = in_dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000; - dml_soc_bb->dispclk_dppclk_vco_speed_mhz = in_dc->clk_mgr->dentist_vco_freq_khz / 1000.0; +static void override_dml_init_with_values_from_vbios( + struct dml2_initialize_instance_in_out *dml_init, + const struct dml2_configuration_options *config, + const struct dc *in_dc) +{ + const struct clk_bw_params *dc_bw_params = in_dc->clk_mgr->bw_params; + struct dml2_soc_bb *dml_soc_bb = &dml_init->soc_bb; + struct dml2_soc_state_table *dml_clk_table = &dml_init->soc_bb.clk_table; - /* override bounding box paramters from VBIOS */ if (in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns > 0) dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us = (in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns + 9) / 10; @@ -308,32 +293,120 @@ void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_in dml_clk_table->dram_config.channel_width_bytes = in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; } - /* override bounding box paramters from DC config */ - if (in_dc->bb_overrides.sr_exit_time_ns) { - dml_soc_bb->power_management_parameters.stutter_exit_latency_us = - in_dc->bb_overrides.sr_exit_time_ns / 1000.0; + dml_init->soc_bb.xtalclk_mhz = in_dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000; +} + + +static void override_dml_init_with_values_from_dmub(struct dml2_initialize_instance_in_out *dml_init, + const struct dml2_configuration_options *config, + const struct dc *in_dc) +{ + /* + * TODO - There seems to be overlaps between the values overriden from + * dmub and vbios. Investigate and identify the values that DMUB needs + * to own. 
+ */ +// const struct dmub_soc_bb_params *dmub_bb_params = +// (const struct dmub_soc_bb_params *)config->bb_from_dmub; + +// if (dmub_bb_params == NULL) +// return; + +// if (dmub_bb_params->dram_clk_change_blackout_ns > 0) +// dml_init->soc_bb.power_management_parameters.dram_clk_change_blackout_us = +// (double) dmub_bb_params->dram_clk_change_blackout_ns / 1000.0; +// if (dmub_bb_params->dram_clk_change_read_only_ns > 0) +// dml_init->soc_bb.power_management_parameters.dram_clk_change_read_only_us = +// (double) dmub_bb_params->dram_clk_change_read_only_ns / 1000.0; +// if (dmub_bb_params->dram_clk_change_write_only_ns > 0) +// dml_init->soc_bb.power_management_parameters.dram_clk_change_write_only_us = +// (double) dmub_bb_params->dram_clk_change_write_only_ns / 1000.0; +// if (dmub_bb_params->fclk_change_blackout_ns > 0) +// dml_init->soc_bb.power_management_parameters.fclk_change_blackout_us = +// (double) dmub_bb_params->fclk_change_blackout_ns / 1000.0; +// if (dmub_bb_params->g7_ppt_blackout_ns > 0) +// dml_init->soc_bb.power_management_parameters.g7_ppt_blackout_us = +// (double) dmub_bb_params->g7_ppt_blackout_ns / 1000.0; +// if (dmub_bb_params->stutter_enter_plus_exit_latency_ns > 0) +// dml_init->soc_bb.power_management_parameters.stutter_enter_plus_exit_latency_us = +// (double) dmub_bb_params->stutter_enter_plus_exit_latency_ns / 1000.0; +// if (dmub_bb_params->stutter_exit_latency_ns > 0) +// dml_init->soc_bb.power_management_parameters.stutter_exit_latency_us = +// (double) dmub_bb_params->stutter_exit_latency_ns / 1000.0; +// if (dmub_bb_params->z8_stutter_enter_plus_exit_latency_ns > 0) +// dml_init->soc_bb.power_management_parameters.z8_stutter_enter_plus_exit_latency_us = +// (double) dmub_bb_params->z8_stutter_enter_plus_exit_latency_ns / 1000.0; +// if (dmub_bb_params->z8_stutter_exit_latency_ns > 0) +// dml_init->soc_bb.power_management_parameters.z8_stutter_exit_latency_us = +// (double) dmub_bb_params->z8_stutter_exit_latency_ns / 1000.0; +// if (dmub_bb_params->z8_min_idle_time_ns > 0) +// dml_init->soc_bb.power_management_parameters.z8_min_idle_time = +// (double) dmub_bb_params->z8_min_idle_time_ns / 1000.0; +// #ifndef TRIM_DML2_DCN6B_IP_SENSITIVE +// if (dmub_bb_params->type_b_dram_clk_change_blackout_ns > 0) +// dml_init->soc_bb.power_management_parameters.lpddr5_dram_clk_change_blackout_us = +// (double) dmub_bb_params->type_b_dram_clk_change_blackout_ns / 1000.0; +// if (dmub_bb_params->type_b_ppt_blackout_ns > 0) +// dml_init->soc_bb.power_management_parameters.lpddr5_ppt_blackout_us = +// (double) dmub_bb_params->type_b_ppt_blackout_ns / 1000.0; +// #else +// if (dmub_bb_params->type_b_dram_clk_change_blackout_ns > 0) +// dml_init->soc_bb.power_management_parameters.type_b_dram_clk_change_blackout_us = +// (double) dmub_bb_params->type_b_dram_clk_change_blackout_ns / 1000.0; +// if (dmub_bb_params->type_b_ppt_blackout_ns > 0) +// dml_init->soc_bb.power_management_parameters.type_b_ppt_blackout_us = +// (double) dmub_bb_params->type_b_ppt_blackout_ns / 1000.0; +// #endif +// if (dmub_bb_params->vmin_limit_dispclk_khz > 0) +// dml_init->soc_bb.vmin_limit.dispclk_khz = dmub_bb_params->vmin_limit_dispclk_khz; +// if (dmub_bb_params->vmin_limit_dcfclk_khz > 0) +// dml_init->soc_bb.vmin_limit.dcfclk_khz = dmub_bb_params->vmin_limit_dcfclk_khz; +// if (dmub_bb_params->g7_temperature_read_blackout_ns > 0) +// dml_init->soc_bb.power_management_parameters.g7_temperature_read_blackout_us = +// (double) dmub_bb_params->g7_temperature_read_blackout_ns / 1000.0; +} 
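The disabled DMUB block above repeats a single pattern: accept a DMUB-provided latency only when it is non-zero, converting nanoseconds into the microseconds DML works in. A minimal sketch of that pattern follows; the helper name is hypothetical, while the struct fields are the ones referenced in the commented-out code (config->bb_from_dmub cast to dmub_soc_bb_params):

/* Hypothetical helper: 0 means "not provided by DMUB", keep the default. */
static inline void override_us_from_ns(double *dst_us, uint32_t src_ns)
{
	if (src_ns > 0)
		*dst_us = (double)src_ns / 1000.0;
}

/*
 * Example use, mirroring one of the commented-out assignments:
 * override_us_from_ns(&dml_init->soc_bb.power_management_parameters.fclk_change_blackout_us,
 *                     dmub_bb_params->fclk_change_blackout_ns);
 */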
+ +static void override_dml_init_with_values_from_software_policy(struct dml2_initialize_instance_in_out *dml_init, + const struct dml2_configuration_options *config, + const struct dc *in_dc) +{ + if (!config->use_native_soc_bb_construction) { + dml_init->soc_bb = config->external_socbb_ip_params->soc_bb; + dml_init->ip_caps = config->external_socbb_ip_params->ip_params; } - if (in_dc->bb_overrides.sr_enter_plus_exit_time_ns) { - dml_soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us = + if (in_dc->bb_overrides.sr_exit_time_ns) + dml_init->soc_bb.power_management_parameters.stutter_exit_latency_us = + in_dc->bb_overrides.sr_exit_time_ns / 1000.0; + + if (in_dc->bb_overrides.sr_enter_plus_exit_time_ns) + dml_init->soc_bb.power_management_parameters.stutter_enter_plus_exit_latency_us = in_dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; - } - if (in_dc->bb_overrides.dram_clock_change_latency_ns) { - dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us = + if (in_dc->bb_overrides.dram_clock_change_latency_ns) + dml_init->soc_bb.power_management_parameters.dram_clk_change_blackout_us = in_dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; - } - if (in_dc->bb_overrides.fclk_clock_change_latency_ns) { - dml_soc_bb->power_management_parameters.fclk_change_blackout_us = + if (in_dc->bb_overrides.fclk_clock_change_latency_ns) + dml_init->soc_bb.power_management_parameters.fclk_change_blackout_us = in_dc->bb_overrides.fclk_clock_change_latency_ns / 1000.0; - } +} - //TODO - // if (in_dc->bb_overrides.dummy_clock_change_latency_ns) { - // dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us = - // in_dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; - // } +void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init, + const struct dml2_configuration_options *config, + const struct dc *in_dc) +{ + populate_default_dml_init_params(dml_init, config, in_dc); + + override_dml_init_with_values_from_hardware_default(dml_init, config, in_dc); + + override_dml_init_with_values_from_smu(dml_init, config, in_dc); + + override_dml_init_with_values_from_vbios(dml_init, config, in_dc); + + override_dml_init_with_values_from_dmub(dml_init, config, in_dc); + + override_dml_init_with_values_from_software_policy(dml_init, config, in_dc); } static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream) @@ -526,7 +599,8 @@ static void populate_dml21_output_config_from_stream_state(struct dml2_link_outp static void populate_dml21_stream_overrides_from_stream_state( struct dml2_stream_parameters *stream_desc, - struct dc_stream_state *stream) + struct dc_stream_state *stream, + struct dc_stream_status *stream_status) { switch (stream->debug.force_odm_combine_segments) { case 0: @@ -551,7 +625,9 @@ static void populate_dml21_stream_overrides_from_stream_state( if (!stream->ctx->dc->debug.enable_single_display_2to1_odm_policy || stream->debug.force_odm_combine_segments > 0) stream_desc->overrides.disable_dynamic_odm = true; - stream_desc->overrides.disable_subvp = stream->ctx->dc->debug.force_disable_subvp || stream->hw_cursor_req; + stream_desc->overrides.disable_subvp = stream->ctx->dc->debug.force_disable_subvp || + stream->hw_cursor_req || + stream_status->mall_stream_config.cursor_size_limit_subvp; } static enum dml2_swizzle_mode gfx_addr3_to_dml2_swizzle_mode(enum swizzle_mode_addr3_values addr3_mode) @@ -723,7 +799,6 @@ static void populate_dml21_surface_config_from_plane_state( switch 
(plane_state->tiling_info.gfxversion) { case DcGfxVersion7: case DcGfxVersion8: - // Placeholder for programming the array_mode break; case DcGfxVersion9: case DcGfxVersion10: @@ -785,6 +860,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm plane->pixel_format = dml2_420_10; break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: plane->pixel_format = dml2_444_64; @@ -886,6 +962,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm plane->tdlut.tdlut_width_mode = dml2_tdlut_width_17_cube; break; case DC_CM2_GPU_MEM_SIZE_TRANSFORMED: + default: //plane->tdlut.tdlut_width_mode = dml2_tdlut_width_flatten; // dml2_tdlut_width_flatten undefined break; } @@ -910,7 +987,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm } //TODO : Could be possibly moved to a common helper layer. -static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const struct dc_plane_state *plane, unsigned int *plane_id) +static bool dml21_wrapper_get_plane_id(const struct dc_state *context, unsigned int stream_id, const struct dc_plane_state *plane, unsigned int *plane_id) { int i, j; @@ -918,10 +995,12 @@ static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const str return false; for (i = 0; i < context->stream_count; i++) { - for (j = 0; j < context->stream_status[i].plane_count; j++) { - if (context->stream_status[i].plane_states[j] == plane) { - *plane_id = (i << 16) | j; - return true; + if (context->streams[i]->stream_id == stream_id) { + for (j = 0; j < context->stream_status[i].plane_count; j++) { + if (context->stream_status[i].plane_states[j] == plane) { + *plane_id = (i << 16) | j; + return true; + } } } } @@ -944,14 +1023,14 @@ static unsigned int map_stream_to_dml21_display_cfg(const struct dml2_context *d return location; } -static unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, +unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id, const struct dc_plane_state *plane, const struct dc_state *context) { unsigned int plane_id; int i = 0; int location = -1; - if (!dml21_wrapper_get_plane_id(context, plane, &plane_id)) { + if (!dml21_wrapper_get_plane_id(context, stream_id, plane, &plane_id)) { ASSERT(false); return -1; } @@ -1021,7 +1100,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx); adjust_dml21_hblank_timing_config_from_pipe_ctx(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, &context->res_ctx.pipe_ctx[stream_index]); populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]); - populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index]); + populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index], &context->stream_status[stream_index]); dml_dispcfg->stream_descriptors[disp_cfg_stream_location].overrides.hw.twait_budgeting.fclk_pstate = 
dml2_twait_budgeting_setting_if_needed; dml_dispcfg->stream_descriptors[disp_cfg_stream_location].overrides.hw.twait_budgeting.uclk_pstate = dml2_twait_budgeting_setting_if_needed; @@ -1037,7 +1116,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location; } else { for (plane_index = 0; plane_index < context->stream_status[stream_index].plane_count; plane_index++) { - disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->stream_status[stream_index].plane_states[plane_index], context); + disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], context); if (disp_cfg_plane_location < 0) disp_cfg_plane_location = dml_dispcfg->num_planes++; @@ -1048,7 +1127,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s populate_dml21_plane_config_from_plane_state(dml_ctx, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location], context->stream_status[stream_index].plane_states[plane_index], context, stream_index); dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location; - if (dml21_wrapper_get_plane_id(context, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location])) + if (dml21_wrapper_get_plane_id(context, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location])) dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id_valid[disp_cfg_plane_location] = true; /* apply forced pstate policy */ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h index 069b939c672a..9880d3e0398e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h @@ -11,18 +11,18 @@ struct dc_state; struct dcn_watermarks; union dcn_watermark_set; struct pipe_ctx; +struct dc_plane_state; struct dml2_context; struct dml2_configuration_options; struct dml2_initialize_instance_in_out; -void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc); -void dml21_initialize_soc_bb_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc); -void dml21_initialize_ip_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc); +void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc); bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx); void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state *context); void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_set *watermarks, struct dml2_context *in_ctx); void dml21_map_hw_resources(struct dml2_context *dml_ctx); void dml21_get_pipe_mcache_config(struct dc_state *context, struct pipe_ctx *pipe_ctx, 
struct dml2_per_plane_programming *pln_prog, struct dml2_pipe_configuration_descriptor *mcache_pipe_config); void dml21_set_dc_p_state_type(struct pipe_ctx *pipe_ctx, struct dml2_per_stream_programming *stream_programming, bool sub_vp_enabled); +unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id, const struct dc_plane_state *plane, const struct dc_state *context); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c index 930e86cdb88a..ee721606b883 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c @@ -384,6 +384,7 @@ void dml21_build_fams2_programming(const struct dc *dc, /* reset fams2 data */ memset(&context->bw_ctx.bw.dcn.fams2_stream_base_params, 0, sizeof(union dmub_cmd_fams2_config) * DML2_MAX_PLANES); memset(&context->bw_ctx.bw.dcn.fams2_stream_sub_params, 0, sizeof(union dmub_cmd_fams2_config) * DML2_MAX_PLANES); + memset(&context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2, 0, sizeof(union dmub_fams2_stream_static_sub_state_v2) * DML2_MAX_PLANES); memset(&context->bw_ctx.bw.dcn.fams2_global_config, 0, sizeof(struct dmub_cmd_fams2_global_config)); if (dml_ctx->v21.mode_programming.programming->fams2_required) { @@ -414,9 +415,16 @@ void dml21_build_fams2_programming(const struct dc *dc, memcpy(static_base_state, &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_base_params, sizeof(union dmub_cmd_fams2_config)); - memcpy(static_sub_state, - &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params, - sizeof(union dmub_cmd_fams2_config)); + + if (dc->debug.fams_version.major == 3) { + memcpy(&context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2[num_fams2_streams], + &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params_v2, + sizeof(union dmub_fams2_stream_static_sub_state_v2)); + } else { + memcpy(static_sub_state, + &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params, + sizeof(union dmub_cmd_fams2_config)); + } switch (dc->debug.fams_version.minor) { case 1: diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c index be54f0e696ce..03de3cf06ae5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. 
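The dml21_wrapper.c hunks below swap kzalloc/kfree for vzalloc/vfree on the dml2_context, dml2_instance, and dml2_display_cfg_programming objects. These structures are large, and vzalloc only needs virtually contiguous pages, so the allocation presumably no longer depends on high-order physically contiguous blocks that can be scarce under memory fragmentation. A minimal sketch of the pairing under that assumption (error handling trimmed, names follow the hunk):

#include <linux/vmalloc.h>

static bool example_alloc(void)
{
	/* Large, zero-initialized allocation: vzalloc() instead of kzalloc(). */
	struct dml2_context *ctx = vzalloc(sizeof(*ctx));

	if (!ctx)
		return false;

	/* ... use ctx ... */

	vfree(ctx);	/* pairs with vzalloc(); kfree() here would be a bug */
	return true;
}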
- #include "dml2_internal_types.h" #include "dml_top.h" #include "dml2_core_dcn4_calcs.h" @@ -11,13 +10,15 @@ #include "dml21_translation_helper.h" #include "dml2_dc_resource_mgmt.h" +#define INVALID -1 + static bool dml21_allocate_memory(struct dml2_context **dml_ctx) { - *dml_ctx = kzalloc(sizeof(struct dml2_context), GFP_KERNEL); + *dml_ctx = vzalloc(sizeof(struct dml2_context)); if (!(*dml_ctx)) return false; - (*dml_ctx)->v21.dml_init.dml2_instance = kzalloc(sizeof(struct dml2_instance), GFP_KERNEL); + (*dml_ctx)->v21.dml_init.dml2_instance = vzalloc(sizeof(struct dml2_instance)); if (!((*dml_ctx)->v21.dml_init.dml2_instance)) return false; @@ -27,22 +28,18 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx) (*dml_ctx)->v21.mode_support.display_config = &(*dml_ctx)->v21.display_config; (*dml_ctx)->v21.mode_programming.display_config = (*dml_ctx)->v21.mode_support.display_config; - (*dml_ctx)->v21.mode_programming.programming = kzalloc(sizeof(struct dml2_display_cfg_programming), GFP_KERNEL); + (*dml_ctx)->v21.mode_programming.programming = vzalloc(sizeof(struct dml2_display_cfg_programming)); if (!((*dml_ctx)->v21.mode_programming.programming)) return false; return true; } -static void dml21_apply_debug_options(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config) +static void dml21_populate_configuration_options(const struct dc *in_dc, + struct dml2_context *dml_ctx, + const struct dml2_configuration_options *config) { - bool disable_fams2; - struct dml2_pmo_options *pmo_options = &dml_ctx->v21.dml_init.options.pmo_options; - - /* ODM options */ - pmo_options->disable_dyn_odm = !config->minimize_dispclk_using_odm; - pmo_options->disable_dyn_odm_for_multi_stream = true; - pmo_options->disable_dyn_odm_for_stream_with_svp = true; + dml_ctx->config = *config; /* UCLK P-State options */ if (in_dc->debug.dml21_force_pstate_method) { @@ -52,50 +49,22 @@ static void dml21_apply_debug_options(const struct dc *in_dc, struct dml2_contex } else { dml_ctx->config.pmo.force_pstate_method_enable = false; } - - pmo_options->disable_vblank = ((in_dc->debug.dml21_disable_pstate_method_mask >> 1) & 1); - - /* NOTE: DRR and SubVP Require FAMS2 */ - disable_fams2 = !in_dc->debug.fams2_config.bits.enable; - pmo_options->disable_svp = ((in_dc->debug.dml21_disable_pstate_method_mask >> 2) & 1) || - in_dc->debug.force_disable_subvp || - disable_fams2; - pmo_options->disable_drr_clamped = ((in_dc->debug.dml21_disable_pstate_method_mask >> 3) & 1) || - disable_fams2; - pmo_options->disable_drr_var = ((in_dc->debug.dml21_disable_pstate_method_mask >> 4) & 1) || - disable_fams2; - pmo_options->disable_fams2 = disable_fams2; - - pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE || - in_dc->debug.disable_fams_gaming == INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY; - pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE; } -static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config) +static void dml21_init(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config) { - switch (in_dc->ctx->dce_version) { - case DCN_VERSION_4_01: - (*dml_ctx)->v21.dml_init.options.project_id = dml2_project_dcn4x_stage2_auto_drr_svp; - break; - default: - (*dml_ctx)->v21.dml_init.options.project_id = dml2_project_invalid; - } - (*dml_ctx)->architecture = 
dml2_architecture_21; + dml_ctx->architecture = dml2_architecture_21; + + dml21_populate_configuration_options(in_dc, dml_ctx, config); - /* Store configuration options */ - (*dml_ctx)->config = *config; + DC_FP_START(); - /*Initialize SOCBB and DCNIP params */ - dml21_initialize_soc_bb_params(&(*dml_ctx)->v21.dml_init, config, in_dc); - dml21_initialize_ip_params(&(*dml_ctx)->v21.dml_init, config, in_dc); - dml21_apply_soc_bb_overrides(&(*dml_ctx)->v21.dml_init, config, in_dc); + dml21_populate_dml_init_params(&dml_ctx->v21.dml_init, config, in_dc); - /* apply debug overrides */ - dml21_apply_debug_options(in_dc, *dml_ctx, config); + dml2_initialize_instance(&dml_ctx->v21.dml_init); - /*Initialize DML21 instance */ - dml2_initialize_instance(&(*dml_ctx)->v21.dml_init); + DC_FP_END(); } bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config) @@ -104,15 +73,15 @@ bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const s if (!dml21_allocate_memory(dml_ctx)) return false; - dml21_init(in_dc, dml_ctx, config); + dml21_init(in_dc, *dml_ctx, config); return true; } void dml21_destroy(struct dml2_context *dml2) { - kfree(dml2->v21.dml_init.dml2_instance); - kfree(dml2->v21.mode_programming.programming); + vfree(dml2->v21.dml_init.dml2_instance); + vfree(dml2->v21.mode_programming.programming); } static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *context, struct resource_context *out_new_hw_state, @@ -203,10 +172,40 @@ static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_sta } } +static void dml21_prepare_mcache_params(struct dml2_context *dml_ctx, struct dc_state *context, struct dc_mcache_params *mcache_params) +{ + int dc_plane_idx = 0; + int dml_prog_idx, stream_idx, plane_idx; + struct dml2_per_plane_programming *pln_prog = NULL; + + for (stream_idx = 0; stream_idx < context->stream_count; stream_idx++) { + for (plane_idx = 0; plane_idx < context->stream_status[stream_idx].plane_count; plane_idx++) { + dml_prog_idx = map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_idx]->stream_id, context->stream_status[stream_idx].plane_states[plane_idx], context); + if (dml_prog_idx == INVALID) { + continue; + } + pln_prog = &dml_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx]; + mcache_params[dc_plane_idx].valid = pln_prog->mcache_allocation.valid; + mcache_params[dc_plane_idx].num_mcaches_plane0 = pln_prog->mcache_allocation.num_mcaches_plane0; + mcache_params[dc_plane_idx].num_mcaches_plane1 = pln_prog->mcache_allocation.num_mcaches_plane1; + mcache_params[dc_plane_idx].requires_dedicated_mall_mcache = pln_prog->mcache_allocation.requires_dedicated_mall_mcache; + mcache_params[dc_plane_idx].last_slice_sharing.plane0_plane1 = pln_prog->mcache_allocation.last_slice_sharing.plane0_plane1; + memcpy(mcache_params[dc_plane_idx].mcache_x_offsets_plane0, + pln_prog->mcache_allocation.mcache_x_offsets_plane0, + sizeof(int) * (DML2_MAX_MCACHES + 1)); + memcpy(mcache_params[dc_plane_idx].mcache_x_offsets_plane1, + pln_prog->mcache_allocation.mcache_x_offsets_plane1, + sizeof(int) * (DML2_MAX_MCACHES + 1)); + dc_plane_idx++; + } + } +} + static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx) { bool result = false; struct dml2_build_mode_programming_in_out *mode_programming = &dml_ctx->v21.mode_programming; + struct dc_mcache_params mcache_params[MAX_PLANES] = {0}; 
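	/*
	 * mcache_params[] is a staging array with one entry per DC plane in
	 * stream order. dml21_prepare_mcache_params() above fills it from the
	 * per-plane mcache allocation DML computed, using
	 * map_plane_to_dml21_display_cfg() to find each plane's DML index
	 * (dml21_wrapper_get_plane_id() packs stream and plane slots as
	 * (i << 16) | j, and lookups are now scoped by stream_id). Entries
	 * left untouched stay zeroed by the {0} initializer, i.e. valid is
	 * false. When the resource pool implements program_mcache_pipe_config,
	 * the allocate_mcache callback consumes this array further below.
	 */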
memset(&dml_ctx->v21.display_config, 0, sizeof(struct dml2_display_cfg)); memset(&dml_ctx->v21.dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping)); @@ -229,7 +228,9 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s if (!result) return false; + DC_FP_START(); result = dml2_build_mode_programming(mode_programming); + DC_FP_END(); if (!result) return false; @@ -239,6 +240,14 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s dml2_map_dc_pipes(dml_ctx, context, NULL, &dml_ctx->v21.dml_to_dc_pipe_mapping, in_dc->current_state); /* if subvp phantoms are present, expand them into dc context */ dml21_handle_phantom_streams_planes(in_dc, context, dml_ctx); + + if (in_dc->res_pool->funcs->program_mcache_pipe_config) { + //Prepare mcache params for each plane based on mcache output from DML + dml21_prepare_mcache_params(dml_ctx, context, mcache_params); + + //populate mcache regs to each pipe + dml_ctx->config.callbacks.allocate_mcache(context, mcache_params); + } } /* Copy DML CLK, WM and REG outputs to bandwidth context */ @@ -272,22 +281,26 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co mode_support->dml2_instance = dml_init->dml2_instance; dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx); dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming; + DC_FP_START(); is_supported = dml2_check_mode_supported(mode_support); + DC_FP_END(); if (!is_supported) return false; return true; } -bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, bool fast_validate) +bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, + enum dc_validate_mode validate_mode) { bool out = false; - /* Use dml_validate_only for fast_validate path */ - if (fast_validate) { + /* Use dml21_check_mode_support for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX path */ + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) out = dml21_check_mode_support(in_dc, context, dml_ctx); - } else + else out = dml21_mode_check_and_programming(in_dc, context, dml_ctx); + return out; } @@ -426,8 +439,12 @@ void dml21_copy(struct dml2_context *dst_dml_ctx, dst_dml_ctx->v21.mode_programming.programming = dst_dml2_programming; + DC_FP_START(); + /* need to initialize copied instance for internal references to be correct */ dml2_initialize_instance(&dst_dml_ctx->v21.dml_init); + + DC_FP_END(); } bool dml21_create_copy(struct dml2_context **dst_dml_ctx, @@ -442,7 +459,7 @@ bool dml21_create_copy(struct dml2_context **dst_dml_ctx, return true; } -void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config) +void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config) { dml21_init(in_dc, dml_ctx, config); } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h index b2075b8c363b..15f92029d2e5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h @@ -8,11 +8,13 @@ #include "os_types.h" #include "dml_top_soc_parameter_types.h" +#include "dml_top_display_cfg_types.h" struct dc; struct dc_state; struct dml2_configuration_options; struct 
dml2_context; +enum dc_validate_mode; /** * dml2_create - Creates dml21_context. @@ -32,22 +34,23 @@ void dml21_copy(struct dml2_context *dst_dml_ctx, struct dml2_context *src_dml_ctx); bool dml21_create_copy(struct dml2_context **dst_dml_ctx, struct dml2_context *src_dml_ctx); -void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config); +void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config); /** * dml21_validate - Determines if a display configuration is supported or not. * @in_dc: dc. * @context: dc_state to be validated. - * @fast_validate: Fast validate will not populate context.res_ctx. + * @validate_mode: DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX + * will not populate context.res_ctx. * * Based on fast_validate option internally would call: * - * -dml21_mode_check_and_programming - for non fast_validate option + * -dml21_mode_check_and_programming - for DC_VALIDATE_MODE_AND_PROGRAMMING option * Calculates if dc_state can be supported on the input display * configuration. If supported, generates the necessary HW * programming for the new dc_state. * - * -dml21_check_mode_support - for fast_validate option + * -dml21_check_mode_support - for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX option * Calculates if dc_state can be supported for the input display * config. @@ -55,7 +58,8 @@ void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const s * separate dc_states for validation. * Return: True if mode is supported, false otherwise. */ -bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, bool fast_validate); +bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, + enum dc_validate_mode validate_mode); /* Prepare hubp mcache_regs for hubp mcache ID and split coordinate programming */ void dml21_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx); @@ -65,4 +69,67 @@ struct socbb_ip_params_external { struct dml2_ip_capabilities ip_params; struct dml2_soc_bb soc_bb; }; + +/* mcache parameters decided by DML */ +struct dc_mcache_params { + bool valid; + /* + * For iMALL, dedicated mall mcaches are required (sharing of last + * slice possible); for legacy phantom or phantom without return, + * only the mall mcaches need to be valid. + */ + bool requires_dedicated_mall_mcache; + unsigned int num_mcaches_plane0; + unsigned int num_mcaches_plane1; + /* + * Generally, plane0/1 slices must use a disjoint set of caches + * but in some cases the final segment of the two planes can + * use the same cache. If plane0_plane1 is set, then this is + * allowed. + * + * Similarly, the caches allocated to MALL prefetcher are generally + * disjoint, but if mall_prefetch is set, then the final segment + * between the main and the mall pixel requestor can use the same + * cache. + * + * Note that both bits may be set at the same time. + */ + struct { + bool mall_comb_mcache_p0; + bool mall_comb_mcache_p1; + bool plane0_plane1; + } last_slice_sharing; + /* + * A plane is divided into vertical slices of mcaches, + * which wrap on the surface width. + * + * For example, if the surface width is 7680, and split into + * three slices of equal width, the boundary array would contain + * [2560, 5120, 7680] + * + * The assignments are + * 0 = [0 .. 2559] + * 1 = [2560 .. 5119] + * 2 = [5120 .. 
7679] + * 0 = [7680 .. INF] + * The final element is implicitly the same as the first, and + * at first seems invalid since it is never referenced, as + * it is outside the surface. However, it's useful when shifting + * (see below). + * + * For any given valid mcache assignment, a shifted version, wrapped + * on the surface width boundary, is also assumed to be valid. + * + * For example, shifting [2560, 5120, 7680] by -50 results in + * [2510, 5170, 7630]. + * + * The assignments are now: + * 0 = [0 .. 2509] + * 1 = [2510 .. 5169] + * 2 = [5170 .. 7629] + * 0 = [7630 .. INF] + */ + int mcache_x_offsets_plane0[DML2_MAX_MCACHES + 1]; + int mcache_x_offsets_plane1[DML2_MAX_MCACHES + 1]; +}; #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h index 25b607e7b726..b05030926ce8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h @@ -46,6 +46,7 @@ struct dml2_display_dlg_regs { uint32_t dst_y_delta_drq_limit; uint32_t refcyc_per_vm_dmdata; uint32_t dmdata_dl_delta; + uint32_t dst_y_svp_drq_limit; // MRQ uint32_t refcyc_per_meta_chunk_vblank_l; @@ -156,6 +157,8 @@ struct dml2_dchub_watermark_regs { uint32_t urgent; uint32_t sr_enter; uint32_t sr_exit; + uint32_t sr_enter_z8; + uint32_t sr_exit_z8; uint32_t uclk_pstate; uint32_t fclk_pstate; uint32_t temp_read_or_ppt; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h index 5e1ab6d97640..e8dc6471c0be 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h @@ -166,7 +166,7 @@ struct dml2_surface_cfg { enum dml2_swizzle_mode tiling; struct { - unsigned long pitch; + unsigned long pitch; // In elements, two pixels per element in 422 packed format unsigned long width; unsigned long height; } plane0; @@ -222,6 +222,7 @@ struct dml2_composition_cfg { struct { bool enabled; + bool upsp_enabled; struct { double h_ratio; double v_ratio; @@ -385,6 +386,7 @@ struct dml2_plane_parameters { long reserved_vblank_time_ns; unsigned int max_vactive_det_fill_delay_us; // 0 = no reserved time, +ve = explicit max delay unsigned int gpuvm_min_page_size_kbytes; + unsigned int hostvm_min_page_size_kbytes; enum dml2_svp_mode_override legacy_svp_config; //TODO remove in favor of svp_config @@ -425,6 +427,7 @@ struct dml2_stream_parameters { struct dml2_display_cfg { bool gpuvm_enable; + bool ffbm_enable; bool hostvm_enable; // Allocate DET proportionally between streams based on pixel rate diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h index 5f0bc42d1d2f..8c9f414aa6bf 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h @@ -93,12 +93,15 @@ struct dml2_soc_power_management_parameters { double dram_clk_change_write_only_us; double fclk_change_blackout_us; double g7_ppt_blackout_us; + double g7_temperature_read_blackout_us; double stutter_enter_plus_exit_latency_us; double stutter_exit_latency_us; double z8_stutter_enter_plus_exit_latency_us; double z8_stutter_exit_latency_us; double 
z8_min_idle_time; double g6_temp_read_blackout_us[DML_MAX_CLK_TABLE_SIZE]; + double type_b_dram_clk_change_blackout_us; + double type_b_ppt_blackout_us; }; struct dml2_clk_table { @@ -130,6 +133,7 @@ struct dml2_soc_state_table { struct dml2_soc_vmin_clock_limits { unsigned long dispclk_khz; + unsigned long dcfclk_khz; }; struct dml2_soc_bb { @@ -138,6 +142,7 @@ struct dml2_soc_bb { struct dml2_soc_power_management_parameters power_management_parameters; struct dml2_soc_vmin_clock_limits vmin_limit; + double lower_bound_bandwidth_dchub; unsigned int dprefclk_mhz; unsigned int xtalclk_mhz; unsigned int pcie_refclk_mhz; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h index 0dbf886d8926..98c0234e2f47 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h @@ -53,7 +53,9 @@ enum dml2_output_type_and_rate__rate { dml2_output_rate_hdmi_rate_6x4 = 9, dml2_output_rate_hdmi_rate_8x4 = 10, dml2_output_rate_hdmi_rate_10x4 = 11, - dml2_output_rate_hdmi_rate_12x4 = 12 + dml2_output_rate_hdmi_rate_12x4 = 12, + dml2_output_rate_hdmi_rate_16x4 = 13, + dml2_output_rate_hdmi_rate_20x4 = 14 }; struct dml2_pmo_options { @@ -279,7 +281,10 @@ struct dml2_per_stream_programming { } phantom_stream; union dmub_cmd_fams2_config fams2_base_params; - union dmub_cmd_fams2_config fams2_sub_params; + union { + union dmub_cmd_fams2_config fams2_sub_params; + union dmub_fams2_stream_static_sub_state_v2 fams2_sub_params_v2; + }; }; //----------------- @@ -674,9 +679,14 @@ struct dml2_display_cfg_programming { // unlimited # of mcache struct dml2_mcache_surface_allocation non_optimized_mcache_allocation[DML2_MAX_PLANES]; + bool failed_prefetch; + bool failed_uclk_pstate; bool failed_mcache_validation; bool failed_dpmm; bool failed_mode_programming; + bool failed_mode_programming_dcfclk; + bool failed_mode_programming_prefetch; + bool failed_mode_programming_flip; bool failed_map_watermarks; } informative; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c index bb863c8c6b39..6ee37386f672 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c @@ -456,10 +456,10 @@ bool core_dcn4_mode_support(struct dml2_core_mode_support_in_out *in_out) in_out->mode_support_result.global.active.urgent_bw_dram_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram] * 1000), 1.0); in_out->mode_support_result.global.svp_prefetch.average_bw_dram_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] * 1000), 1.0); in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->urg_bandwidth_required_flip[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] * 1000), 1.0); - dml2_printf("DML::%s: in_out->mode_support_result.global.active.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_sdp_kbps); - dml2_printf("DML::%s: 
in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps); - dml2_printf("DML::%s: in_out->mode_support_result.global.active.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_dram_kbps); - dml2_printf("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps); + DML_LOG_VERBOSE("DML::%s: in_out->mode_support_result.global.active.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_sdp_kbps); + DML_LOG_VERBOSE("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps); + DML_LOG_VERBOSE("DML::%s: in_out->mode_support_result.global.active.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_dram_kbps); + DML_LOG_VERBOSE("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps); for (i = 0; i < l->svp_expanded_display_cfg.num_planes; i++) { in_out->mode_support_result.per_plane[i].dppclk_khz = (unsigned int)(core->clean_me_up.mode_lib.ms.RequiredDPPCLK[i] * 1000); @@ -509,7 +509,7 @@ bool core_dcn4_mode_support(struct dml2_core_mode_support_in_out *in_out) stream_index = l->svp_expanded_display_cfg.plane_descriptors[i].stream_index; in_out->mode_support_result.per_stream[stream_index].dscclk_khz = (unsigned int)core->clean_me_up.mode_lib.ms.required_dscclk_freq_mhz[i] * 1000; - dml2_printf("CORE_DCN4::%s: i=%d stream_index=%d, in_out->mode_support_result.per_stream[stream_index].dscclk_khz = %u\n", __func__, i, stream_index, in_out->mode_support_result.per_stream[stream_index].dscclk_khz); + DML_LOG_VERBOSE("CORE_DCN4::%s: i=%d stream_index=%d, in_out->mode_support_result.per_stream[stream_index].dscclk_khz = %u\n", __func__, i, stream_index, in_out->mode_support_result.per_stream[stream_index].dscclk_khz); if (!((stream_bitmask >> stream_index) & 0x1)) { in_out->mode_support_result.cfg_support_info.stream_support_info[stream_index].odms_used = odm_count; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c index 4c504cb0e1c5..b9cff2198511 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c @@ -54,104 +54,104 @@ static double dml2_core_div_rem(double dividend, unsigned int divisor, unsigned static void dml2_print_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only) { - dml2_printf("DML: ===================================== \n"); - dml2_printf("DML: DML_MODE_SUPPORT_INFO_ST\n"); + DML_LOG_VERBOSE("DML: ===================================== \n"); + DML_LOG_VERBOSE("DML: DML_MODE_SUPPORT_INFO_ST\n"); if (!fail_only || support->ScaleRatioAndTapsSupport == 0) - dml2_printf("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport); + DML_LOG_VERBOSE("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport); if (!fail_only || support->SourceFormatPixelAndScanSupport == 0) - dml2_printf("DML: support: SourceFormatPixelAndScanSupport = 
%d\n", support->SourceFormatPixelAndScanSupport); + DML_LOG_VERBOSE("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport); if (!fail_only || support->ViewportSizeSupport == 0) - dml2_printf("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport); + DML_LOG_VERBOSE("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport); if (!fail_only || support->LinkRateDoesNotMatchDPVersion == 1) - dml2_printf("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion); + DML_LOG_VERBOSE("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion); if (!fail_only || support->LinkRateForMultistreamNotIndicated == 1) - dml2_printf("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated); + DML_LOG_VERBOSE("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated); if (!fail_only || support->BPPForMultistreamNotIndicated == 1) - dml2_printf("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated); + DML_LOG_VERBOSE("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated); if (!fail_only || support->MultistreamWithHDMIOreDP == 1) - dml2_printf("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP); + DML_LOG_VERBOSE("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP); if (!fail_only || support->ExceededMultistreamSlots == 1) - dml2_printf("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots); + DML_LOG_VERBOSE("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots); if (!fail_only || support->MSOOrODMSplitWithNonDPLink == 1) - dml2_printf("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink); + DML_LOG_VERBOSE("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink); if (!fail_only || support->NotEnoughLanesForMSO == 1) - dml2_printf("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO); + DML_LOG_VERBOSE("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO); if (!fail_only || support->P2IWith420 == 1) - dml2_printf("DML: support: P2IWith420 = %d\n", support->P2IWith420); + DML_LOG_VERBOSE("DML: support: P2IWith420 = %d\n", support->P2IWith420); if (!fail_only || support->DSC422NativeNotSupported == 1) - dml2_printf("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported); + DML_LOG_VERBOSE("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported); if (!fail_only || support->DSCSlicesODMModeSupported == 0) - dml2_printf("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported); + DML_LOG_VERBOSE("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported); if (!fail_only || support->NotEnoughDSCUnits == 1) - dml2_printf("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits); + DML_LOG_VERBOSE("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits); if (!fail_only || support->NotEnoughDSCSlices == 1) - dml2_printf("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices); + DML_LOG_VERBOSE("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices); if (!fail_only || 
support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == 1) - dml2_printf("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe); + DML_LOG_VERBOSE("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe); if (!fail_only || support->InvalidCombinationOfMALLUseForPStateAndStaticScreen == 1) - dml2_printf("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen); + DML_LOG_VERBOSE("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen); if (!fail_only || support->DSCCLKRequiredMoreThanSupported == 1) - dml2_printf("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported); + DML_LOG_VERBOSE("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported); if (!fail_only || support->PixelsPerLinePerDSCUnitSupport == 0) - dml2_printf("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport); + DML_LOG_VERBOSE("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport); if (!fail_only || support->DTBCLKRequiredMoreThanSupported == 1) - dml2_printf("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported); + DML_LOG_VERBOSE("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported); if (!fail_only || support->InvalidCombinationOfMALLUseForPState == 1) - dml2_printf("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState); + DML_LOG_VERBOSE("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState); if (!fail_only || support->ROBSupport == 0) - dml2_printf("DML: support: ROBSupport = %d\n", support->ROBSupport); + DML_LOG_VERBOSE("DML: support: ROBSupport = %d\n", support->ROBSupport); if (!fail_only || support->OutstandingRequestsSupport == 0) - dml2_printf("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport); + DML_LOG_VERBOSE("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport); if (!fail_only || support->OutstandingRequestsUrgencyAvoidance == 0) - dml2_printf("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance); + DML_LOG_VERBOSE("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance); if (!fail_only || support->DISPCLK_DPPCLK_Support == 0) - dml2_printf("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support); + DML_LOG_VERBOSE("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support); if (!fail_only || support->TotalAvailablePipesSupport == 0) - dml2_printf("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport); + DML_LOG_VERBOSE("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport); if (!fail_only || support->NumberOfOTGSupport == 0) - dml2_printf("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport); + DML_LOG_VERBOSE("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport); if 
(!fail_only || support->NumberOfHDMIFRLSupport == 0) - dml2_printf("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport); + DML_LOG_VERBOSE("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport); if (!fail_only || support->NumberOfDP2p0Support == 0) - dml2_printf("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support); + DML_LOG_VERBOSE("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support); if (!fail_only || support->EnoughWritebackUnits == 0) - dml2_printf("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits); + DML_LOG_VERBOSE("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits); if (!fail_only || support->WritebackScaleRatioAndTapsSupport == 0) - dml2_printf("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport); + DML_LOG_VERBOSE("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport); if (!fail_only || support->WritebackLatencySupport == 0) - dml2_printf("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport); + DML_LOG_VERBOSE("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport); if (!fail_only || support->CursorSupport == 0) - dml2_printf("DML: support: CursorSupport = %d\n", support->CursorSupport); + DML_LOG_VERBOSE("DML: support: CursorSupport = %d\n", support->CursorSupport); if (!fail_only || support->PitchSupport == 0) - dml2_printf("DML: support: PitchSupport = %d\n", support->PitchSupport); + DML_LOG_VERBOSE("DML: support: PitchSupport = %d\n", support->PitchSupport); if (!fail_only || support->ViewportExceedsSurface == 1) - dml2_printf("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface); + DML_LOG_VERBOSE("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface); if (!fail_only || support->PrefetchSupported == 0) - dml2_printf("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported); + DML_LOG_VERBOSE("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported); if (!fail_only || support->EnoughUrgentLatencyHidingSupport == 0) - dml2_printf("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport); + DML_LOG_VERBOSE("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport); if (!fail_only || support->AvgBandwidthSupport == 0) - dml2_printf("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport); + DML_LOG_VERBOSE("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport); if (!fail_only || support->DynamicMetadataSupported == 0) - dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported); + DML_LOG_VERBOSE("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported); if (!fail_only || support->VRatioInPrefetchSupported == 0) - dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported); + DML_LOG_VERBOSE("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported); if (!fail_only || support->PTEBufferSizeNotExceeded == 0) - dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded); + DML_LOG_VERBOSE("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded); if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 0) - 
dml2_printf("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded); + DML_LOG_VERBOSE("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded); if (!fail_only || support->ExceededMALLSize == 1) - dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize); + DML_LOG_VERBOSE("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize); if (!fail_only || support->g6_temp_read_support == 0) - dml2_printf("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support); + DML_LOG_VERBOSE("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support); if (!fail_only || support->ImmediateFlipSupport == 0) - dml2_printf("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport); + DML_LOG_VERBOSE("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport); if (!fail_only || support->LinkCapacitySupport == 0) - dml2_printf("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport); + DML_LOG_VERBOSE("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport); if (!fail_only || support->ModeSupport == 0) - dml2_printf("DML: support: ModeSupport = %d\n", support->ModeSupport); - dml2_printf("DML: ===================================== \n"); + DML_LOG_VERBOSE("DML: support: ModeSupport = %d\n", support->ModeSupport); + DML_LOG_VERBOSE("DML: ===================================== \n"); } static void get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg *display_cfg) @@ -179,11 +179,9 @@ static void get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg } else { out_bpp[k] = 0; } -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%d bpc=%f\n", __func__, k, bpc); - dml2_printf("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable); - dml2_printf("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]); -#endif + DML_LOG_VERBOSE("DML::%s: k=%d bpc=%f\n", __func__, k, bpc); + DML_LOG_VERBOSE("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable); + DML_LOG_VERBOSE("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]); } } @@ -212,9 +210,7 @@ static unsigned int dml_get_num_active_pipes(int unsigned num_planes, const stru num_active_pipes = num_active_pipes + (unsigned int)cfg_support_info->plane_support_info[k].dpps_used; } -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes); -#endif + DML_LOG_VERBOSE("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes); return num_active_pipes; } @@ -251,7 +247,7 @@ static bool dml_get_is_phantom_pipe(const struct dml2_display_cfg *display_cfg, unsigned int plane_idx = mode_lib->mp.pipe_plane[pipe_idx]; bool is_phantom = dml_is_phantom_pipe(&display_cfg->plane_descriptors[plane_idx]); - dml2_printf("DML::%s: pipe_idx=%d legacy_svp_config=%0d is_phantom=%d\n", __func__, pipe_idx, display_cfg->plane_descriptors[plane_idx].overrides.legacy_svp_config, is_phantom); + DML_LOG_VERBOSE("DML::%s: pipe_idx=%d legacy_svp_config=%0d is_phantom=%d\n", __func__, pipe_idx, display_cfg->plane_descriptors[plane_idx].overrides.legacy_svp_config, is_phantom); return is_phantom; } @@ -415,19 +411,17 @@ static void CalculateMaxDETAndMinCompressedBufferSize( *nomDETInKByte = (unsigned int)(math_floor2((double)*MaxTotalDETInKByte / 
(double)MaxNumDPP, ConfigReturnBufferSegmentSizeInKByte)); *MinCompressedBufferSizeInKByte = ConfigReturnBufferSizeInKByte - *MaxTotalDETInKByte; -#if defined(__DML_VBA_DEBUG__) - dml2_printf("DML::%s: is_mrq_present = %u\n", __func__, is_mrq_present); - dml2_printf("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte); - dml2_printf("DML::%s: ROBBufferSizeInKByte = %u\n", __func__, ROBBufferSizeInKByte); - dml2_printf("DML::%s: MaxNumDPP = %u\n", __func__, MaxNumDPP); - dml2_printf("DML::%s: MaxTotalDETInKByte = %u\n", __func__, *MaxTotalDETInKByte); - dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, *nomDETInKByte); - dml2_printf("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, *MinCompressedBufferSizeInKByte); -#endif + DML_LOG_VERBOSE("DML::%s: is_mrq_present = %u\n", __func__, is_mrq_present); + DML_LOG_VERBOSE("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte); + DML_LOG_VERBOSE("DML::%s: ROBBufferSizeInKByte = %u\n", __func__, ROBBufferSizeInKByte); + DML_LOG_VERBOSE("DML::%s: MaxNumDPP = %u\n", __func__, MaxNumDPP); + DML_LOG_VERBOSE("DML::%s: MaxTotalDETInKByte = %u\n", __func__, *MaxTotalDETInKByte); + DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u\n", __func__, *nomDETInKByte); + DML_LOG_VERBOSE("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, *MinCompressedBufferSizeInKByte); if (nomDETInKByteOverrideEnable) { *nomDETInKByte = nomDETInKByteOverrideValue; - dml2_printf("DML::%s: nomDETInKByte = %u (overrided)\n", __func__, *nomDETInKByte); + DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u (overrided)\n", __func__, *nomDETInKByte); } } @@ -502,7 +496,7 @@ static bool dml_is_420(enum dml2_source_format_class source_format) val = 0; break; default: - DML2_ASSERT(0); + DML_ASSERT(0); break; } return val; @@ -535,7 +529,7 @@ static unsigned int dml_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode else if (sw_mode == dml2_gfx11_sw_256kb_r_x) return 262144; else { - DML2_ASSERT(0); + DML_ASSERT(0); return 256; } } @@ -570,8 +564,8 @@ static int unsigned dml_get_gfx_version(enum dml2_swizzle_mode sw_mode) sw_mode == dml2_gfx11_sw_256kb_r_x) { version = 11; } else { - dml2_printf("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode); - DML2_ASSERT(0); + DML_LOG_VERBOSE("ERROR: Invalid sw_mode setting! 
val=%u\n", sw_mode); + DML_ASSERT(0); } return version; @@ -645,21 +639,19 @@ static void CalculateBytePerPixelAndBlockSizes( *BytePerPixelY = 2; *BytePerPixelC = 4; } else { - dml2_printf("ERROR: DML::%s: SourcePixelFormat = %u not supported!\n", __func__, SourcePixelFormat); - DML2_ASSERT(0); + DML_LOG_VERBOSE("ERROR: DML::%s: SourcePixelFormat = %u not supported!\n", __func__, SourcePixelFormat); + DML_ASSERT(0); } -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: SourcePixelFormat = %u\n", __func__, SourcePixelFormat); - dml2_printf("DML::%s: BytePerPixelDETY = %f\n", __func__, *BytePerPixelDETY); - dml2_printf("DML::%s: BytePerPixelDETC = %f\n", __func__, *BytePerPixelDETC); - dml2_printf("DML::%s: BytePerPixelY = %u\n", __func__, *BytePerPixelY); - dml2_printf("DML::%s: BytePerPixelC = %u\n", __func__, *BytePerPixelC); - dml2_printf("DML::%s: pitch_y = %u\n", __func__, pitch_y); - dml2_printf("DML::%s: pitch_c = %u\n", __func__, pitch_c); - dml2_printf("DML::%s: surf_linear128_l = %u\n", __func__, *surf_linear128_l); - dml2_printf("DML::%s: surf_linear128_c = %u\n", __func__, *surf_linear128_c); -#endif + DML_LOG_VERBOSE("DML::%s: SourcePixelFormat = %u\n", __func__, SourcePixelFormat); + DML_LOG_VERBOSE("DML::%s: BytePerPixelDETY = %f\n", __func__, *BytePerPixelDETY); + DML_LOG_VERBOSE("DML::%s: BytePerPixelDETC = %f\n", __func__, *BytePerPixelDETC); + DML_LOG_VERBOSE("DML::%s: BytePerPixelY = %u\n", __func__, *BytePerPixelY); + DML_LOG_VERBOSE("DML::%s: BytePerPixelC = %u\n", __func__, *BytePerPixelC); + DML_LOG_VERBOSE("DML::%s: pitch_y = %u\n", __func__, pitch_y); + DML_LOG_VERBOSE("DML::%s: pitch_c = %u\n", __func__, pitch_c); + DML_LOG_VERBOSE("DML::%s: surf_linear128_l = %u\n", __func__, *surf_linear128_l); + DML_LOG_VERBOSE("DML::%s: surf_linear128_c = %u\n", __func__, *surf_linear128_c); if (dml_get_gfx_version(SurfaceTiling) == 11) { *surf_linear128_l = 0; @@ -703,12 +695,10 @@ static void CalculateBytePerPixelAndBlockSizes( *BlockWidth256BytesY = 256U / *BytePerPixelY / *BlockHeight256BytesY; *BlockWidth256BytesC = 256U / *BytePerPixelC / *BlockHeight256BytesC; } -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: BlockWidth256BytesY = %u\n", __func__, *BlockWidth256BytesY); - dml2_printf("DML::%s: BlockHeight256BytesY = %u\n", __func__, *BlockHeight256BytesY); - dml2_printf("DML::%s: BlockWidth256BytesC = %u\n", __func__, *BlockWidth256BytesC); - dml2_printf("DML::%s: BlockHeight256BytesC = %u\n", __func__, *BlockHeight256BytesC); -#endif + DML_LOG_VERBOSE("DML::%s: BlockWidth256BytesY = %u\n", __func__, *BlockWidth256BytesY); + DML_LOG_VERBOSE("DML::%s: BlockHeight256BytesY = %u\n", __func__, *BlockHeight256BytesY); + DML_LOG_VERBOSE("DML::%s: BlockWidth256BytesC = %u\n", __func__, *BlockWidth256BytesC); + DML_LOG_VERBOSE("DML::%s: BlockHeight256BytesC = %u\n", __func__, *BlockHeight256BytesC); if (dml_get_gfx_version(SurfaceTiling) == 11) { if (SurfaceTiling == dml2_gfx11_sw_linear) { @@ -752,8 +742,8 @@ static void CalculateBytePerPixelAndBlockSizes( } else if (SurfaceTiling == dml2_sw_256kb_2d) { macro_tile_scale = 32; } else { - dml2_printf("ERROR: Invalid SurfaceTiling setting! val=%u\n", SurfaceTiling); - DML2_ASSERT(0); + DML_LOG_VERBOSE("ERROR: Invalid SurfaceTiling setting! 
val=%u\n", SurfaceTiling); + DML_ASSERT(0); } *MacroTileHeightY = macro_tile_scale * *BlockHeight256BytesY; @@ -766,12 +756,10 @@ static void CalculateBytePerPixelAndBlockSizes( } } -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: MacroTileWidthY = %u\n", __func__, *MacroTileWidthY); - dml2_printf("DML::%s: MacroTileHeightY = %u\n", __func__, *MacroTileHeightY); - dml2_printf("DML::%s: MacroTileWidthC = %u\n", __func__, *MacroTileWidthC); - dml2_printf("DML::%s: MacroTileHeightC = %u\n", __func__, *MacroTileHeightC); -#endif + DML_LOG_VERBOSE("DML::%s: MacroTileWidthY = %u\n", __func__, *MacroTileWidthY); + DML_LOG_VERBOSE("DML::%s: MacroTileHeightY = %u\n", __func__, *MacroTileHeightY); + DML_LOG_VERBOSE("DML::%s: MacroTileWidthC = %u\n", __func__, *MacroTileWidthC); + DML_LOG_VERBOSE("DML::%s: MacroTileHeightC = %u\n", __func__, *MacroTileHeightC); } static void CalculateSinglePipeDPPCLKAndSCLThroughput( @@ -860,10 +848,8 @@ static void CalculateSwathWidth( unsigned int surface_width_ub_c; unsigned int surface_height_ub_c; -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP); - dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces); -#endif + DML_LOG_VERBOSE("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP); + DML_LOG_VERBOSE("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces); for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) { if (!dml_is_vertical_rotation(display_cfg->plane_descriptors[k].composition.rotation_angle)) { @@ -872,11 +858,9 @@ static void CalculateSwathWidth( SwathWidthSingleDPPY[k] = (unsigned int)display_cfg->plane_descriptors[k].composition.viewport.plane0.height; } -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u ViewportWidth=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width); - dml2_printf("DML::%s: k=%u ViewportHeight=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height); - dml2_printf("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]); -#endif + DML_LOG_VERBOSE("DML::%s: k=%u ViewportWidth=%lu\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width); + DML_LOG_VERBOSE("DML::%s: k=%u ViewportHeight=%lu\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height); + DML_LOG_VERBOSE("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]); MainSurfaceODMMode = ODMMode[k]; @@ -899,13 +883,11 @@ static void CalculateSwathWidth( } } -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u HActive=%u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active); - dml2_printf("DML::%s: k=%u HRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio); - dml2_printf("DML::%s: k=%u MainSurfaceODMMode=%u\n", __func__, k, MainSurfaceODMMode); - dml2_printf("DML::%s: k=%u SwathWidthSingleDPPY=%u\n", __func__, k, SwathWidthSingleDPPY[k]); - dml2_printf("DML::%s: k=%u SwathWidthY=%u\n", __func__, k, SwathWidthY[k]); -#endif + DML_LOG_VERBOSE("DML::%s: k=%u HActive=%lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active); + DML_LOG_VERBOSE("DML::%s: k=%u HRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio); + DML_LOG_VERBOSE("DML::%s: k=%u MainSurfaceODMMode=%u\n", __func__, k, MainSurfaceODMMode); + 
DML_LOG_VERBOSE("DML::%s: k=%u SwathWidthSingleDPPY=%u\n", __func__, k, SwathWidthSingleDPPY[k]); + DML_LOG_VERBOSE("DML::%s: k=%u SwathWidthY=%u\n", __func__, k, SwathWidthY[k]); if (dml_is_420(display_cfg->plane_descriptors[k].pixel_format)) { SwathWidthC[k] = SwathWidthY[k] / 2; @@ -934,22 +916,20 @@ static void CalculateSwathWidth( surface_width_ub_c = (unsigned int)math_ceil2((double)display_cfg->plane_descriptors[k].surface.plane1.width, req_width_horz_c); surface_height_ub_c = (unsigned int)math_ceil2((double)display_cfg->plane_descriptors[k].surface.plane1.height, Read256BytesBlockHeightC[k]); -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u surface_width_ub_l=%u\n", __func__, k, surface_width_ub_l); - dml2_printf("DML::%s: k=%u surface_height_ub_l=%u\n", __func__, k, surface_height_ub_l); - dml2_printf("DML::%s: k=%u surface_width_ub_c=%u\n", __func__, k, surface_width_ub_c); - dml2_printf("DML::%s: k=%u surface_height_ub_c=%u\n", __func__, k, surface_height_ub_c); - dml2_printf("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y); - dml2_printf("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c); - dml2_printf("DML::%s: k=%u Read256BytesBlockWidthY=%u\n", __func__, k, Read256BytesBlockWidthY[k]); - dml2_printf("DML::%s: k=%u Read256BytesBlockHeightY=%u\n", __func__, k, Read256BytesBlockHeightY[k]); - dml2_printf("DML::%s: k=%u Read256BytesBlockWidthC=%u\n", __func__, k, Read256BytesBlockWidthC[k]); - dml2_printf("DML::%s: k=%u Read256BytesBlockHeightC=%u\n", __func__, k, Read256BytesBlockHeightC[k]); - dml2_printf("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y); - dml2_printf("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c); - dml2_printf("DML::%s: k=%u ViewportStationary=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.stationary); - dml2_printf("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]); -#endif + DML_LOG_VERBOSE("DML::%s: k=%u surface_width_ub_l=%u\n", __func__, k, surface_width_ub_l); + DML_LOG_VERBOSE("DML::%s: k=%u surface_height_ub_l=%u\n", __func__, k, surface_height_ub_l); + DML_LOG_VERBOSE("DML::%s: k=%u surface_width_ub_c=%u\n", __func__, k, surface_width_ub_c); + DML_LOG_VERBOSE("DML::%s: k=%u surface_height_ub_c=%u\n", __func__, k, surface_height_ub_c); + DML_LOG_VERBOSE("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y); + DML_LOG_VERBOSE("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c); + DML_LOG_VERBOSE("DML::%s: k=%u Read256BytesBlockWidthY=%u\n", __func__, k, Read256BytesBlockWidthY[k]); + DML_LOG_VERBOSE("DML::%s: k=%u Read256BytesBlockHeightY=%u\n", __func__, k, Read256BytesBlockHeightY[k]); + DML_LOG_VERBOSE("DML::%s: k=%u Read256BytesBlockWidthC=%u\n", __func__, k, Read256BytesBlockWidthC[k]); + DML_LOG_VERBOSE("DML::%s: k=%u Read256BytesBlockHeightC=%u\n", __func__, k, Read256BytesBlockHeightC[k]); + DML_LOG_VERBOSE("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y); + DML_LOG_VERBOSE("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c); + DML_LOG_VERBOSE("DML::%s: k=%u ViewportStationary=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.stationary); + DML_LOG_VERBOSE("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]); req_per_swath_ub_l[k] = 0; req_per_swath_ub_c[k] = 0; @@ -995,15 +975,12 @@ static void CalculateSwathWidth( } } -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u swath_width_luma_ub=%u\n", 
__func__, k, swath_width_luma_ub[k]); - dml2_printf("DML::%s: k=%u swath_width_chroma_ub=%u\n", __func__, k, swath_width_chroma_ub[k]); - dml2_printf("DML::%s: k=%u MaximumSwathHeightY=%u\n", __func__, k, MaximumSwathHeightY[k]); - dml2_printf("DML::%s: k=%u MaximumSwathHeightC=%u\n", __func__, k, MaximumSwathHeightC[k]); - dml2_printf("DML::%s: k=%u req_per_swath_ub_l=%u\n", __func__, k, req_per_swath_ub_l[k]); - dml2_printf("DML::%s: k=%u req_per_swath_ub_c=%u\n", __func__, k, req_per_swath_ub_c[k]); -#endif - + DML_LOG_VERBOSE("DML::%s: k=%u swath_width_luma_ub=%u\n", __func__, k, swath_width_luma_ub[k]); + DML_LOG_VERBOSE("DML::%s: k=%u swath_width_chroma_ub=%u\n", __func__, k, swath_width_chroma_ub[k]); + DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathHeightY=%u\n", __func__, k, MaximumSwathHeightY[k]); + DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathHeightC=%u\n", __func__, k, MaximumSwathHeightC[k]); + DML_LOG_VERBOSE("DML::%s: k=%u req_per_swath_ub_l=%u\n", __func__, k, req_per_swath_ub_l[k]); + DML_LOG_VERBOSE("DML::%s: k=%u req_per_swath_ub_c=%u\n", __func__, k, req_per_swath_ub_c[k]); } } @@ -1018,13 +995,11 @@ static bool UnboundedRequest(bool unb_req_force_en, bool unb_req_force_val, unsi if (unb_req_force_en) { unb_req_en = unb_req_force_val && unb_req_ok; } -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: unb_req_force_en = %u\n", __func__, unb_req_force_en); - dml2_printf("DML::%s: unb_req_force_val = %u\n", __func__, unb_req_force_val); - dml2_printf("DML::%s: unb_req_ok = %u\n", __func__, unb_req_ok); - dml2_printf("DML::%s: unb_req_en = %u\n", __func__, unb_req_en); -#endif - return (unb_req_en); + DML_LOG_VERBOSE("DML::%s: unb_req_force_en = %u\n", __func__, unb_req_force_en); + DML_LOG_VERBOSE("DML::%s: unb_req_force_val = %u\n", __func__, unb_req_force_val); + DML_LOG_VERBOSE("DML::%s: unb_req_ok = %u\n", __func__, unb_req_ok); + DML_LOG_VERBOSE("DML::%s: unb_req_en = %u\n", __func__, unb_req_en); + return unb_req_en; } static void CalculateDETBufferSize( @@ -1054,16 +1029,14 @@ static void CalculateDETBufferSize( bool NextPotentialSurfaceToAssignDETPieceFound; bool MinimizeReallocationSuccess = false; -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP); - dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte); - dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces); - dml2_printf("DML::%s: UnboundedRequestEnabled = %u\n", __func__, UnboundedRequestEnabled); - dml2_printf("DML::%s: MaxTotalDETInKByte = %u\n", __func__, MaxTotalDETInKByte); - dml2_printf("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte); - dml2_printf("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, MinCompressedBufferSizeInKByte); - dml2_printf("DML::%s: CompressedBufferSegmentSizeInkByte = %u\n", __func__, CompressedBufferSegmentSizeInkByte); -#endif + DML_LOG_VERBOSE("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP); + DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte); + DML_LOG_VERBOSE("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces); + DML_LOG_VERBOSE("DML::%s: UnboundedRequestEnabled = %u\n", __func__, UnboundedRequestEnabled); + DML_LOG_VERBOSE("DML::%s: MaxTotalDETInKByte = %u\n", __func__, MaxTotalDETInKByte); + DML_LOG_VERBOSE("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte); + DML_LOG_VERBOSE("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, 
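UnboundedRequest() reduces to a small override rule: the force knob may only pick the value when forcing is enabled, and even then it cannot enable unbounded requests on a configuration that cannot take them (unb_req_ok). A sketch, with the baseline-enable path assumed from context:

    #include <stdbool.h>

    static bool unbounded_request(bool force_en, bool force_val, bool unb_req_ok)
    {
        bool en = unb_req_ok;   /* assumed baseline */

        if (force_en)
            en = force_val && unb_req_ok; /* force can't override HW limits */
        return en;
    }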
MinCompressedBufferSizeInKByte); + DML_LOG_VERBOSE("DML::%s: CompressedBufferSegmentSizeInkByte = %u\n", __func__, CompressedBufferSegmentSizeInkByte); // Note: Will use default det size if that fits 2 swaths if (UnboundedRequestEnabled) { @@ -1092,19 +1065,15 @@ static void CalculateDETBufferSize( l->minDET = l->minDET + ConfigReturnBufferSegmentSizeInkByte; } -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u minDET = %u\n", __func__, k, l->minDET); - dml2_printf("DML::%s: k=%u max_minDET = %u\n", __func__, k, l->max_minDET); - dml2_printf("DML::%s: k=%u minDET_pipe = %u\n", __func__, k, l->minDET_pipe); - dml2_printf("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, full_swath_bytes_l[k]); - dml2_printf("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, full_swath_bytes_c[k]); -#endif + DML_LOG_VERBOSE("DML::%s: k=%u minDET = %u\n", __func__, k, l->minDET); + DML_LOG_VERBOSE("DML::%s: k=%u max_minDET = %u\n", __func__, k, l->max_minDET); + DML_LOG_VERBOSE("DML::%s: k=%u minDET_pipe = %u\n", __func__, k, l->minDET_pipe); + DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, full_swath_bytes_l[k]); + DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, full_swath_bytes_c[k]); if (l->minDET_pipe == 0) { l->minDET_pipe = (unsigned int)(math_max2(128, math_ceil2(((double)full_swath_bytes_l[k] + (double)full_swath_bytes_c[k]) / 1024.0, ConfigReturnBufferSegmentSizeInkByte))); -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u minDET_pipe = %u (assume each plane take half DET)\n", __func__, k, l->minDET_pipe); -#endif + DML_LOG_VERBOSE("DML::%s: k=%u minDET_pipe = %u (assume each plane take half DET)\n", __func__, k, l->minDET_pipe); } if (dml_is_phantom_pipe(&display_cfg->plane_descriptors[k])) { @@ -1117,12 +1086,10 @@ static void CalculateDETBufferSize( l->DETBufferSizePoolInKByte = l->DETBufferSizePoolInKByte - (ForceSingleDPP ? 
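When the swath fitting above yields no per-pipe minimum (minDET_pipe == 0), the fallback visible in this hunk is the smallest allocation holding one full luma+chroma swath, floored at 128KB and rounded up to the segment granularity:

    #include <math.h>

    /* math_ceil2(x, m): round x up to a multiple of m. */
    static double ceil_to(double x, double m)
    {
        return ceil(x / m) * m;
    }

    static unsigned int min_det_pipe_kb(unsigned int full_swath_l_bytes,
                                        unsigned int full_swath_c_bytes,
                                        unsigned int segment_kb)
    {
        double kb = ((double)full_swath_l_bytes + full_swath_c_bytes) / 1024.0;
        double v = ceil_to(kb, segment_kb);

        return (unsigned int)(v < 128.0 ? 128.0 : v);
    }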
1 : DPPPerSurface[k]) * l->minDET_pipe; } -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, DPPPerSurface[k]); - dml2_printf("DML::%s: k=%u DETSizeOverride = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.det_size_override_kb); - dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, DETBufferSizeInKByte[k]); - dml2_printf("DML::%s: DETBufferSizePoolInKByte = %u\n", __func__, l->DETBufferSizePoolInKByte); -#endif + DML_LOG_VERBOSE("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, DPPPerSurface[k]); + DML_LOG_VERBOSE("DML::%s: k=%u DETSizeOverride = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.det_size_override_kb); + DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, DETBufferSizeInKByte[k]); + DML_LOG_VERBOSE("DML::%s: DETBufferSizePoolInKByte = %u\n", __func__, l->DETBufferSizePoolInKByte); } if (display_cfg->minimize_det_reallocation) { @@ -1194,14 +1161,12 @@ static void CalculateDETBufferSize( l->TotalBandwidth = l->TotalBandwidth + ReadBandwidthLuma[k] + ReadBandwidthChroma[k]; } } -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: --- Before bandwidth adjustment ---\n", __func__); + DML_LOG_VERBOSE("DML::%s: --- Before bandwidth adjustment ---\n", __func__); for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) { - dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, DETBufferSizeInKByte[k]); + DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, DETBufferSizeInKByte[k]); } - dml2_printf("DML::%s: --- DET allocation with bandwidth ---\n", __func__); -#endif - dml2_printf("DML::%s: TotalBandwidth = %f\n", __func__, l->TotalBandwidth); + DML_LOG_VERBOSE("DML::%s: --- DET allocation with bandwidth ---\n", __func__); + DML_LOG_VERBOSE("DML::%s: TotalBandwidth = %f\n", __func__, l->TotalBandwidth); l->BandwidthOfSurfacesNotAssignedDETPiece = l->TotalBandwidth; for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) { @@ -1213,10 +1178,8 @@ static void CalculateDETBufferSize( } else { DETPieceAssignedToThisSurfaceAlready[k] = false; } -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, k, DETPieceAssignedToThisSurfaceAlready[k]); - dml2_printf("DML::%s: k=%u BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, k, l->BandwidthOfSurfacesNotAssignedDETPiece); -#endif + DML_LOG_VERBOSE("DML::%s: k=%u DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, k, DETPieceAssignedToThisSurfaceAlready[k]); + DML_LOG_VERBOSE("DML::%s: k=%u BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, k, l->BandwidthOfSurfacesNotAssignedDETPiece); } for (unsigned int j = 0; j < NumberOfActiveSurfaces; ++j) { @@ -1224,22 +1187,18 @@ static void CalculateDETBufferSize( l->NextSurfaceToAssignDETPiece = 0; for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) { -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthLuma[k] = %f\n", __func__, j, k, ReadBandwidthLuma[k]); - dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthChroma[k] = %f\n", __func__, j, k, ReadBandwidthChroma[k]); - dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthLuma[Next] = %f\n", __func__, j, k, ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece]); - dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthChroma[Next] = %f\n", __func__, j, k, ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece]); - dml2_printf("DML::%s: j=%u k=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, k, l->NextSurfaceToAssignDETPiece); 
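The DET redistribution loop that follows is a greedy pass: each round scans for the unassigned surface with the lowest combined luma+chroma read bandwidth and serves it next, leaving proportionally larger pool pieces for the heavy surfaces at the end. The selection step, extracted as a sketch:

    #include <stdbool.h>

    static int next_surface_for_det(const double *bw_luma,
                                    const double *bw_chroma,
                                    const bool *assigned, unsigned int n)
    {
        int next = -1;

        for (unsigned int k = 0; k < n; ++k) {
            if (assigned[k])
                continue;
            if (next < 0 ||
                bw_luma[k] + bw_chroma[k] <
                bw_luma[next] + bw_chroma[next])
                next = (int)k;
        }
        return next; /* -1 once every surface holds its DET piece */
    }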
-#endif + DML_LOG_VERBOSE("DML::%s: j=%u k=%u, ReadBandwidthLuma[k] = %f\n", __func__, j, k, ReadBandwidthLuma[k]); + DML_LOG_VERBOSE("DML::%s: j=%u k=%u, ReadBandwidthChroma[k] = %f\n", __func__, j, k, ReadBandwidthChroma[k]); + DML_LOG_VERBOSE("DML::%s: j=%u k=%u, ReadBandwidthLuma[Next] = %f\n", __func__, j, k, ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece]); + DML_LOG_VERBOSE("DML::%s: j=%u k=%u, ReadBandwidthChroma[Next] = %f\n", __func__, j, k, ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece]); + DML_LOG_VERBOSE("DML::%s: j=%u k=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, k, l->NextSurfaceToAssignDETPiece); if (!DETPieceAssignedToThisSurfaceAlready[k] && (!NextPotentialSurfaceToAssignDETPieceFound || ReadBandwidthLuma[k] + ReadBandwidthChroma[k] < ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece] + ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece])) { l->NextSurfaceToAssignDETPiece = k; NextPotentialSurfaceToAssignDETPieceFound = true; } -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: j=%u k=%u, DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, j, k, DETPieceAssignedToThisSurfaceAlready[k]); - dml2_printf("DML::%s: j=%u k=%u, NextPotentialSurfaceToAssignDETPieceFound = %u\n", __func__, j, k, NextPotentialSurfaceToAssignDETPieceFound); -#endif + DML_LOG_VERBOSE("DML::%s: j=%u k=%u, DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, j, k, DETPieceAssignedToThisSurfaceAlready[k]); + DML_LOG_VERBOSE("DML::%s: j=%u k=%u, NextPotentialSurfaceToAssignDETPieceFound = %u\n", __func__, j, k, NextPotentialSurfaceToAssignDETPieceFound); } if (NextPotentialSurfaceToAssignDETPieceFound) { @@ -1249,20 +1208,16 @@ static void CalculateDETBufferSize( * (ForceSingleDPP ? 1 : DPPPerSurface[l->NextSurfaceToAssignDETPiece]) * ConfigReturnBufferSegmentSizeInkByte, math_floor2((double)l->DETBufferSizePoolInKByte, (ForceSingleDPP ? 
1 : DPPPerSurface[l->NextSurfaceToAssignDETPiece]) * ConfigReturnBufferSegmentSizeInkByte))); -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: j=%u, DETBufferSizePoolInKByte = %u\n", __func__, j, l->DETBufferSizePoolInKByte); - dml2_printf("DML::%s: j=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, l->NextSurfaceToAssignDETPiece); - dml2_printf("DML::%s: j=%u, ReadBandwidthLuma[%u] = %f\n", __func__, j, l->NextSurfaceToAssignDETPiece, ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece]); - dml2_printf("DML::%s: j=%u, ReadBandwidthChroma[%u] = %f\n", __func__, j, l->NextSurfaceToAssignDETPiece, ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece]); - dml2_printf("DML::%s: j=%u, BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, j, l->BandwidthOfSurfacesNotAssignedDETPiece); - dml2_printf("DML::%s: j=%u, NextDETBufferPieceInKByte = %u\n", __func__, j, l->NextDETBufferPieceInKByte); - dml2_printf("DML::%s: j=%u, DETBufferSizeInKByte[%u] increases from %u ", __func__, j, l->NextSurfaceToAssignDETPiece, DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece]); -#endif + DML_LOG_VERBOSE("DML::%s: j=%u, DETBufferSizePoolInKByte = %u\n", __func__, j, l->DETBufferSizePoolInKByte); + DML_LOG_VERBOSE("DML::%s: j=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, l->NextSurfaceToAssignDETPiece); + DML_LOG_VERBOSE("DML::%s: j=%u, ReadBandwidthLuma[%u] = %f\n", __func__, j, l->NextSurfaceToAssignDETPiece, ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece]); + DML_LOG_VERBOSE("DML::%s: j=%u, ReadBandwidthChroma[%u] = %f\n", __func__, j, l->NextSurfaceToAssignDETPiece, ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece]); + DML_LOG_VERBOSE("DML::%s: j=%u, BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, j, l->BandwidthOfSurfacesNotAssignedDETPiece); + DML_LOG_VERBOSE("DML::%s: j=%u, NextDETBufferPieceInKByte = %u\n", __func__, j, l->NextDETBufferPieceInKByte); + DML_LOG_VERBOSE("DML::%s: j=%u, DETBufferSizeInKByte[%u] increases from %u ", __func__, j, l->NextSurfaceToAssignDETPiece, DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece]); DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece] = DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece] + l->NextDETBufferPieceInKByte / (ForceSingleDPP ? 
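Only the tail of the NextDETBufferPieceInKByte expression is visible here, but its shape is clear: take this surface's share of the remaining pool, snap it to the allocation granularity (DPP count x return-buffer segment size), and clamp it so it never exceeds what is left, also snapped. A simplified sketch, assuming the share is the surface's fraction of the still-unassigned bandwidth and that the snap rounds down:

    static double floor_mult(double x, double m)
    {
        return (double)(unsigned long long)(x / m) * m;
    }

    static unsigned int det_piece_kb(unsigned int pool_kb,
                                     double bw_this, double bw_unassigned,
                                     unsigned int dpp, unsigned int segment_kb)
    {
        double gran = (double)dpp * segment_kb;       /* allocation unit */
        double share = pool_kb * (bw_this / bw_unassigned);
        double piece = floor_mult(share, gran);       /* rounding assumed */
        double cap = floor_mult((double)pool_kb, gran); /* never exceed pool */

        return (unsigned int)(piece < cap ? piece : cap);
    }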
1 : DPPPerSurface[l->NextSurfaceToAssignDETPiece]); -#ifdef __DML_VBA_DEBUG__ - dml2_printf("to %u\n", DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece]); -#endif + DML_LOG_VERBOSE("to %u\n", DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece]); l->DETBufferSizePoolInKByte = l->DETBufferSizePoolInKByte - l->NextDETBufferPieceInKByte; DETPieceAssignedToThisSurfaceAlready[l->NextSurfaceToAssignDETPiece] = true; @@ -1274,13 +1229,11 @@ static void CalculateDETBufferSize( } *CompressedBufferSizeInkByte = *CompressedBufferSizeInkByte * CompressedBufferSegmentSizeInkByte / ConfigReturnBufferSegmentSizeInkByte; -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: --- After bandwidth adjustment ---\n", __func__); - dml2_printf("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *CompressedBufferSizeInkByte); + DML_LOG_VERBOSE("DML::%s: --- After bandwidth adjustment ---\n", __func__); + DML_LOG_VERBOSE("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *CompressedBufferSizeInkByte); for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) { - dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u (TotalReadBandWidth=%f)\n", __func__, k, DETBufferSizeInKByte[k], ReadBandwidthLuma[k] + ReadBandwidthChroma[k]); + DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByte = %u (TotalReadBandWidth=%f)\n", __func__, k, DETBufferSizeInKByte[k], ReadBandwidthLuma[k] + ReadBandwidthChroma[k]); } -#endif } static double CalculateRequiredDispclk( @@ -1510,15 +1463,13 @@ static unsigned int dscceComputeDelay( //pixel delay is group_delay (converted to pixels) + pipeline, however, first group is a special case since it is processed as soon as it arrives (i.e., in 3 cycles regardless of pixel format) pixels = (group_delay - 1) * cycles_per_group + 3 + pipeline_delay; -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: bpc: %u\n", __func__, bpc); - dml2_printf("DML::%s: BPP: %f\n", __func__, BPP); - dml2_printf("DML::%s: sliceWidth: %u\n", __func__, sliceWidth); - dml2_printf("DML::%s: numSlices: %u\n", __func__, numSlices); - dml2_printf("DML::%s: pixelFormat: %u\n", __func__, pixelFormat); - dml2_printf("DML::%s: Output: %u\n", __func__, Output); - dml2_printf("DML::%s: pixels: %u\n", __func__, pixels); -#endif + DML_LOG_VERBOSE("DML::%s: bpc: %u\n", __func__, bpc); + DML_LOG_VERBOSE("DML::%s: BPP: %f\n", __func__, BPP); + DML_LOG_VERBOSE("DML::%s: sliceWidth: %u\n", __func__, sliceWidth); + DML_LOG_VERBOSE("DML::%s: numSlices: %u\n", __func__, numSlices); + DML_LOG_VERBOSE("DML::%s: pixelFormat: %u\n", __func__, pixelFormat); + DML_LOG_VERBOSE("DML::%s: Output: %u\n", __func__, Output); + DML_LOG_VERBOSE("DML::%s: pixels: %u\n", __func__, pixels); return pixels; } @@ -1593,10 +1544,8 @@ static unsigned int dscComputeDelay(enum dml2_output_format_class pixelFormat, e // sft Delay = Delay + 1; -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: pixelFormat = %u\n", __func__, pixelFormat); - dml2_printf("DML::%s: Delay = %u\n", __func__, Delay); -#endif + DML_LOG_VERBOSE("DML::%s: pixelFormat = %u\n", __func__, pixelFormat); + DML_LOG_VERBOSE("DML::%s: Delay = %u\n", __func__, Delay); return Delay; } @@ -1667,10 +1616,8 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_ } meta_surface_bytes = (unsigned int)(p->DCCMetaPitch * vp_height_meta_ub * p->BytePerPixel / 256.0); -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: DCCMetaPitch = %u\n", __func__, p->DCCMetaPitch); - dml2_printf("DML::%s: meta_surface_bytes = %u\n", __func__, meta_surface_bytes); -#endif + 
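dscceComputeDelay() lands on a single closed form, spelled out in the comment retained above: the first group of three pixels is processed the moment it arrives (a flat 3 cycles), every later group costs cycles_per_group, and the fixed pipeline latency rides on top:

    static unsigned int dscce_delay_pixels(unsigned int group_delay,
                                           unsigned int cycles_per_group,
                                           unsigned int pipeline_delay)
    {
        /* (group_delay - 1) full-cost groups + 3 cycles for the first
         * group + pipeline latency. */
        return (group_delay - 1) * cycles_per_group + 3 + pipeline_delay;
    }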
DML_LOG_VERBOSE("DML::%s: DCCMetaPitch = %u\n", __func__, p->DCCMetaPitch); + DML_LOG_VERBOSE("DML::%s: meta_surface_bytes = %u\n", __func__, meta_surface_bytes); if (p->GPUVMEnable == true) { double meta_vmpg_bytes = 4.0 * 1024.0; *p->meta_pte_bytes_per_frame_ub = (unsigned int)((math_ceil2((double) (meta_surface_bytes - meta_vmpg_bytes) / (8 * meta_vmpg_bytes), 1) + 1) * 64); @@ -1724,25 +1671,23 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_ vm_bytes = *p->meta_pte_bytes_per_frame_ub + extra_mpde_bytes + *p->dpde0_bytes_per_frame_ub + extra_dpde_bytes; -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: DCCEnable = %u\n", __func__, p->DCCEnable); - dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable); - dml2_printf("DML::%s: SwModeLinear = %u\n", __func__, p->SurfaceTiling == dml2_sw_linear); - dml2_printf("DML::%s: BytePerPixel = %u\n", __func__, p->BytePerPixel); - dml2_printf("DML::%s: GPUVMMaxPageTableLevels = %u\n", __func__, p->GPUVMMaxPageTableLevels); - dml2_printf("DML::%s: BlockHeight256Bytes = %u\n", __func__, p->BlockHeight256Bytes); - dml2_printf("DML::%s: BlockWidth256Bytes = %u\n", __func__, p->BlockWidth256Bytes); - dml2_printf("DML::%s: MacroTileHeight = %u\n", __func__, p->MacroTileHeight); - dml2_printf("DML::%s: MacroTileWidth = %u\n", __func__, p->MacroTileWidth); - dml2_printf("DML::%s: meta_pte_bytes_per_frame_ub = %u\n", __func__, *p->meta_pte_bytes_per_frame_ub); - dml2_printf("DML::%s: dpde0_bytes_per_frame_ub = %u\n", __func__, *p->dpde0_bytes_per_frame_ub); - dml2_printf("DML::%s: extra_mpde_bytes = %u\n", __func__, extra_mpde_bytes); - dml2_printf("DML::%s: extra_dpde_bytes = %u\n", __func__, extra_dpde_bytes); - dml2_printf("DML::%s: vm_bytes = %u\n", __func__, vm_bytes); - dml2_printf("DML::%s: ViewportHeight = %u\n", __func__, p->ViewportHeight); - dml2_printf("DML::%s: SwathWidth = %u\n", __func__, p->SwathWidth); - dml2_printf("DML::%s: vp_height_dpte_ub = %u\n", __func__, vp_height_dpte_ub); -#endif + DML_LOG_VERBOSE("DML::%s: DCCEnable = %u\n", __func__, p->DCCEnable); + DML_LOG_VERBOSE("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable); + DML_LOG_VERBOSE("DML::%s: SwModeLinear = %u\n", __func__, p->SurfaceTiling == dml2_sw_linear); + DML_LOG_VERBOSE("DML::%s: BytePerPixel = %u\n", __func__, p->BytePerPixel); + DML_LOG_VERBOSE("DML::%s: GPUVMMaxPageTableLevels = %u\n", __func__, p->GPUVMMaxPageTableLevels); + DML_LOG_VERBOSE("DML::%s: BlockHeight256Bytes = %u\n", __func__, p->BlockHeight256Bytes); + DML_LOG_VERBOSE("DML::%s: BlockWidth256Bytes = %u\n", __func__, p->BlockWidth256Bytes); + DML_LOG_VERBOSE("DML::%s: MacroTileHeight = %u\n", __func__, p->MacroTileHeight); + DML_LOG_VERBOSE("DML::%s: MacroTileWidth = %u\n", __func__, p->MacroTileWidth); + DML_LOG_VERBOSE("DML::%s: meta_pte_bytes_per_frame_ub = %u\n", __func__, *p->meta_pte_bytes_per_frame_ub); + DML_LOG_VERBOSE("DML::%s: dpde0_bytes_per_frame_ub = %u\n", __func__, *p->dpde0_bytes_per_frame_ub); + DML_LOG_VERBOSE("DML::%s: extra_mpde_bytes = %u\n", __func__, extra_mpde_bytes); + DML_LOG_VERBOSE("DML::%s: extra_dpde_bytes = %u\n", __func__, extra_dpde_bytes); + DML_LOG_VERBOSE("DML::%s: vm_bytes = %u\n", __func__, vm_bytes); + DML_LOG_VERBOSE("DML::%s: ViewportHeight = %u\n", __func__, p->ViewportHeight); + DML_LOG_VERBOSE("DML::%s: SwathWidth = %u\n", __func__, p->SwathWidth); + DML_LOG_VERBOSE("DML::%s: vp_height_dpte_ub = %u\n", __func__, vp_height_dpte_ub); if (p->SurfaceTiling == dml2_sw_linear) { *p->PixelPTEReqHeight = 1; @@ 
-1778,22 +1723,20 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_ *p->vmpg_width = 1024 * p->GPUVMMinPageSizeKBytes / (p->MacroTileHeight * p->BytePerPixel); if (p->GPUVMEnable == true) { - dml2_printf("DML::%s: GPUVMMinPageSizeKBytes=%u and sw_mode=%u (tile_size=%d) not supported!\n", + DML_LOG_VERBOSE("DML::%s: GPUVMMinPageSizeKBytes=%u and sw_mode=%u (tile_size=%d) not supported!\n", __func__, p->GPUVMMinPageSizeKBytes, p->SurfaceTiling, dml_get_tile_block_size_bytes(p->SurfaceTiling)); - DML2_ASSERT(0); + DML_ASSERT(0); } } -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes); - dml2_printf("DML::%s: PixelPTEReqHeight = %u\n", __func__, *p->PixelPTEReqHeight); - dml2_printf("DML::%s: PixelPTEReqWidth = %u\n", __func__, *p->PixelPTEReqWidth); - dml2_printf("DML::%s: PixelPTEReqWidth_linear = %u\n", __func__, PixelPTEReqWidth_linear); - dml2_printf("DML::%s: PTERequestSize = %u\n", __func__, *p->PTERequestSize); - dml2_printf("DML::%s: Pitch = %u\n", __func__, p->Pitch); - dml2_printf("DML::%s: vmpg_width = %u\n", __func__, *p->vmpg_width); - dml2_printf("DML::%s: vmpg_height = %u\n", __func__, *p->vmpg_height); -#endif + DML_LOG_VERBOSE("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes); + DML_LOG_VERBOSE("DML::%s: PixelPTEReqHeight = %u\n", __func__, *p->PixelPTEReqHeight); + DML_LOG_VERBOSE("DML::%s: PixelPTEReqWidth = %u\n", __func__, *p->PixelPTEReqWidth); + DML_LOG_VERBOSE("DML::%s: PixelPTEReqWidth_linear = %u\n", __func__, PixelPTEReqWidth_linear); + DML_LOG_VERBOSE("DML::%s: PTERequestSize = %u\n", __func__, *p->PTERequestSize); + DML_LOG_VERBOSE("DML::%s: Pitch = %u\n", __func__, p->Pitch); + DML_LOG_VERBOSE("DML::%s: vmpg_width = %u\n", __func__, *p->vmpg_width); + DML_LOG_VERBOSE("DML::%s: vmpg_height = %u\n", __func__, *p->vmpg_height); *p->dpte_row_height_one_row_per_frame = vp_height_dpte_ub; *p->dpte_row_width_ub_one_row_per_frame = (unsigned int)((math_ceil2(((double)p->Pitch * (double)*p->dpte_row_height_one_row_per_frame / (double)*p->PixelPTEReqHeight - 1) / (double)*p->PixelPTEReqWidth, 1) + 1) * (double)*p->PixelPTEReqWidth); @@ -1811,7 +1754,7 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_ *p->dpte_row_height_linear = 128; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: dpte_row_width_ub = %u (linear)\n", __func__, *p->dpte_row_width_ub); + DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub = %u (linear)\n", __func__, *p->dpte_row_width_ub); #endif } else if (!dml_is_vertical_rotation(p->RotationAngle)) { @@ -1825,7 +1768,7 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_ *p->dpte_row_width_ub = (unsigned int)((math_ceil2((double)(p->SwathWidth - 1) / (double)*p->PixelPTEReqWidth, 1) + 1.0) * *p->PixelPTEReqWidth); } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: dpte_row_width_ub = %u (tiled horz)\n", __func__, *p->dpte_row_width_ub); + DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub = %u (tiled horz)\n", __func__, *p->dpte_row_width_ub); #endif *p->PixelPTEBytesPerRow = *p->dpte_row_width_ub / *p->PixelPTEReqWidth * *p->PTERequestSize; @@ -1840,7 +1783,7 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_ *p->PixelPTEBytesPerRow = (unsigned int)((double)*p->dpte_row_width_ub / (double)*p->PixelPTEReqHeight * *p->PTERequestSize); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: dpte_row_width_ub = %u (tiled vert)\n", __func__, 
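For the horizontal-scan case above, the PTE row is the swath width rounded up so an integral number of PTE requests spans it (the -1/+1 pair covers a start that straddles a request boundary), and the byte cost is simply requests x request size:

    #include <math.h>

    static unsigned int pte_bytes_per_row(unsigned int swath_width,
                                          unsigned int pte_req_width,
                                          unsigned int pte_req_size)
    {
        unsigned int dpte_row_width_ub = (unsigned int)
            ((ceil(((double)swath_width - 1) / pte_req_width) + 1) *
             pte_req_width);

        return dpte_row_width_ub / pte_req_width * pte_req_size;
    }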
*p->dpte_row_width_ub); + DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub = %u (tiled vert)\n", __func__, *p->dpte_row_width_ub); #endif } @@ -1852,18 +1795,18 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_ *p->PixelPTEBytesPerRowStorage = *p->PixelPTEBytesPerRow; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes); - dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable); - dml2_printf("DML::%s: meta_row_height = %u\n", __func__, *p->meta_row_height); - dml2_printf("DML::%s: dpte_row_height = %u\n", __func__, *p->dpte_row_height); - dml2_printf("DML::%s: dpte_row_height_linear = %u\n", __func__, *p->dpte_row_height_linear); - dml2_printf("DML::%s: dpte_row_width_ub = %u\n", __func__, *p->dpte_row_width_ub); - dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, *p->PixelPTEBytesPerRow); - dml2_printf("DML::%s: PixelPTEBytesPerRowStorage = %u\n", __func__, *p->PixelPTEBytesPerRowStorage); - dml2_printf("DML::%s: PTEBufferSizeInRequests = %u\n", __func__, p->PTEBufferSizeInRequests); - dml2_printf("DML::%s: dpte_row_height_one_row_per_frame = %u\n", __func__, *p->dpte_row_height_one_row_per_frame); - dml2_printf("DML::%s: dpte_row_width_ub_one_row_per_frame = %u\n", __func__, *p->dpte_row_width_ub_one_row_per_frame); - dml2_printf("DML::%s: PixelPTEBytesPerRow_one_row_per_frame = %u\n", __func__, *p->PixelPTEBytesPerRow_one_row_per_frame); + DML_LOG_VERBOSE("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes); + DML_LOG_VERBOSE("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable); + DML_LOG_VERBOSE("DML::%s: meta_row_height = %u\n", __func__, *p->meta_row_height); + DML_LOG_VERBOSE("DML::%s: dpte_row_height = %u\n", __func__, *p->dpte_row_height); + DML_LOG_VERBOSE("DML::%s: dpte_row_height_linear = %u\n", __func__, *p->dpte_row_height_linear); + DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub = %u\n", __func__, *p->dpte_row_width_ub); + DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, *p->PixelPTEBytesPerRow); + DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRowStorage = %u\n", __func__, *p->PixelPTEBytesPerRowStorage); + DML_LOG_VERBOSE("DML::%s: PTEBufferSizeInRequests = %u\n", __func__, p->PTEBufferSizeInRequests); + DML_LOG_VERBOSE("DML::%s: dpte_row_height_one_row_per_frame = %u\n", __func__, *p->dpte_row_height_one_row_per_frame); + DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub_one_row_per_frame = %u\n", __func__, *p->dpte_row_width_ub_one_row_per_frame); + DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow_one_row_per_frame = %u\n", __func__, *p->PixelPTEBytesPerRow_one_row_per_frame); #endif return vm_bytes; @@ -1894,12 +1837,12 @@ static unsigned int CalculatePrefetchSourceLines( double numLines = 0; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: VRatio = %f\n", __func__, VRatio); - dml2_printf("DML::%s: VTaps = %u\n", __func__, VTaps); - dml2_printf("DML::%s: ViewportXStart = %u\n", __func__, ViewportXStart); - dml2_printf("DML::%s: ViewportYStart = %u\n", __func__, ViewportYStart); - dml2_printf("DML::%s: ViewportStationary = %u\n", __func__, ViewportStationary); - dml2_printf("DML::%s: SwathHeight = %u\n", __func__, SwathHeight); + DML_LOG_VERBOSE("DML::%s: VRatio = %f\n", __func__, VRatio); + DML_LOG_VERBOSE("DML::%s: VTaps = %u\n", __func__, VTaps); + DML_LOG_VERBOSE("DML::%s: ViewportXStart = %u\n", __func__, ViewportXStart); + DML_LOG_VERBOSE("DML::%s: ViewportYStart = %u\n", __func__, ViewportYStart); + 
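The only prefill branch visible in CalculatePrefetchSourceLines() is the progressive-to-interlace one: the initial fill line is the midpoint of the first output line's vertical tap window, and the total prefetch cost then follows as whole swaths plus a partial one (numLines = MaxNumSwath * SwathHeight + MaxPartialSwath further down). The visible branch as a one-liner:

    #include <math.h>

    static unsigned int v_init_prefill(double v_ratio, unsigned int v_taps)
    {
        return (unsigned int)floor((v_ratio + (double)v_taps + 1) / 2.0);
    }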
DML_LOG_VERBOSE("DML::%s: ViewportStationary = %u\n", __func__, ViewportStationary); + DML_LOG_VERBOSE("DML::%s: SwathHeight = %u\n", __func__, SwathHeight); #endif if (ProgressiveToInterlaceUnitInOPP) *VInitPreFill = (unsigned int)(math_floor2((VRatio + (double)VTaps + 1) / 2.0, 1)); @@ -1934,11 +1877,11 @@ static unsigned int CalculatePrefetchSourceLines( numLines = *MaxNumSwath * SwathHeight + MaxPartialSwath; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: vp_start_rot = %u\n", __func__, vp_start_rot); - dml2_printf("DML::%s: VInitPreFill = %u\n", __func__, *VInitPreFill); - dml2_printf("DML::%s: MaxPartialSwath = %u\n", __func__, MaxPartialSwath); - dml2_printf("DML::%s: MaxNumSwath = %u\n", __func__, *MaxNumSwath); - dml2_printf("DML::%s: Prefetch source lines = %3.2f\n", __func__, numLines); + DML_LOG_VERBOSE("DML::%s: vp_start_rot = %u\n", __func__, vp_start_rot); + DML_LOG_VERBOSE("DML::%s: VInitPreFill = %u\n", __func__, *VInitPreFill); + DML_LOG_VERBOSE("DML::%s: MaxPartialSwath = %u\n", __func__, MaxPartialSwath); + DML_LOG_VERBOSE("DML::%s: MaxNumSwath = %u\n", __func__, *MaxNumSwath); + DML_LOG_VERBOSE("DML::%s: Prefetch source lines = %3.2f\n", __func__, numLines); #endif return (unsigned int)(numLines); @@ -2007,8 +1950,8 @@ static void CalculateMALLUseForStaticScreen( if (is_using_mall_for_ss[k]) TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[k]; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, is_using_mall_for_ss[k]); - dml2_printf("DML::%s: k=%u, TotalSurfaceSizeInMALL = %u\n", __func__, k, TotalSurfaceSizeInMALL); + DML_LOG_VERBOSE("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, is_using_mall_for_ss[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, TotalSurfaceSizeInMALL = %u\n", __func__, k, TotalSurfaceSizeInMALL); #endif } @@ -2022,7 +1965,7 @@ static void CalculateMALLUseForStaticScreen( (!CanAddAnotherSurfaceToMALL || SurfaceSizeInMALL[k] < SurfaceSizeInMALL[SurfaceToAddToMALL])) { CanAddAnotherSurfaceToMALL = true; SurfaceToAddToMALL = k; - dml2_printf("DML::%s: k=%u, UseMALLForStaticScreen = %u (dis, en, optimize)\n", __func__, k, display_cfg->plane_descriptors[k].overrides.refresh_from_mall); + DML_LOG_VERBOSE("DML::%s: k=%u, UseMALLForStaticScreen = %u (dis, en, optimize)\n", __func__, k, display_cfg->plane_descriptors[k].overrides.refresh_from_mall); } } if (CanAddAnotherSurfaceToMALL) { @@ -2030,8 +1973,8 @@ static void CalculateMALLUseForStaticScreen( TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[SurfaceToAddToMALL]; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: SurfaceToAddToMALL = %u\n", __func__, SurfaceToAddToMALL); - dml2_printf("DML::%s: TotalSurfaceSizeInMALL = %u\n", __func__, TotalSurfaceSizeInMALL); + DML_LOG_VERBOSE("DML::%s: SurfaceToAddToMALL = %u\n", __func__, SurfaceToAddToMALL); + DML_LOG_VERBOSE("DML::%s: TotalSurfaceSizeInMALL = %u\n", __func__, TotalSurfaceSizeInMALL); #endif } } @@ -2203,15 +2146,15 @@ static void CalculateDCCConfiguration( segment_order_vert_contiguous_chroma = 0; } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: DCCEnabled = %u\n", __func__, DCCEnabled); - dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte); - dml2_printf("DML::%s: DETBufferSizeForDCC = %u\n", __func__, DETBufferSizeForDCC); - dml2_printf("DML::%s: req128_horz_wc_l = %u\n", __func__, req128_horz_wc_l); - dml2_printf("DML::%s: req128_horz_wc_c = %u\n", __func__, req128_horz_wc_c); - dml2_printf("DML::%s: full_swath_bytes_horz_wc_l = %u\n", 
__func__, full_swath_bytes_horz_wc_l); - dml2_printf("DML::%s: full_swath_bytes_vert_wc_c = %u\n", __func__, full_swath_bytes_vert_wc_c); - dml2_printf("DML::%s: segment_order_horz_contiguous_luma = %u\n", __func__, segment_order_horz_contiguous_luma); - dml2_printf("DML::%s: segment_order_horz_contiguous_chroma = %u\n", __func__, segment_order_horz_contiguous_chroma); + DML_LOG_VERBOSE("DML::%s: DCCEnabled = %u\n", __func__, DCCEnabled); + DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte); + DML_LOG_VERBOSE("DML::%s: DETBufferSizeForDCC = %u\n", __func__, DETBufferSizeForDCC); + DML_LOG_VERBOSE("DML::%s: req128_horz_wc_l = %u\n", __func__, req128_horz_wc_l); + DML_LOG_VERBOSE("DML::%s: req128_horz_wc_c = %u\n", __func__, req128_horz_wc_c); + DML_LOG_VERBOSE("DML::%s: full_swath_bytes_horz_wc_l = %u\n", __func__, full_swath_bytes_horz_wc_l); + DML_LOG_VERBOSE("DML::%s: full_swath_bytes_vert_wc_c = %u\n", __func__, full_swath_bytes_vert_wc_c); + DML_LOG_VERBOSE("DML::%s: segment_order_horz_contiguous_luma = %u\n", __func__, segment_order_horz_contiguous_luma); + DML_LOG_VERBOSE("DML::%s: segment_order_horz_contiguous_chroma = %u\n", __func__, segment_order_horz_contiguous_chroma); #endif if (DCCProgrammingAssumesScanDirectionUnknown == true) { if (req128_horz_wc_l == 0 && req128_vert_wc_l == 0) { @@ -2301,12 +2244,12 @@ static void CalculateDCCConfiguration( } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: MaxUncompressedBlockLuma = %u\n", __func__, *MaxUncompressedBlockLuma); - dml2_printf("DML::%s: MaxCompressedBlockLuma = %u\n", __func__, *MaxCompressedBlockLuma); - dml2_printf("DML::%s: IndependentBlockLuma = %u\n", __func__, *IndependentBlockLuma); - dml2_printf("DML::%s: MaxUncompressedBlockChroma = %u\n", __func__, *MaxUncompressedBlockChroma); - dml2_printf("DML::%s: MaxCompressedBlockChroma = %u\n", __func__, *MaxCompressedBlockChroma); - dml2_printf("DML::%s: IndependentBlockChroma = %u\n", __func__, *IndependentBlockChroma); + DML_LOG_VERBOSE("DML::%s: MaxUncompressedBlockLuma = %u\n", __func__, *MaxUncompressedBlockLuma); + DML_LOG_VERBOSE("DML::%s: MaxCompressedBlockLuma = %u\n", __func__, *MaxCompressedBlockLuma); + DML_LOG_VERBOSE("DML::%s: IndependentBlockLuma = %u\n", __func__, *IndependentBlockLuma); + DML_LOG_VERBOSE("DML::%s: MaxUncompressedBlockChroma = %u\n", __func__, *MaxUncompressedBlockChroma); + DML_LOG_VERBOSE("DML::%s: MaxCompressedBlockChroma = %u\n", __func__, *MaxCompressedBlockChroma); + DML_LOG_VERBOSE("DML::%s: IndependentBlockChroma = %u\n", __func__, *IndependentBlockChroma); #endif } @@ -2326,26 +2269,26 @@ static void calculate_mcache_row_bytes( unsigned int mvmpg_per_mcache; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: num_chans = %u\n", __func__, p->num_chans); - dml2_printf("DML::%s: mem_word_bytes = %u\n", __func__, p->mem_word_bytes); - dml2_printf("DML::%s: mcache_line_size_bytes = %u\n", __func__, p->mcache_line_size_bytes); - dml2_printf("DML::%s: mcache_size_bytes = %u\n", __func__, p->mcache_size_bytes); - dml2_printf("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable); - dml2_printf("DML::%s: gpuvm_page_size_kbytes = %u\n", __func__, p->gpuvm_page_size_kbytes); - dml2_printf("DML::%s: vp_stationary = %u\n", __func__, p->vp_stationary); - dml2_printf("DML::%s: tiling_mode = %u\n", __func__, p->tiling_mode); - dml2_printf("DML::%s: vp_start_x = %u\n", __func__, p->vp_start_x); - dml2_printf("DML::%s: vp_start_y = %u\n", __func__, p->vp_start_y); - dml2_printf("DML::%s: full_vp_width = %u\n", __func__, 
p->full_vp_width); - dml2_printf("DML::%s: full_vp_height = %u\n", __func__, p->full_vp_height); - dml2_printf("DML::%s: blk_width = %u\n", __func__, p->blk_width); - dml2_printf("DML::%s: blk_height = %u\n", __func__, p->blk_height); - dml2_printf("DML::%s: vmpg_width = %u\n", __func__, p->vmpg_width); - dml2_printf("DML::%s: vmpg_height = %u\n", __func__, p->vmpg_height); - dml2_printf("DML::%s: full_swath_bytes = %u\n", __func__, p->full_swath_bytes); -#endif - DML2_ASSERT(p->mcache_line_size_bytes != 0); - DML2_ASSERT(p->mcache_size_bytes != 0); + DML_LOG_VERBOSE("DML::%s: num_chans = %u\n", __func__, p->num_chans); + DML_LOG_VERBOSE("DML::%s: mem_word_bytes = %u\n", __func__, p->mem_word_bytes); + DML_LOG_VERBOSE("DML::%s: mcache_line_size_bytes = %u\n", __func__, p->mcache_line_size_bytes); + DML_LOG_VERBOSE("DML::%s: mcache_size_bytes = %u\n", __func__, p->mcache_size_bytes); + DML_LOG_VERBOSE("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable); + DML_LOG_VERBOSE("DML::%s: gpuvm_page_size_kbytes = %u\n", __func__, p->gpuvm_page_size_kbytes); + DML_LOG_VERBOSE("DML::%s: vp_stationary = %u\n", __func__, p->vp_stationary); + DML_LOG_VERBOSE("DML::%s: tiling_mode = %u\n", __func__, p->tiling_mode); + DML_LOG_VERBOSE("DML::%s: vp_start_x = %u\n", __func__, p->vp_start_x); + DML_LOG_VERBOSE("DML::%s: vp_start_y = %u\n", __func__, p->vp_start_y); + DML_LOG_VERBOSE("DML::%s: full_vp_width = %u\n", __func__, p->full_vp_width); + DML_LOG_VERBOSE("DML::%s: full_vp_height = %u\n", __func__, p->full_vp_height); + DML_LOG_VERBOSE("DML::%s: blk_width = %u\n", __func__, p->blk_width); + DML_LOG_VERBOSE("DML::%s: blk_height = %u\n", __func__, p->blk_height); + DML_LOG_VERBOSE("DML::%s: vmpg_width = %u\n", __func__, p->vmpg_width); + DML_LOG_VERBOSE("DML::%s: vmpg_height = %u\n", __func__, p->vmpg_height); + DML_LOG_VERBOSE("DML::%s: full_swath_bytes = %u\n", __func__, p->full_swath_bytes); +#endif + DML_ASSERT(p->mcache_line_size_bytes != 0); + DML_ASSERT(p->mcache_size_bytes != 0); *p->mvmpg_width = 0; *p->mvmpg_height = 0; @@ -2370,8 +2313,8 @@ static void calculate_mcache_row_bytes( *p->mvmpg_width = p->vmpg_width; *p->mvmpg_height = p->vmpg_height; } else if (!((blk_bytes == 65536) && (vmpg_bytes == 4096))) { - dml2_printf("ERROR: DML::%s: Tiling size and vm page size combination not supported\n", __func__); - DML2_ASSERT(0); + DML_LOG_VERBOSE("ERROR: DML::%s: Tiling size and vm page size combination not supported\n", __func__); + DML_ASSERT(0); } } @@ -2439,25 +2382,25 @@ static void calculate_mcache_row_bytes( *p->mvmpg_per_mcache_lb = (unsigned int)math_floor2(mvmpg_per_mcache, 1); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable); - dml2_printf("DML::%s: vmpg_bytes = %u\n", __func__, vmpg_bytes); - dml2_printf("DML::%s: blk_bytes = %u\n", __func__, blk_bytes); - dml2_printf("DML::%s: meta_per_mvmpg_per_channel = %f\n", __func__, meta_per_mvmpg_per_channel); - dml2_printf("DML::%s: mvmpg_per_row_ub = %u\n", __func__, mvmpg_per_row_ub); - dml2_printf("DML::%s: meta_row_width_ub = %u\n", __func__, *p->meta_row_width_ub); - dml2_printf("DML::%s: mvmpg_width = %u\n", __func__, *p->mvmpg_width); - dml2_printf("DML::%s: mvmpg_height = %u\n", __func__, *p->mvmpg_height); - dml2_printf("DML::%s: dcc_dram_bw_nom_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_nom_overhead_factor); - dml2_printf("DML::%s: dcc_dram_bw_pref_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_pref_overhead_factor); + DML_LOG_VERBOSE("DML::%s: gpuvm_enable = %u\n", 
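The renamed DML_ASSERT() calls here guard the divisions that follow (mcache_line_size_bytes, mcache_size_bytes, and later num_mcaches all end up as divisors). The macro's real definition lives in the DML debug headers; a hypothetical stand-in that reports rather than halts, since a mode-support query must not take the system down, could look like:

    #include <stdio.h>

    #define EXAMPLE_DML_ASSERT(cond)                                    \
        do {                                                            \
            if (!(cond))                                                \
                fprintf(stderr, "DML assert failed %s:%d: %s\n",        \
                        __FILE__, __LINE__, #cond);                     \
        } while (0)

    static unsigned int mcaches_for_row(unsigned int row_bytes,
                                        unsigned int mcache_size_bytes)
    {
        EXAMPLE_DML_ASSERT(mcache_size_bytes != 0);
        if (!mcache_size_bytes)
            return 0;
        /* Illustrative round-up; the driver's sizing is more involved. */
        return (row_bytes + mcache_size_bytes - 1) / mcache_size_bytes;
    }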
__func__, p->gpuvm_enable); + DML_LOG_VERBOSE("DML::%s: vmpg_bytes = %u\n", __func__, vmpg_bytes); + DML_LOG_VERBOSE("DML::%s: blk_bytes = %u\n", __func__, blk_bytes); + DML_LOG_VERBOSE("DML::%s: meta_per_mvmpg_per_channel = %f\n", __func__, meta_per_mvmpg_per_channel); + DML_LOG_VERBOSE("DML::%s: mvmpg_per_row_ub = %u\n", __func__, mvmpg_per_row_ub); + DML_LOG_VERBOSE("DML::%s: meta_row_width_ub = %u\n", __func__, *p->meta_row_width_ub); + DML_LOG_VERBOSE("DML::%s: mvmpg_width = %u\n", __func__, *p->mvmpg_width); + DML_LOG_VERBOSE("DML::%s: mvmpg_height = %u\n", __func__, *p->mvmpg_height); + DML_LOG_VERBOSE("DML::%s: dcc_dram_bw_nom_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_nom_overhead_factor); + DML_LOG_VERBOSE("DML::%s: dcc_dram_bw_pref_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_pref_overhead_factor); #endif } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: mcache_row_bytes = %u\n", __func__, *p->mcache_row_bytes); - dml2_printf("DML::%s: mcache_row_bytes_per_channel = %u\n", __func__, *p->mcache_row_bytes_per_channel); - dml2_printf("DML::%s: num_mcaches = %u\n", __func__, *p->num_mcaches); + DML_LOG_VERBOSE("DML::%s: mcache_row_bytes = %u\n", __func__, *p->mcache_row_bytes); + DML_LOG_VERBOSE("DML::%s: mcache_row_bytes_per_channel = %u\n", __func__, *p->mcache_row_bytes_per_channel); + DML_LOG_VERBOSE("DML::%s: num_mcaches = %u\n", __func__, *p->num_mcaches); #endif - DML2_ASSERT(*p->num_mcaches > 0); + DML_ASSERT(*p->num_mcaches > 0); } static void calculate_mcache_setting( @@ -2523,7 +2466,7 @@ static void calculate_mcache_setting( l->l_p.mvmpg_per_mcache_lb = &l->mvmpg_per_mcache_lb_l; calculate_mcache_row_bytes(scratch, &l->l_p); - DML2_ASSERT(*p->num_mcaches_l > 0); + DML_ASSERT(*p->num_mcaches_l > 0); if (l->is_dual_plane) { l->c_p.num_chans = p->num_chans; @@ -2559,7 +2502,7 @@ static void calculate_mcache_setting( l->c_p.mvmpg_per_mcache_lb = &l->mvmpg_per_mcache_lb_c; calculate_mcache_row_bytes(scratch, &l->c_p); - DML2_ASSERT(*p->num_mcaches_c > 0); + DML_ASSERT(*p->num_mcaches_c > 0); } // Sharing for iMALL access @@ -2598,28 +2541,28 @@ static void calculate_mcache_setting( } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: imall_enable = %u\n", __func__, p->imall_enable); - dml2_printf("DML::%s: is_dual_plane = %u\n", __func__, l->is_dual_plane); - dml2_printf("DML::%s: surf_vert = %u\n", __func__, p->surf_vert); - dml2_printf("DML::%s: mvmpg_width_l = %u\n", __func__, l->mvmpg_width_l); - dml2_printf("DML::%s: mvmpg_height_l = %u\n", __func__, l->mvmpg_height_l); - dml2_printf("DML::%s: mcache_remainder_l = %f\n", __func__, l->mcache_remainder_l); - dml2_printf("DML::%s: num_mcaches_l = %u\n", __func__, *p->num_mcaches_l); - dml2_printf("DML::%s: avg_mcache_element_size_l = %u\n", __func__, l->avg_mcache_element_size_l); - dml2_printf("DML::%s: mvmpg_access_width_l = %u\n", __func__, l->mvmpg_access_width_l); - dml2_printf("DML::%s: mall_comb_mcache_l = %u\n", __func__, *p->mall_comb_mcache_l); + DML_LOG_VERBOSE("DML::%s: imall_enable = %u\n", __func__, p->imall_enable); + DML_LOG_VERBOSE("DML::%s: is_dual_plane = %u\n", __func__, l->is_dual_plane); + DML_LOG_VERBOSE("DML::%s: surf_vert = %u\n", __func__, p->surf_vert); + DML_LOG_VERBOSE("DML::%s: mvmpg_width_l = %u\n", __func__, l->mvmpg_width_l); + DML_LOG_VERBOSE("DML::%s: mvmpg_height_l = %u\n", __func__, l->mvmpg_height_l); + DML_LOG_VERBOSE("DML::%s: mcache_remainder_l = %f\n", __func__, l->mcache_remainder_l); + DML_LOG_VERBOSE("DML::%s: num_mcaches_l = %u\n", __func__, *p->num_mcaches_l); + 
DML_LOG_VERBOSE("DML::%s: avg_mcache_element_size_l = %u\n", __func__, l->avg_mcache_element_size_l); + DML_LOG_VERBOSE("DML::%s: mvmpg_access_width_l = %u\n", __func__, l->mvmpg_access_width_l); + DML_LOG_VERBOSE("DML::%s: mall_comb_mcache_l = %u\n", __func__, *p->mall_comb_mcache_l); if (l->is_dual_plane) { - dml2_printf("DML::%s: mvmpg_width_c = %u\n", __func__, l->mvmpg_width_c); - dml2_printf("DML::%s: mvmpg_height_c = %u\n", __func__, l->mvmpg_height_c); - dml2_printf("DML::%s: mcache_remainder_c = %f\n", __func__, l->mcache_remainder_c); - dml2_printf("DML::%s: luma_time_factor = %f\n", __func__, l->luma_time_factor); - dml2_printf("DML::%s: num_mcaches_c = %u\n", __func__, *p->num_mcaches_c); - dml2_printf("DML::%s: avg_mcache_element_size_c = %u\n", __func__, l->avg_mcache_element_size_c); - dml2_printf("DML::%s: mvmpg_access_width_c = %u\n", __func__, l->mvmpg_access_width_c); - dml2_printf("DML::%s: mall_comb_mcache_c = %u\n", __func__, *p->mall_comb_mcache_c); - dml2_printf("DML::%s: lc_comb_last_mcache_size = %u\n", __func__, l->lc_comb_last_mcache_size); - dml2_printf("DML::%s: lc_comb_mcache = %u\n", __func__, *p->lc_comb_mcache); + DML_LOG_VERBOSE("DML::%s: mvmpg_width_c = %u\n", __func__, l->mvmpg_width_c); + DML_LOG_VERBOSE("DML::%s: mvmpg_height_c = %u\n", __func__, l->mvmpg_height_c); + DML_LOG_VERBOSE("DML::%s: mcache_remainder_c = %f\n", __func__, l->mcache_remainder_c); + DML_LOG_VERBOSE("DML::%s: luma_time_factor = %f\n", __func__, l->luma_time_factor); + DML_LOG_VERBOSE("DML::%s: num_mcaches_c = %u\n", __func__, *p->num_mcaches_c); + DML_LOG_VERBOSE("DML::%s: avg_mcache_element_size_c = %u\n", __func__, l->avg_mcache_element_size_c); + DML_LOG_VERBOSE("DML::%s: mvmpg_access_width_c = %u\n", __func__, l->mvmpg_access_width_c); + DML_LOG_VERBOSE("DML::%s: mall_comb_mcache_c = %u\n", __func__, *p->mall_comb_mcache_c); + DML_LOG_VERBOSE("DML::%s: lc_comb_last_mcache_size = %u\n", __func__, l->lc_comb_last_mcache_size); + DML_LOG_VERBOSE("DML::%s: lc_comb_mcache = %u\n", __func__, *p->lc_comb_mcache); } #endif // calculate split_coordinate @@ -2639,11 +2582,11 @@ static void calculate_mcache_setting( } #ifdef __DML_VBA_DEBUG__ for (n = 0; n < *p->num_mcaches_l; n++) - dml2_printf("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]); + DML_LOG_VERBOSE("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]); if (l->is_dual_plane) { for (n = 0; n < *p->num_mcaches_c; n++) - dml2_printf("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]); + DML_LOG_VERBOSE("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]); } #endif @@ -2660,10 +2603,10 @@ static void calculate_mcache_setting( #ifdef __DML_VBA_DEBUG__ for (n = 0; n < *p->num_mcaches_l; n++) - dml2_printf("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]); + DML_LOG_VERBOSE("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]); for (n = 0; n < *p->num_mcaches_c; n++) - dml2_printf("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]); + DML_LOG_VERBOSE("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]); #endif } @@ -2694,8 +2637,8 @@ static void calculate_mall_bw_overhead_factor( mall_prefetch_dram_overhead_factor[k] = 2.0; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, mall_prefetch_sdp_overhead_factor = %f\n", __func__, k, mall_prefetch_sdp_overhead_factor[k]); - dml2_printf("DML::%s: k=%u, mall_prefetch_dram_overhead_factor 
= %f\n", __func__, k, mall_prefetch_dram_overhead_factor[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, mall_prefetch_sdp_overhead_factor = %f\n", __func__, k, mall_prefetch_sdp_overhead_factor[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, mall_prefetch_dram_overhead_factor = %f\n", __func__, k, mall_prefetch_dram_overhead_factor[k]); #endif } } @@ -2772,22 +2715,20 @@ static double dml_get_return_bandwidth_available( else // dml2_core_internal_bw_dram return_bw_mbps = derate_dram_bandwidth; -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: is_avg_bw = %u\n", __func__, is_avg_bw); - dml2_printf("DML::%s: is_hvm_en = %u\n", __func__, is_hvm_en); - dml2_printf("DML::%s: is_hvm_only = %u\n", __func__, is_hvm_only); - dml2_printf("DML::%s: state_type = %s\n", __func__, dml2_core_internal_soc_state_type_str(state_type)); - dml2_printf("DML::%s: bw_type = %s\n", __func__, dml2_core_internal_bw_type_str(bw_type)); - dml2_printf("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz); - dml2_printf("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz); - dml2_printf("DML::%s: ideal_sdp_bandwidth = %f\n", __func__, ideal_sdp_bandwidth); - dml2_printf("DML::%s: ideal_fabric_bandwidth = %f\n", __func__, ideal_fabric_bandwidth); - dml2_printf("DML::%s: ideal_dram_bandwidth = %f\n", __func__, ideal_dram_bandwidth); - dml2_printf("DML::%s: derate_sdp_bandwidth = %f (derate %f)\n", __func__, derate_sdp_bandwidth, derate_sdp_factor); - dml2_printf("DML::%s: derate_fabric_bandwidth = %f (derate %f)\n", __func__, derate_fabric_bandwidth, derate_fabric_factor); - dml2_printf("DML::%s: derate_dram_bandwidth = %f (derate %f)\n", __func__, derate_dram_bandwidth, derate_dram_factor); - dml2_printf("DML::%s: return_bw_mbps = %f\n", __func__, return_bw_mbps); -#endif + DML_LOG_VERBOSE("DML::%s: is_avg_bw = %u\n", __func__, is_avg_bw); + DML_LOG_VERBOSE("DML::%s: is_hvm_en = %u\n", __func__, is_hvm_en); + DML_LOG_VERBOSE("DML::%s: is_hvm_only = %u\n", __func__, is_hvm_only); + DML_LOG_VERBOSE("DML::%s: state_type = %s\n", __func__, dml2_core_internal_soc_state_type_str(state_type)); + DML_LOG_VERBOSE("DML::%s: bw_type = %s\n", __func__, dml2_core_internal_bw_type_str(bw_type)); + DML_LOG_VERBOSE("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz); + DML_LOG_VERBOSE("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz); + DML_LOG_VERBOSE("DML::%s: ideal_sdp_bandwidth = %f\n", __func__, ideal_sdp_bandwidth); + DML_LOG_VERBOSE("DML::%s: ideal_fabric_bandwidth = %f\n", __func__, ideal_fabric_bandwidth); + DML_LOG_VERBOSE("DML::%s: ideal_dram_bandwidth = %f\n", __func__, ideal_dram_bandwidth); + DML_LOG_VERBOSE("DML::%s: derate_sdp_bandwidth = %f (derate %f)\n", __func__, derate_sdp_bandwidth, derate_sdp_factor); + DML_LOG_VERBOSE("DML::%s: derate_fabric_bandwidth = %f (derate %f)\n", __func__, derate_fabric_bandwidth, derate_fabric_factor); + DML_LOG_VERBOSE("DML::%s: derate_dram_bandwidth = %f (derate %f)\n", __func__, derate_dram_bandwidth, derate_dram_factor); + DML_LOG_VERBOSE("DML::%s: return_bw_mbps = %f\n", __func__, return_bw_mbps); return return_bw_mbps; } @@ -2807,9 +2748,9 @@ static noinline_for_stack void calculate_bandwidth_available( { unsigned int n, m; - dml2_printf("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz); - dml2_printf("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz); - dml2_printf("DML::%s: dram_bw_mbps = %f\n", __func__, dram_bw_mbps); + DML_LOG_VERBOSE("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz); + DML_LOG_VERBOSE("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz); + DML_LOG_VERBOSE("DML::%s: dram_bw_mbps = %f\n", 
__func__, dram_bw_mbps); // Calculate all the bandwidth available for (m = 0; m < dml2_core_internal_soc_state_max; m++) { @@ -2828,8 +2769,8 @@ static noinline_for_stack void calculate_bandwidth_available( #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: avg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), avg_bandwidth_available[m][n]); - dml2_printf("DML::%s: urg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), urg_bandwidth_available[m][n]); + DML_LOG_VERBOSE("DML::%s: avg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), avg_bandwidth_available[m][n]); + DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), urg_bandwidth_available[m][n]); #endif // urg_bandwidth_available_vm_only is indexed by soc_state @@ -2843,9 +2784,9 @@ static noinline_for_stack void calculate_bandwidth_available( urg_bandwidth_available_min[m] = math_min2(urg_bandwidth_available[m][dml2_core_internal_bw_dram], urg_bandwidth_available[m][dml2_core_internal_bw_sdp]); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: avg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), avg_bandwidth_available_min[m]); - dml2_printf("DML::%s: urg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_min[m]); - dml2_printf("DML::%s: urg_bandwidth_available_vm_only[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_vm_only[n]); + DML_LOG_VERBOSE("DML::%s: avg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), avg_bandwidth_available_min[m]); + DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_min[m]); + DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available_vm_only[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_vm_only[n]); #endif } } @@ -2879,13 +2820,13 @@ static void calculate_avg_bandwidth_required( // SysActive and SVP Prefetch AVG bandwidth Check for (k = 0; k < num_active_planes; ++k) { #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: plane %0d\n", __func__, k); - dml2_printf("DML::%s: ReadBandwidthLuma=%f\n", __func__, ReadBandwidthLuma[k]); - dml2_printf("DML::%s: ReadBandwidthChroma=%f\n", __func__, ReadBandwidthChroma[k]); - dml2_printf("DML::%s: dcc_dram_bw_nom_overhead_factor_p0=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p0[k]); - dml2_printf("DML::%s: dcc_dram_bw_nom_overhead_factor_p1=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p1[k]); - dml2_printf("DML::%s: mall_prefetch_dram_overhead_factor=%f\n", __func__, mall_prefetch_dram_overhead_factor[k]); - dml2_printf("DML::%s: mall_prefetch_sdp_overhead_factor=%f\n", __func__, mall_prefetch_sdp_overhead_factor[k]); + DML_LOG_VERBOSE("DML::%s: plane %0d\n", __func__, k); + DML_LOG_VERBOSE("DML::%s: ReadBandwidthLuma=%f\n", __func__, ReadBandwidthLuma[k]); + DML_LOG_VERBOSE("DML::%s: ReadBandwidthChroma=%f\n", __func__, ReadBandwidthChroma[k]); + DML_LOG_VERBOSE("DML::%s: dcc_dram_bw_nom_overhead_factor_p0=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p0[k]); + DML_LOG_VERBOSE("DML::%s: dcc_dram_bw_nom_overhead_factor_p1=%f\n", __func__, 
dcc_dram_bw_nom_overhead_factor_p1[k]); + DML_LOG_VERBOSE("DML::%s: mall_prefetch_dram_overhead_factor=%f\n", __func__, mall_prefetch_dram_overhead_factor[k]); + DML_LOG_VERBOSE("DML::%s: mall_prefetch_sdp_overhead_factor=%f\n", __func__, mall_prefetch_sdp_overhead_factor[k]); #endif sdp_overhead_factor = mall_prefetch_sdp_overhead_factor[k]; @@ -2902,10 +2843,10 @@ static void calculate_avg_bandwidth_required( avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] += dram_overhead_factor_p0 * ReadBandwidthLuma[k] + dram_overhead_factor_p1 * ReadBandwidthChroma[k] + cursor_bw[k]; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]); - dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram]); - dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp]); - dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram]); + DML_LOG_VERBOSE("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]); + DML_LOG_VERBOSE("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram]); + DML_LOG_VERBOSE("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp]); + DML_LOG_VERBOSE("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram]); #endif } } @@ -3080,10 +3021,10 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch, &p->MaxNumSwathY[k]); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, vm_bytes_l = %u (before hvm level)\n", __func__, k, s->vm_bytes_l); - dml2_printf("DML::%s: k=%u, vm_bytes_c = %u (before hvm level)\n", __func__, k, s->vm_bytes_c); - dml2_printf("DML::%s: k=%u, meta_row_bytes_per_row_ub_l = %u\n", 
__func__, k, s->meta_row_bytes_per_row_ub_l[k]); - dml2_printf("DML::%s: k=%u, meta_row_bytes_per_row_ub_c = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_c[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, vm_bytes_l = %u (before hvm level)\n", __func__, k, s->vm_bytes_l); + DML_LOG_VERBOSE("DML::%s: k=%u, vm_bytes_c = %u (before hvm level)\n", __func__, k, s->vm_bytes_c); + DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_bytes_per_row_ub_l = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_l[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_bytes_per_row_ub_c = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_c[k]); #endif p->vm_bytes[k] = (s->vm_bytes_l + s->vm_bytes_c) * (1 + 8 * s->HostVMDynamicLevels); p->meta_row_bytes[k] = s->meta_row_bytes_per_row_ub_l[k] + s->meta_row_bytes_per_row_ub_c[k]; @@ -3091,8 +3032,8 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch, p->meta_row_bytes_per_row_ub_c[k] = s->meta_row_bytes_per_row_ub_c[k]; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, meta_row_bytes = %u\n", __func__, k, p->meta_row_bytes[k]); - dml2_printf("DML::%s: k=%u, vm_bytes = %u (after hvm level)\n", __func__, k, p->vm_bytes[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_bytes = %u\n", __func__, k, p->meta_row_bytes[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, vm_bytes = %u (after hvm level)\n", __func__, k, p->vm_bytes[k]); #endif if (s->PixelPTEBytesPerRowStorageY[k] <= 64 * s->PTEBufferSizeInRequestsForLuma[k] && s->PixelPTEBytesPerRowStorageC[k] <= 64 * s->PTEBufferSizeInRequestsForChroma[k]) { p->PTEBufferSizeNotExceeded[k] = true; @@ -3104,18 +3045,18 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch, s->PixelPTEBytesPerRowC_one_row_per_frame[k] <= 64 * 2 * s->PTEBufferSizeInRequestsForChroma[k]); #ifdef __DML_VBA_DEBUG__ if (p->PTEBufferSizeNotExceeded[k] == 0 || s->one_row_per_frame_fits_in_buffer[k] == 0) { - dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]); - dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]); - dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowStorageY = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageY[k]); - dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowStorageC = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageC[k]); - dml2_printf("DML::%s: k=%u, PTEBufferSizeInRequestsForLuma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForLuma[k]); - dml2_printf("DML::%s: k=%u, PTEBufferSizeInRequestsForChroma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForChroma[k]); - dml2_printf("DML::%s: k=%u, PTEBufferSizeNotExceeded (not one_row_per_frame) = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowStorageY = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageY[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowStorageC = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageC[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeInRequestsForLuma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForLuma[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeInRequestsForChroma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForChroma[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, 
PTEBufferSizeNotExceeded (not one_row_per_frame) = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]); - dml2_printf("DML::%s: k=%u, HostVMDynamicLevels = %u\n", __func__, k, s->HostVMDynamicLevels); - dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowY_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowY_one_row_per_frame[k]); - dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowC_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowC_one_row_per_frame[k]); - dml2_printf("DML::%s: k=%u, one_row_per_frame_fits_in_buffer = %u\n", __func__, k, s->one_row_per_frame_fits_in_buffer[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, HostVMDynamicLevels = %u\n", __func__, k, s->HostVMDynamicLevels); + DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowY_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowY_one_row_per_frame[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowC_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowC_one_row_per_frame[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, one_row_per_frame_fits_in_buffer = %u\n", __func__, k, s->one_row_per_frame_fits_in_buffer[k]); } #endif } @@ -3146,8 +3087,8 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch, for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) { p->DCCMetaBufferSizeNotExceeded[k] = true; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, SurfaceSizeInMALL = %u\n", __func__, k, p->SurfaceSizeInMALL[k]); - dml2_printf("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, p->is_using_mall_for_ss[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, SurfaceSizeInMALL = %u\n", __func__, k, p->SurfaceSizeInMALL[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, p->is_using_mall_for_ss[k]); #endif p->use_one_row_for_frame[k] = p->myPipe[k].FORCE_ONE_ROW_FOR_FRAME || p->is_using_mall_for_ss[k] || (p->display_cfg->plane_descriptors[k].overrides.legacy_svp_config == dml2_svp_mode_override_main_pipe) || (dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) || (p->display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes > 64 && dml_is_vertical_rotation(p->myPipe[k].RotationAngle)); @@ -3170,9 +3111,9 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch, p->DCCMetaBufferSizeNotExceeded[k] = false; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%d, meta_row_bytes = %d\n", __func__, k, p->meta_row_bytes[k]); - dml2_printf("DML::%s: k=%d, DCCMetaBufferSizeBytes = %d\n", __func__, k, p->DCCMetaBufferSizeBytes); - dml2_printf("DML::%s: k=%d, DCCMetaBufferSizeNotExceeded = %d\n", __func__, k, p->DCCMetaBufferSizeNotExceeded[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, meta_row_bytes = %d\n", __func__, k, p->meta_row_bytes[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, DCCMetaBufferSizeBytes = %d\n", __func__, k, p->DCCMetaBufferSizeBytes); + DML_LOG_VERBOSE("DML::%s: k=%d, DCCMetaBufferSizeNotExceeded = %d\n", __func__, k, p->DCCMetaBufferSizeNotExceeded[k]); #endif } @@ -3209,20 +3150,20 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch, &p->dpte_row_bw[k], &p->meta_row_bw[k]); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]); - dml2_printf("DML::%s: k=%u, use_one_row_for_frame_flip = %u\n", __func__, k, p->use_one_row_for_frame_flip[k]); - dml2_printf("DML::%s: k=%u, UseMALLForPStateChange = %u\n", __func__, k, p->display_cfg->plane_descriptors[k].overrides.legacy_svp_config); - dml2_printf("DML::%s: k=%u, 
dpte_row_height_luma = %u\n", __func__, k, p->dpte_row_height_luma[k]); - dml2_printf("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]); - dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]); - dml2_printf("DML::%s: k=%u, dpte_row_height_chroma = %u\n", __func__, k, p->dpte_row_height_chroma[k]); - dml2_printf("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]); - dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]); - dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRow = %u\n", __func__, k, p->PixelPTEBytesPerRow[k]); - dml2_printf("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]); - dml2_printf("DML::%s: k=%u, gpuvm_enable = %u\n", __func__, k, p->display_cfg->gpuvm_enable); - dml2_printf("DML::%s: k=%u, PTE_BUFFER_MODE = %u\n", __func__, k, p->PTE_BUFFER_MODE[k]); - dml2_printf("DML::%s: k=%u, BIGK_FRAGMENT_SIZE = %u\n", __func__, k, p->BIGK_FRAGMENT_SIZE[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, use_one_row_for_frame_flip = %u\n", __func__, k, p->use_one_row_for_frame_flip[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, UseMALLForPStateChange = %u\n", __func__, k, p->display_cfg->plane_descriptors[k].overrides.legacy_svp_config); + DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_height_luma = %u\n", __func__, k, p->dpte_row_height_luma[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_height_chroma = %u\n", __func__, k, p->dpte_row_height_chroma[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRow = %u\n", __func__, k, p->PixelPTEBytesPerRow[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, gpuvm_enable = %u\n", __func__, k, p->display_cfg->gpuvm_enable); + DML_LOG_VERBOSE("DML::%s: k=%u, PTE_BUFFER_MODE = %u\n", __func__, k, p->PTE_BUFFER_MODE[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, BIGK_FRAGMENT_SIZE = %u\n", __func__, k, p->BIGK_FRAGMENT_SIZE[k]); #endif } } @@ -3257,19 +3198,19 @@ static double CalculateUrgentLatency( } #ifdef __DML_VBA_DEBUG__ if (qos_type == dml2_qos_param_type_dcn4x) { - dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type); - dml2_printf("DML::%s: urgent_ramp_uclk_cycles = %d\n", __func__, urgent_ramp_uclk_cycles); - dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz); - dml2_printf("DML::%s: umc_urgent_ramp_latency_margin = %f\n", __func__, umc_urgent_ramp_latency_margin); + DML_LOG_VERBOSE("DML::%s: qos_type = %d\n", __func__, qos_type); + DML_LOG_VERBOSE("DML::%s: urgent_ramp_uclk_cycles = %d\n", __func__, urgent_ramp_uclk_cycles); + DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz); + DML_LOG_VERBOSE("DML::%s: umc_urgent_ramp_latency_margin = %f\n", __func__, umc_urgent_ramp_latency_margin); } else { - 
dml2_printf("DML::%s: UrgentLatencyPixelDataOnly = %f\n", __func__, UrgentLatencyPixelDataOnly); - dml2_printf("DML::%s: UrgentLatencyPixelMixedWithVMData = %f\n", __func__, UrgentLatencyPixelMixedWithVMData); - dml2_printf("DML::%s: UrgentLatencyVMDataOnly = %f\n", __func__, UrgentLatencyVMDataOnly); - dml2_printf("DML::%s: UrgentLatencyAdjustmentFabricClockComponent = %f\n", __func__, UrgentLatencyAdjustmentFabricClockComponent); - dml2_printf("DML::%s: UrgentLatencyAdjustmentFabricClockReference = %f\n", __func__, UrgentLatencyAdjustmentFabricClockReference); + DML_LOG_VERBOSE("DML::%s: UrgentLatencyPixelDataOnly = %f\n", __func__, UrgentLatencyPixelDataOnly); + DML_LOG_VERBOSE("DML::%s: UrgentLatencyPixelMixedWithVMData = %f\n", __func__, UrgentLatencyPixelMixedWithVMData); + DML_LOG_VERBOSE("DML::%s: UrgentLatencyVMDataOnly = %f\n", __func__, UrgentLatencyVMDataOnly); + DML_LOG_VERBOSE("DML::%s: UrgentLatencyAdjustmentFabricClockComponent = %f\n", __func__, UrgentLatencyAdjustmentFabricClockComponent); + DML_LOG_VERBOSE("DML::%s: UrgentLatencyAdjustmentFabricClockReference = %f\n", __func__, UrgentLatencyAdjustmentFabricClockReference); } - dml2_printf("DML::%s: FabricClock = %f\n", __func__, FabricClock); - dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, urgent_latency); + DML_LOG_VERBOSE("DML::%s: FabricClock = %f\n", __func__, FabricClock); + DML_LOG_VERBOSE("DML::%s: UrgentLatency = %f\n", __func__, urgent_latency); #endif return urgent_latency; } @@ -3296,18 +3237,18 @@ static double CalculateTripToMemory( #ifdef __DML_VBA_DEBUG__ if (qos_type == dml2_qos_param_type_dcn4x) { - dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type); - dml2_printf("DML::%s: max_round_trip_to_furthest_cs_fclk_cycles = %d\n", __func__, max_round_trip_to_furthest_cs_fclk_cycles); - dml2_printf("DML::%s: mall_overhead_fclk_cycles = %d\n", __func__, mall_overhead_fclk_cycles); - dml2_printf("DML::%s: trip_to_memory_uclk_cycles = %d\n", __func__, trip_to_memory_uclk_cycles); - dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz); - dml2_printf("DML::%s: FabricClock = %f\n", __func__, FabricClock); - dml2_printf("DML::%s: fabric_max_transport_latency_margin = %f\n", __func__, fabric_max_transport_latency_margin); - dml2_printf("DML::%s: umc_max_latency_margin = %f\n", __func__, umc_max_latency_margin); + DML_LOG_VERBOSE("DML::%s: qos_type = %d\n", __func__, qos_type); + DML_LOG_VERBOSE("DML::%s: max_round_trip_to_furthest_cs_fclk_cycles = %d\n", __func__, max_round_trip_to_furthest_cs_fclk_cycles); + DML_LOG_VERBOSE("DML::%s: mall_overhead_fclk_cycles = %d\n", __func__, mall_overhead_fclk_cycles); + DML_LOG_VERBOSE("DML::%s: trip_to_memory_uclk_cycles = %d\n", __func__, trip_to_memory_uclk_cycles); + DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz); + DML_LOG_VERBOSE("DML::%s: FabricClock = %f\n", __func__, FabricClock); + DML_LOG_VERBOSE("DML::%s: fabric_max_transport_latency_margin = %f\n", __func__, fabric_max_transport_latency_margin); + DML_LOG_VERBOSE("DML::%s: umc_max_latency_margin = %f\n", __func__, umc_max_latency_margin); } else { - dml2_printf("DML::%s: UrgLatency = %f\n", __func__, UrgLatency); + DML_LOG_VERBOSE("DML::%s: UrgLatency = %f\n", __func__, UrgLatency); } - dml2_printf("DML::%s: trip_to_memory_us = %f\n", __func__, trip_to_memory_us); + DML_LOG_VERBOSE("DML::%s: trip_to_memory_us = %f\n", __func__, trip_to_memory_us); #endif @@ -3334,14 +3275,14 @@ static double CalculateMetaTripToMemory( #ifdef __DML_VBA_DEBUG__ if (qos_type == 
dml2_qos_param_type_dcn4x) { - dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type); - dml2_printf("DML::%s: meta_trip_to_memory_fclk_cycles = %d\n", __func__, meta_trip_to_memory_fclk_cycles); - dml2_printf("DML::%s: meta_trip_to_memory_uclk_cycles = %d\n", __func__, meta_trip_to_memory_uclk_cycles); - dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz); + DML_LOG_VERBOSE("DML::%s: qos_type = %d\n", __func__, qos_type); + DML_LOG_VERBOSE("DML::%s: meta_trip_to_memory_fclk_cycles = %d\n", __func__, meta_trip_to_memory_fclk_cycles); + DML_LOG_VERBOSE("DML::%s: meta_trip_to_memory_uclk_cycles = %d\n", __func__, meta_trip_to_memory_uclk_cycles); + DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz); } else { - dml2_printf("DML::%s: UrgLatency = %f\n", __func__, UrgLatency); + DML_LOG_VERBOSE("DML::%s: UrgLatency = %f\n", __func__, UrgLatency); } - dml2_printf("DML::%s: meta_trip_to_memory_us = %f\n", __func__, meta_trip_to_memory_us); + DML_LOG_VERBOSE("DML::%s: meta_trip_to_memory_us = %f\n", __func__, meta_trip_to_memory_us); #endif @@ -3358,7 +3299,6 @@ static void calculate_cursor_req_attributes( unsigned int *cursor_bytes_per_chunk, unsigned int *cursor_bytes) { - unsigned int cursor_pitch = 0; unsigned int cursor_bytes_per_req = 0; unsigned int cursor_width_bytes = 0; unsigned int cursor_height = 0; @@ -3366,10 +3306,6 @@ static void calculate_cursor_req_attributes( //SW determines the cursor pitch to support the maximum cursor_width that will be used but the following restrictions apply. //- For 2bpp, cursor_pitch = 256 pixels due to min cursor request size of 64B //- For 32 or 64 bpp, cursor_pitch = 64, 128 or 256 pixels depending on the cursor width - if (cursor_bpp == 2) - cursor_pitch = 256; - else - cursor_pitch = (unsigned int)1 << (unsigned int)math_ceil2(math_log((float)cursor_width, 2), 1); //The cursor requestor uses a cursor request size of 64B, 128B, or 256B depending on the cursor_width and cursor_bpp as follows. 
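[The hunk above drops the local cursor_pitch computation; the same pitch rule now survives only as an inline ternary inside the DML_LOG_VERBOSE() call later in this function. A minimal standalone sketch of that rule, assuming libm's ceil()/log2() in place of the driver's math_ceil2()/math_log() helpers, with a hypothetical cursor_pitch_pixels() name used purely for illustration:

#include <math.h>

/* Sketch of the pitch restrictions described in the comments above:
 * 2bpp cursors get a fixed 256-pixel pitch (64B minimum request size);
 * 32/64 bpp cursors round the width up to the next power of two
 * (64, 128 or 256 pixels for the supported widths).
 * Assumes cursor_width >= 1.
 */
static unsigned int cursor_pitch_pixels(unsigned int cursor_bpp,
					unsigned int cursor_width)
{
	if (cursor_bpp == 2)
		return 256;
	return 1u << (unsigned int)ceil(log2((double)cursor_width));
}

Folding this expression into the cursor_pitch log statement below keeps the value visible in verbose builds without leaving an otherwise-unused local variable behind when logging is compiled out.]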
@@ -3409,8 +3345,8 @@ static void calculate_cursor_req_attributes( *cursor_lines_per_chunk = 1; } else { if (cursor_width > 0) { - dml2_printf("DML::%s: Invalid cursor_bpp = %d\n", __func__, cursor_bpp); - DML2_ASSERT(0); + DML_LOG_VERBOSE("DML::%s: Invalid cursor_bpp = %d\n", __func__, cursor_bpp); + DML_ASSERT(0); } } @@ -3421,15 +3357,15 @@ static void calculate_cursor_req_attributes( cursor_height = cursor_width; *cursor_bytes = *cursor_bytes_per_line * cursor_height; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: cursor_bpp = %d\n", __func__, cursor_bpp); - dml2_printf("DML::%s: cursor_width = %d\n", __func__, cursor_width); - dml2_printf("DML::%s: cursor_width_bytes = %d\n", __func__, cursor_width_bytes); - dml2_printf("DML::%s: cursor_bytes_per_req = %d\n", __func__, cursor_bytes_per_req); - dml2_printf("DML::%s: cursor_lines_per_chunk = %d\n", __func__, *cursor_lines_per_chunk); - dml2_printf("DML::%s: cursor_bytes_per_line = %d\n", __func__, *cursor_bytes_per_line); - dml2_printf("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, *cursor_bytes_per_chunk); - dml2_printf("DML::%s: cursor_bytes = %d\n", __func__, *cursor_bytes); - dml2_printf("DML::%s: cursor_pitch = %d\n", __func__, cursor_pitch); + DML_LOG_VERBOSE("DML::%s: cursor_bpp = %d\n", __func__, cursor_bpp); + DML_LOG_VERBOSE("DML::%s: cursor_width = %d\n", __func__, cursor_width); + DML_LOG_VERBOSE("DML::%s: cursor_width_bytes = %d\n", __func__, cursor_width_bytes); + DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_req = %d\n", __func__, cursor_bytes_per_req); + DML_LOG_VERBOSE("DML::%s: cursor_lines_per_chunk = %d\n", __func__, *cursor_lines_per_chunk); + DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_line = %d\n", __func__, *cursor_bytes_per_line); + DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, *cursor_bytes_per_chunk); + DML_LOG_VERBOSE("DML::%s: cursor_bytes = %d\n", __func__, *cursor_bytes); + DML_LOG_VERBOSE("DML::%s: cursor_pitch = %d\n", __func__, cursor_bpp == 2 ? 
256 : (unsigned int)1 << (unsigned int)math_ceil2(math_log((float)cursor_width, 2), 1)); #endif } @@ -3460,13 +3396,13 @@ static void calculate_cursor_urgent_burst_factor( } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: LinesInCursorBuffer = %u\n", __func__, LinesInCursorBuffer); - dml2_printf("DML::%s: CursorBufferSizeInTime = %f\n", __func__, CursorBufferSizeInTime); - dml2_printf("DML::%s: CursorBufferSize = %u (kbytes)\n", __func__, CursorBufferSize); - dml2_printf("DML::%s: cursor_bytes_per_chunk = %u\n", __func__, cursor_bytes_per_chunk); - dml2_printf("DML::%s: cursor_lines_per_chunk = %u\n", __func__, cursor_lines_per_chunk); - dml2_printf("DML::%s: UrgentBurstFactorCursor = %f\n", __func__, *UrgentBurstFactorCursor); - dml2_printf("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding); + DML_LOG_VERBOSE("DML::%s: LinesInCursorBuffer = %u\n", __func__, LinesInCursorBuffer); + DML_LOG_VERBOSE("DML::%s: CursorBufferSizeInTime = %f\n", __func__, CursorBufferSizeInTime); + DML_LOG_VERBOSE("DML::%s: CursorBufferSize = %u (kbytes)\n", __func__, CursorBufferSize); + DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_chunk = %u\n", __func__, cursor_bytes_per_chunk); + DML_LOG_VERBOSE("DML::%s: cursor_lines_per_chunk = %u\n", __func__, cursor_lines_per_chunk); + DML_LOG_VERBOSE("DML::%s: UrgentBurstFactorCursor = %f\n", __func__, *UrgentBurstFactorCursor); + DML_LOG_VERBOSE("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding); #endif } @@ -3501,15 +3437,15 @@ static void CalculateUrgentBurstFactor( *UrgentBurstFactorChroma = 0; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: VRatio = %f\n", __func__, VRatio); - dml2_printf("DML::%s: VRatioC = %f\n", __func__, VRatioC); - dml2_printf("DML::%s: DETBufferSizeY = %d\n", __func__, DETBufferSizeY); - dml2_printf("DML::%s: DETBufferSizeC = %d\n", __func__, DETBufferSizeC); - dml2_printf("DML::%s: BytePerPixelInDETY = %f\n", __func__, BytePerPixelInDETY); - dml2_printf("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub); - dml2_printf("DML::%s: LineTime = %f\n", __func__, LineTime); + DML_LOG_VERBOSE("DML::%s: VRatio = %f\n", __func__, VRatio); + DML_LOG_VERBOSE("DML::%s: VRatioC = %f\n", __func__, VRatioC); + DML_LOG_VERBOSE("DML::%s: DETBufferSizeY = %d\n", __func__, DETBufferSizeY); + DML_LOG_VERBOSE("DML::%s: DETBufferSizeC = %d\n", __func__, DETBufferSizeC); + DML_LOG_VERBOSE("DML::%s: BytePerPixelInDETY = %f\n", __func__, BytePerPixelInDETY); + DML_LOG_VERBOSE("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub); + DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, LineTime); #endif - DML2_ASSERT(VRatio > 0); + DML_ASSERT(VRatio > 0); LinesInDETLuma = (dml_is_phantom_pipe(plane_cfg) ? 
1024 * 1024 : DETBufferSizeY) / BytePerPixelInDETY / swath_width_luma_ub; @@ -3534,12 +3470,12 @@ static void CalculateUrgentBurstFactor( } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: LinesInDETLuma = %f\n", __func__, LinesInDETLuma); - dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency); - dml2_printf("DML::%s: DETBufferSizeInTimeLuma = %f\n", __func__, DETBufferSizeInTimeLuma); - dml2_printf("DML::%s: UrgentBurstFactorLuma = %f\n", __func__, *UrgentBurstFactorLuma); - dml2_printf("DML::%s: UrgentBurstFactorChroma = %f\n", __func__, *UrgentBurstFactorChroma); - dml2_printf("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding); + DML_LOG_VERBOSE("DML::%s: LinesInDETLuma = %f\n", __func__, LinesInDETLuma); + DML_LOG_VERBOSE("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency); + DML_LOG_VERBOSE("DML::%s: DETBufferSizeInTimeLuma = %f\n", __func__, DETBufferSizeInTimeLuma); + DML_LOG_VERBOSE("DML::%s: UrgentBurstFactorLuma = %f\n", __func__, *UrgentBurstFactorLuma); + DML_LOG_VERBOSE("DML::%s: UrgentBurstFactorChroma = %f\n", __func__, *UrgentBurstFactorChroma); + DML_LOG_VERBOSE("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding); #endif } @@ -3600,10 +3536,10 @@ static void CalculateDCFCLKDeepSleepTdlut( if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut && tdlut_bytes_to_deliver[k] > 0) { double tdlut_required_deepsleep_dcfclk = (double) tdlut_bytes_to_deliver[k] / 64.0 / prefetch_swath_time_us[k]; - dml2_printf("DML::%s: k=%d, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]); - dml2_printf("DML::%s: k=%d, tdlut_bytes_to_deliver = %d\n", __func__, k, tdlut_bytes_to_deliver[k]); - dml2_printf("DML::%s: k=%d, prefetch_swath_time_us = %f\n", __func__, k, prefetch_swath_time_us[k]); - dml2_printf("DML::%s: k=%d, tdlut_required_deepsleep_dcfclk = %f\n", __func__, k, tdlut_required_deepsleep_dcfclk); + DML_LOG_VERBOSE("DML::%s: k=%d, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, tdlut_bytes_to_deliver = %d\n", __func__, k, tdlut_bytes_to_deliver[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, prefetch_swath_time_us = %f\n", __func__, k, prefetch_swath_time_us[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, tdlut_required_deepsleep_dcfclk = %f\n", __func__, k, tdlut_required_deepsleep_dcfclk); // increase the deepsleep dcfclk to match the original dispclk throughput rate if (tdlut_required_deepsleep_dcfclk > DCFClkDeepSleepPerSurface[k]) { @@ -3613,8 +3549,8 @@ static void CalculateDCFCLKDeepSleepTdlut( } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, PixelClock = %f\n", __func__, k, pixel_rate_mhz); - dml2_printf("DML::%s: k=%u, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, PixelClock = %f\n", __func__, k, pixel_rate_mhz); + DML_LOG_VERBOSE("DML::%s: k=%u, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]); #endif } @@ -3625,17 +3561,17 @@ static void CalculateDCFCLKDeepSleepTdlut( *DCFClkDeepSleep = math_max2(8.0, __DML2_CALCS_DCFCLK_FACTOR__ * ReadBandwidth / (double)ReturnBusWidth); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: __DML2_CALCS_DCFCLK_FACTOR__ = %f\n", __func__, __DML2_CALCS_DCFCLK_FACTOR__); - dml2_printf("DML::%s: ReadBandwidth = %f\n", __func__, ReadBandwidth); - dml2_printf("DML::%s: ReturnBusWidth = %u\n", __func__, ReturnBusWidth); - dml2_printf("DML::%s: DCFClkDeepSleep = %f\n", 
__func__, *DCFClkDeepSleep); + DML_LOG_VERBOSE("DML::%s: __DML2_CALCS_DCFCLK_FACTOR__ = %f\n", __func__, __DML2_CALCS_DCFCLK_FACTOR__); + DML_LOG_VERBOSE("DML::%s: ReadBandwidth = %f\n", __func__, ReadBandwidth); + DML_LOG_VERBOSE("DML::%s: ReturnBusWidth = %u\n", __func__, ReturnBusWidth); + DML_LOG_VERBOSE("DML::%s: DCFClkDeepSleep = %f\n", __func__, *DCFClkDeepSleep); #endif for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) { *DCFClkDeepSleep = math_max2(*DCFClkDeepSleep, DCFClkDeepSleepPerSurface[k]); } - dml2_printf("DML::%s: DCFClkDeepSleep = %f (final)\n", __func__, *DCFClkDeepSleep); + DML_LOG_VERBOSE("DML::%s: DCFClkDeepSleep = %f (final)\n", __func__, *DCFClkDeepSleep); } static noinline_for_stack void CalculateDCFCLKDeepSleep( @@ -3731,12 +3667,12 @@ static unsigned int CalculateMaxVStartup( else max_vstartup_lines = vblank_size - (unsigned int)math_max2(1.0, math_ceil2(write_back_delay_us / line_time_us, 1.0)); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: VBlankNom = %u\n", __func__, timing->vblank_nom); - dml2_printf("DML::%s: vblank_nom_default_us = %u\n", __func__, vblank_nom_default_us); - dml2_printf("DML::%s: line_time_us = %f\n", __func__, line_time_us); - dml2_printf("DML::%s: vblank_actual = %u\n", __func__, vblank_actual); - dml2_printf("DML::%s: vblank_avail = %u\n", __func__, vblank_avail); - dml2_printf("DML::%s: max_vstartup_lines = %u\n", __func__, max_vstartup_lines); + DML_LOG_VERBOSE("DML::%s: VBlankNom = %lu\n", __func__, timing->vblank_nom); + DML_LOG_VERBOSE("DML::%s: vblank_nom_default_us = %u\n", __func__, vblank_nom_default_us); + DML_LOG_VERBOSE("DML::%s: line_time_us = %f\n", __func__, line_time_us); + DML_LOG_VERBOSE("DML::%s: vblank_actual = %u\n", __func__, vblank_actual); + DML_LOG_VERBOSE("DML::%s: vblank_avail = %u\n", __func__, vblank_avail); + DML_LOG_VERBOSE("DML::%s: max_vstartup_lines = %u\n", __func__, max_vstartup_lines); #endif max_vstartup_lines = (unsigned int)math_min2(max_vstartup_lines, DML_MAX_VSTARTUP_START); return max_vstartup_lines; @@ -3761,9 +3697,9 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch const long MAXIMUMCOMPRESSION = 4; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: ForceSingleDPP = %u\n", __func__, p->ForceSingleDPP); + DML_LOG_VERBOSE("DML::%s: ForceSingleDPP = %u\n", __func__, p->ForceSingleDPP); for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) { - dml2_printf("DML::%s: DPPPerSurface[%u] = %u\n", __func__, k, p->DPPPerSurface[k]); + DML_LOG_VERBOSE("DML::%s: DPPPerSurface[%u] = %u\n", __func__, k, p->DPPPerSurface[k]); } #endif CalculateSwathWidth( @@ -3797,15 +3733,15 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch p->full_swath_bytes_l[k] = (unsigned int)(p->swath_width_luma_ub[k] * p->BytePerPixDETY[k] * MaximumSwathHeightY[k]); p->full_swath_bytes_c[k] = (unsigned int)(p->swath_width_chroma_ub[k] * p->BytePerPixDETC[k] * MaximumSwathHeightC[k]); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, p->DPPPerSurface[k]); - dml2_printf("DML::%s: k=%u swath_width_luma_ub = %u\n", __func__, k, p->swath_width_luma_ub[k]); - dml2_printf("DML::%s: k=%u BytePerPixDETY = %f\n", __func__, k, p->BytePerPixDETY[k]); - dml2_printf("DML::%s: k=%u MaximumSwathHeightY = %u\n", __func__, k, MaximumSwathHeightY[k]); - dml2_printf("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]); - dml2_printf("DML::%s: k=%u swath_width_chroma_ub = %u\n", __func__, k, 
p->swath_width_chroma_ub[k]); - dml2_printf("DML::%s: k=%u BytePerPixDETC = %f\n", __func__, k, p->BytePerPixDETC[k]); - dml2_printf("DML::%s: k=%u MaximumSwathHeightC = %u\n", __func__, k, MaximumSwathHeightC[k]); - dml2_printf("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]); + DML_LOG_VERBOSE("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, p->DPPPerSurface[k]); + DML_LOG_VERBOSE("DML::%s: k=%u swath_width_luma_ub = %u\n", __func__, k, p->swath_width_luma_ub[k]); + DML_LOG_VERBOSE("DML::%s: k=%u BytePerPixDETY = %f\n", __func__, k, p->BytePerPixDETY[k]); + DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathHeightY = %u\n", __func__, k, MaximumSwathHeightY[k]); + DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]); + DML_LOG_VERBOSE("DML::%s: k=%u swath_width_chroma_ub = %u\n", __func__, k, p->swath_width_chroma_ub[k]); + DML_LOG_VERBOSE("DML::%s: k=%u BytePerPixDETC = %f\n", __func__, k, p->BytePerPixDETC[k]); + DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathHeightC = %u\n", __func__, k, MaximumSwathHeightC[k]); + DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]); #endif if (p->display_cfg->plane_descriptors[k].pixel_format == dml2_420_10) { p->full_swath_bytes_l[k] = (unsigned int)(math_ceil2((double)p->full_swath_bytes_l[k], 256)); @@ -3848,11 +3784,11 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch p->CompressedBufferSizeInkByte); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: TotalActiveDPP = %u\n", __func__, TotalActiveDPP); - dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, p->nomDETInKByte); - dml2_printf("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, p->ConfigReturnBufferSizeInKByte); - dml2_printf("DML::%s: UnboundedRequestEnabled = %u\n", __func__, *p->UnboundedRequestEnabled); - dml2_printf("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *p->CompressedBufferSizeInkByte); + DML_LOG_VERBOSE("DML::%s: TotalActiveDPP = %u\n", __func__, TotalActiveDPP); + DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u\n", __func__, p->nomDETInKByte); + DML_LOG_VERBOSE("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, p->ConfigReturnBufferSizeInKByte); + DML_LOG_VERBOSE("DML::%s: UnboundedRequestEnabled = %u\n", __func__, *p->UnboundedRequestEnabled); + DML_LOG_VERBOSE("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *p->CompressedBufferSizeInkByte); #endif *p->ViewportSizeSupport = true; @@ -3860,7 +3796,7 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch DETBufferSizeInKByteForSwathCalculation = (dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k]) ? 
1024 : p->DETBufferSizeInKByte[k]); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation = %u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation); + DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation = %u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation); #endif if (p->display_cfg->plane_descriptors[k].surface.tiling == dml2_sw_linear) { p->SwathHeightY[k] = MaximumSwathHeightY[k]; @@ -3917,13 +3853,13 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch if ((p->full_swath_bytes_l[k] / 2 + p->full_swath_bytes_c[k] / 2 > DETBufferSizeInKByteForSwathCalculation * 1024 / 2) || p->SwathWidth[k] > p->MaximumSwathWidthLuma[k] || (p->SwathHeightC[k] > 0 && p->SwathWidthChroma[k] > p->MaximumSwathWidthChroma[k])) { *p->ViewportSizeSupport = false; - dml2_printf("DML::%s: k=%u full_swath_bytes_l=%u\n", __func__, k, p->full_swath_bytes_l[k]); - dml2_printf("DML::%s: k=%u full_swath_bytes_c=%u\n", __func__, k, p->full_swath_bytes_c[k]); - dml2_printf("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation=%u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation); - dml2_printf("DML::%s: k=%u SwathWidth=%u\n", __func__, k, p->SwathWidth[k]); - dml2_printf("DML::%s: k=%u MaximumSwathWidthLuma=%f\n", __func__, k, p->MaximumSwathWidthLuma[k]); - dml2_printf("DML::%s: k=%u SwathWidthChroma=%d\n", __func__, k, p->SwathWidthChroma[k]); - dml2_printf("DML::%s: k=%u MaximumSwathWidthChroma=%f\n", __func__, k, p->MaximumSwathWidthChroma[k]); + DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_l=%u\n", __func__, k, p->full_swath_bytes_l[k]); + DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_c=%u\n", __func__, k, p->full_swath_bytes_c[k]); + DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation=%u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation); + DML_LOG_VERBOSE("DML::%s: k=%u SwathWidth=%u\n", __func__, k, p->SwathWidth[k]); + DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthLuma=%f\n", __func__, k, p->MaximumSwathWidthLuma[k]); + DML_LOG_VERBOSE("DML::%s: k=%u SwathWidthChroma=%d\n", __func__, k, p->SwathWidthChroma[k]); + DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthChroma=%f\n", __func__, k, p->MaximumSwathWidthChroma[k]); p->ViewportSizeSupportPerSurface[k] = false; } else { p->ViewportSizeSupportPerSurface[k] = true; @@ -3931,35 +3867,35 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch if (p->SwathHeightC[k] == 0) { #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, All DET will be used for plane0\n", __func__, k); + DML_LOG_VERBOSE("DML::%s: k=%u, All DET will be used for plane0\n", __func__, k); #endif p->DETBufferSizeY[k] = p->DETBufferSizeInKByte[k] * 1024; p->DETBufferSizeC[k] = 0; } else if (RoundedUpSwathSizeBytesY[k] <= 1.5 * RoundedUpSwathSizeBytesC[k]) { #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, Half DET will be used for plane0, and half for plane1\n", __func__, k); + DML_LOG_VERBOSE("DML::%s: k=%u, Half DET will be used for plane0, and half for plane1\n", __func__, k); #endif p->DETBufferSizeY[k] = p->DETBufferSizeInKByte[k] * 1024 / 2; p->DETBufferSizeC[k] = p->DETBufferSizeInKByte[k] * 1024 / 2; } else { #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, 2/3 DET will be used for plane0, and 1/3 for plane1\n", __func__, k); + DML_LOG_VERBOSE("DML::%s: k=%u, 2/3 DET will be used for plane0, and 1/3 for plane1\n", __func__, k); #endif p->DETBufferSizeY[k] = (unsigned int)(math_floor2(p->DETBufferSizeInKByte[k] 
* 1024 * 2 / 3, 1024)); p->DETBufferSizeC[k] = p->DETBufferSizeInKByte[k] * 1024 - p->DETBufferSizeY[k]; } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]); - dml2_printf("DML::%s: k=%u SwathHeightC = %u\n", __func__, k, p->SwathHeightC[k]); - dml2_printf("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]); - dml2_printf("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]); - dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesY = %u\n", __func__, k, RoundedUpSwathSizeBytesY[k]); - dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]); - dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, p->DETBufferSizeInKByte[k]); - dml2_printf("DML::%s: k=%u DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]); - dml2_printf("DML::%s: k=%u DETBufferSizeC = %u\n", __func__, k, p->DETBufferSizeC[k]); - dml2_printf("DML::%s: k=%u ViewportSizeSupportPerSurface = %u\n", __func__, k, p->ViewportSizeSupportPerSurface[k]); + DML_LOG_VERBOSE("DML::%s: k=%u SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]); + DML_LOG_VERBOSE("DML::%s: k=%u SwathHeightC = %u\n", __func__, k, p->SwathHeightC[k]); + DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]); + DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]); + DML_LOG_VERBOSE("DML::%s: k=%u RoundedUpSwathSizeBytesY = %u\n", __func__, k, RoundedUpSwathSizeBytesY[k]); + DML_LOG_VERBOSE("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]); + DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, p->DETBufferSizeInKByte[k]); + DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]); + DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeC = %u\n", __func__, k, p->DETBufferSizeC[k]); + DML_LOG_VERBOSE("DML::%s: k=%u ViewportSizeSupportPerSurface = %u\n", __func__, k, p->ViewportSizeSupportPerSurface[k]); #endif } @@ -3969,12 +3905,12 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch *p->compbuf_reserved_space_64b = (unsigned int)math_ceil2(math_max2(*p->compbuf_reserved_space_64b, (double)(p->rob_buffer_size_kbytes * 1024 / 64) - (double)(RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest] * TTUFIFODEPTH / (p->mrq_present ? 
MAXIMUMCOMPRESSION : 1) / 64)), 1.0); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: RoundedUpSwathSizeBytesY[%d] = %u\n", __func__, SurfaceDoingUnboundedRequest, RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest]); - dml2_printf("DML::%s: rob_buffer_size_kbytes = %u\n", __func__, p->rob_buffer_size_kbytes); + DML_LOG_VERBOSE("DML::%s: RoundedUpSwathSizeBytesY[%d] = %u\n", __func__, SurfaceDoingUnboundedRequest, RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest]); + DML_LOG_VERBOSE("DML::%s: rob_buffer_size_kbytes = %u\n", __func__, p->rob_buffer_size_kbytes); #endif } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: compbuf_reserved_space_64b = %u\n", __func__, *p->compbuf_reserved_space_64b); + DML_LOG_VERBOSE("DML::%s: compbuf_reserved_space_64b = %u\n", __func__, *p->compbuf_reserved_space_64b); #endif *p->hw_debug5 = false; @@ -3989,12 +3925,12 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch + *p->CompressedBufferSizeInkByte * MAXIMUMCOMPRESSION * 1024) > TTUFIFODEPTH * (RoundedUpSwathSizeBytesY[k] + RoundedUpSwathSizeBytesC[k]))) *p->hw_debug5 = true; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u UnboundedRequestEnabled = %u\n", __func__, k, *p->UnboundedRequestEnabled); - dml2_printf("DML::%s: k=%u MAXIMUMCOMPRESSION = %lu\n", __func__, k, MAXIMUMCOMPRESSION); - dml2_printf("DML::%s: k=%u TTUFIFODEPTH = %lu\n", __func__, k, TTUFIFODEPTH); - dml2_printf("DML::%s: k=%u CompressedBufferSizeInkByte = %u\n", __func__, k, *p->CompressedBufferSizeInkByte); - dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]); - dml2_printf("DML::%s: k=%u hw_debug5 = %u\n", __func__, k, *p->hw_debug5); + DML_LOG_VERBOSE("DML::%s: k=%u UnboundedRequestEnabled = %u\n", __func__, k, *p->UnboundedRequestEnabled); + DML_LOG_VERBOSE("DML::%s: k=%u MAXIMUMCOMPRESSION = %lu\n", __func__, k, MAXIMUMCOMPRESSION); + DML_LOG_VERBOSE("DML::%s: k=%u TTUFIFODEPTH = %lu\n", __func__, k, TTUFIFODEPTH); + DML_LOG_VERBOSE("DML::%s: k=%u CompressedBufferSizeInkByte = %u\n", __func__, k, *p->CompressedBufferSizeInkByte); + DML_LOG_VERBOSE("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]); + DML_LOG_VERBOSE("DML::%s: k=%u hw_debug5 = %u\n", __func__, k, *p->hw_debug5); #endif } #endif @@ -4192,15 +4128,15 @@ static noinline_for_stack void CalculateODMMode( SurfaceRequiredDISPCLKWithODMCombineThreeToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_3to1, PixelClock); SurfaceRequiredDISPCLKWithODMCombineFourToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_4to1, PixelClock); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: ODMUse = %d\n", __func__, ODMUse); - dml2_printf("DML::%s: Output = %d\n", __func__, Output); - dml2_printf("DML::%s: DSCEnable = %d\n", __func__, DSCEnable); - dml2_printf("DML::%s: MaxDispclk = %f\n", __func__, MaxDispclk); - dml2_printf("DML::%s: MaximumPixelsPerLinePerDSCUnit = %d\n", __func__, MaximumPixelsPerLinePerDSCUnit); - dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithoutODMCombine = %f\n", __func__, SurfaceRequiredDISPCLKWithoutODMCombine); - dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithODMCombineTwoToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineTwoToOne); - dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithODMCombineThreeToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineThreeToOne); - dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithODMCombineFourToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineFourToOne); + 
DML_LOG_VERBOSE("DML::%s: ODMUse = %d\n", __func__, ODMUse); + DML_LOG_VERBOSE("DML::%s: Output = %d\n", __func__, Output); + DML_LOG_VERBOSE("DML::%s: DSCEnable = %d\n", __func__, DSCEnable); + DML_LOG_VERBOSE("DML::%s: MaxDispclk = %f\n", __func__, MaxDispclk); + DML_LOG_VERBOSE("DML::%s: MaximumPixelsPerLinePerDSCUnit = %d\n", __func__, MaximumPixelsPerLinePerDSCUnit); + DML_LOG_VERBOSE("DML::%s: SurfaceRequiredDISPCLKWithoutODMCombine = %f\n", __func__, SurfaceRequiredDISPCLKWithoutODMCombine); + DML_LOG_VERBOSE("DML::%s: SurfaceRequiredDISPCLKWithODMCombineTwoToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineTwoToOne); + DML_LOG_VERBOSE("DML::%s: SurfaceRequiredDISPCLKWithODMCombineThreeToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineThreeToOne); + DML_LOG_VERBOSE("DML::%s: SurfaceRequiredDISPCLKWithODMCombineFourToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineFourToOne); #endif if (ODMUse == dml2_odm_mode_auto) DecidedODMMode = DecideODMMode(HActive, @@ -4245,10 +4181,10 @@ static noinline_for_stack void CalculateODMMode( *NumberOfDPP = NumberOfDPPRequired; *RequiredDISPCLKPerSurface = success ? DISPCLKRequired : 0; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: ODMMode = %d\n", __func__, *ODMMode); - dml2_printf("DML::%s: NumberOfDPP = %d\n", __func__, *NumberOfDPP); - dml2_printf("DML::%s: TotalAvailablePipesSupport = %d\n", __func__, *TotalAvailablePipesSupport); - dml2_printf("DML::%s: RequiredDISPCLKPerSurface = %f\n", __func__, *RequiredDISPCLKPerSurface); + DML_LOG_VERBOSE("DML::%s: ODMMode = %d\n", __func__, *ODMMode); + DML_LOG_VERBOSE("DML::%s: NumberOfDPP = %d\n", __func__, *NumberOfDPP); + DML_LOG_VERBOSE("DML::%s: TotalAvailablePipesSupport = %d\n", __func__, *TotalAvailablePipesSupport); + DML_LOG_VERBOSE("DML::%s: RequiredDISPCLKPerSurface = %f\n", __func__, *RequiredDISPCLKPerSurface); #endif } @@ -4292,17 +4228,17 @@ static noinline_for_stack void CalculateOutputLink( *OutputRate = dml2_core_internal_output_rate_unknown; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: DSCEnable = %u (dis, en, en_if_necessary)\n", __func__, DSCEnable); - dml2_printf("DML::%s: PHYCLK = %f\n", __func__, PHYCLK); - dml2_printf("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd); - dml2_printf("DML::%s: AudioSampleRate = %f\n", __func__, AudioSampleRate); - dml2_printf("DML::%s: HActive = %u\n", __func__, HActive); - dml2_printf("DML::%s: HTotal = %u\n", __func__, HTotal); - dml2_printf("DML::%s: ODMModeNoDSC = %u\n", __func__, ODMModeNoDSC); - dml2_printf("DML::%s: ODMModeDSC = %u\n", __func__, ODMModeDSC); - dml2_printf("DML::%s: ForcedOutputLinkBPP = %f\n", __func__, ForcedOutputLinkBPP); - dml2_printf("DML::%s: Output (encoder) = %u\n", __func__, Output); - dml2_printf("DML::%s: OutputLinkDPRate = %u\n", __func__, OutputLinkDPRate); + DML_LOG_VERBOSE("DML::%s: DSCEnable = %u (dis, en, en_if_necessary)\n", __func__, DSCEnable); + DML_LOG_VERBOSE("DML::%s: PHYCLK = %f\n", __func__, PHYCLK); + DML_LOG_VERBOSE("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd); + DML_LOG_VERBOSE("DML::%s: AudioSampleRate = %f\n", __func__, AudioSampleRate); + DML_LOG_VERBOSE("DML::%s: HActive = %u\n", __func__, HActive); + DML_LOG_VERBOSE("DML::%s: HTotal = %u\n", __func__, HTotal); + DML_LOG_VERBOSE("DML::%s: ODMModeNoDSC = %u\n", __func__, ODMModeNoDSC); + DML_LOG_VERBOSE("DML::%s: ODMModeDSC = %u\n", __func__, ODMModeDSC); + DML_LOG_VERBOSE("DML::%s: ForcedOutputLinkBPP = %f\n", __func__, ForcedOutputLinkBPP); + 
DML_LOG_VERBOSE("DML::%s: Output (encoder) = %u\n", __func__, Output); + DML_LOG_VERBOSE("DML::%s: OutputLinkDPRate = %u\n", __func__, OutputLinkDPRate); #endif { if (Output == dml2_hdmi) { @@ -4487,9 +4423,9 @@ static noinline_for_stack void CalculateOutputLink( } } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: RequiresDSC = %u\n", __func__, *RequiresDSC); - dml2_printf("DML::%s: RequiresFEC = %u\n", __func__, *RequiresFEC); - dml2_printf("DML::%s: OutBpp = %f\n", __func__, *OutBpp); + DML_LOG_VERBOSE("DML::%s: RequiresDSC = %u\n", __func__, *RequiresDSC); + DML_LOG_VERBOSE("DML::%s: RequiresFEC = %u\n", __func__, *RequiresFEC); + DML_LOG_VERBOSE("DML::%s: OutBpp = %f\n", __func__, *OutBpp); #endif } @@ -4571,17 +4507,17 @@ static unsigned int DSCDelayRequirement( DSCDelayRequirement_val = 0; } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: DSCEnabled= %u\n", __func__, DSCEnabled); - dml2_printf("DML::%s: ODMMode = %u\n", __func__, ODMMode); - dml2_printf("DML::%s: OutputBpp = %f\n", __func__, OutputBpp); - dml2_printf("DML::%s: HActive = %u\n", __func__, HActive); - dml2_printf("DML::%s: HTotal = %u\n", __func__, HTotal); - dml2_printf("DML::%s: PixelClock = %f\n", __func__, PixelClock); - dml2_printf("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd); - dml2_printf("DML::%s: OutputFormat = %u\n", __func__, OutputFormat); - dml2_printf("DML::%s: DSCInputBitPerComponent = %u\n", __func__, DSCInputBitPerComponent); - dml2_printf("DML::%s: NumberOfDSCSlices = %u\n", __func__, NumberOfDSCSlices); - dml2_printf("DML::%s: DSCDelayRequirement_val = %u\n", __func__, DSCDelayRequirement_val); + DML_LOG_VERBOSE("DML::%s: DSCEnabled= %u\n", __func__, DSCEnabled); + DML_LOG_VERBOSE("DML::%s: ODMMode = %u\n", __func__, ODMMode); + DML_LOG_VERBOSE("DML::%s: OutputBpp = %f\n", __func__, OutputBpp); + DML_LOG_VERBOSE("DML::%s: HActive = %u\n", __func__, HActive); + DML_LOG_VERBOSE("DML::%s: HTotal = %u\n", __func__, HTotal); + DML_LOG_VERBOSE("DML::%s: PixelClock = %f\n", __func__, PixelClock); + DML_LOG_VERBOSE("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd); + DML_LOG_VERBOSE("DML::%s: OutputFormat = %u\n", __func__, OutputFormat); + DML_LOG_VERBOSE("DML::%s: DSCInputBitPerComponent = %u\n", __func__, DSCInputBitPerComponent); + DML_LOG_VERBOSE("DML::%s: NumberOfDSCSlices = %u\n", __func__, NumberOfDSCSlices); + DML_LOG_VERBOSE("DML::%s: DSCDelayRequirement_val = %u\n", __func__, DSCDelayRequirement_val); #endif return DSCDelayRequirement_val; @@ -4654,10 +4590,10 @@ static void CalculateSurfaceSizeInMall( (TotalSurfaceSizeInMALLForSubVP > MALLAllocatedForDCNInBytes); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: MALLAllocatedForDCN = %u\n", __func__, MALLAllocatedForDCN * 1024 * 1024); - dml2_printf("DML::%s: TotalSurfaceSizeInMALLForSubVP = %u\n", __func__, TotalSurfaceSizeInMALLForSubVP); - dml2_printf("DML::%s: TotalSurfaceSizeInMALLForSS = %u\n", __func__, TotalSurfaceSizeInMALLForSS); - dml2_printf("DML::%s: ExceededMALLSize = %u\n", __func__, *ExceededMALLSize); + DML_LOG_VERBOSE("DML::%s: MALLAllocatedForDCN = %u\n", __func__, MALLAllocatedForDCN * 1024 * 1024); + DML_LOG_VERBOSE("DML::%s: TotalSurfaceSizeInMALLForSubVP = %u\n", __func__, TotalSurfaceSizeInMALLForSubVP); + DML_LOG_VERBOSE("DML::%s: TotalSurfaceSizeInMALLForSS = %u\n", __func__, TotalSurfaceSizeInMALLForSS); + DML_LOG_VERBOSE("DML::%s: ExceededMALLSize = %u\n", __func__, *ExceededMALLSize); #endif } @@ -4674,7 +4610,6 @@ static void calculate_tdlut_setting( unsigned int 
tdlut_vmpg_per_frame; unsigned int tdlut_pte_req_per_frame; unsigned int tdlut_bytes_per_line; - unsigned int tdlut_delivery_cycles; double tdlut_drain_rate; unsigned int tdlut_mpc_width; unsigned int tdlut_bytes_per_group_simple; @@ -4737,44 +4672,47 @@ static void calculate_tdlut_setting( *p->tdlut_bytes_per_frame = tdlut_bytes_per_line * tdlut_mpc_width * tdlut_mpc_width; *p->tdlut_bytes_per_group = tdlut_bytes_per_line * tdlut_mpc_width; //the delivery cycles are DispClk cycles per line * number of lines * number of slices - tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_mpc_width/2.0, 1) * tdlut_mpc_width * tdlut_mpc_width; + //tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_mpc_width/2.0, 1) * tdlut_mpc_width * tdlut_mpc_width; tdlut_drain_rate = tdlut_bytes_per_line * p->dispclk_mhz / math_ceil2(tdlut_mpc_width/2.0, 1); } else { //tdlut_addressing_mode = tdlut_simple_linear, 3dlut width should be 4*1229=4916 elements *p->tdlut_bytes_per_frame = (unsigned int)math_ceil2(tdlut_width * tdlut_bpe, 256); *p->tdlut_bytes_per_group = tdlut_bytes_per_group_simple; - tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_width/2.0, 1); + //tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_width/2.0, 1); tdlut_drain_rate = 2 * tdlut_bpe * p->dispclk_mhz; } //the tdlut is fetched during the 2 row times of prefetch. if (p->setup_for_tdlut) { *p->tdlut_groups_per_2row_ub = (unsigned int)math_ceil2((double) *p->tdlut_bytes_per_frame / *p->tdlut_bytes_per_group, 1); - *p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate; + if (*p->tdlut_bytes_per_frame > p->cursor_buffer_size * 1024) + *p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate; + else + *p->tdlut_opt_time = 0; *p->tdlut_drain_time = p->cursor_buffer_size * 1024 / tdlut_drain_rate; *p->tdlut_bytes_to_deliver = (unsigned int) (p->cursor_buffer_size * 1024.0); } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: gpuvm_enable = %d\n", __func__, p->gpuvm_enable); - dml2_printf("DML::%s: vmpg_bytes = %d\n", __func__, vmpg_bytes); - dml2_printf("DML::%s: tdlut_vmpg_per_frame = %d\n", __func__, tdlut_vmpg_per_frame); - dml2_printf("DML::%s: tdlut_pte_req_per_frame = %d\n", __func__, tdlut_pte_req_per_frame); + DML_LOG_VERBOSE("DML::%s: gpuvm_enable = %d\n", __func__, p->gpuvm_enable); + DML_LOG_VERBOSE("DML::%s: vmpg_bytes = %d\n", __func__, vmpg_bytes); + DML_LOG_VERBOSE("DML::%s: tdlut_vmpg_per_frame = %d\n", __func__, tdlut_vmpg_per_frame); + DML_LOG_VERBOSE("DML::%s: tdlut_pte_req_per_frame = %d\n", __func__, tdlut_pte_req_per_frame); - dml2_printf("DML::%s: dispclk_mhz = %f\n", __func__, p->dispclk_mhz); - dml2_printf("DML::%s: tdlut_width = %u\n", __func__, tdlut_width); - dml2_printf("DML::%s: tdlut_addressing_mode = %s\n", __func__, (p->tdlut_addressing_mode == dml2_tdlut_sw_linear) ? 
"sw_linear" : "simple_linear"); - dml2_printf("DML::%s: tdlut_pitch_bytes = %u\n", __func__, tdlut_pitch_bytes); - dml2_printf("DML::%s: tdlut_footprint_bytes = %u\n", __func__, tdlut_footprint_bytes); - dml2_printf("DML::%s: tdlut_bytes_per_frame = %u\n", __func__, *p->tdlut_bytes_per_frame); - dml2_printf("DML::%s: tdlut_bytes_per_line = %u\n", __func__, tdlut_bytes_per_line); - dml2_printf("DML::%s: tdlut_bytes_per_group = %u\n", __func__, *p->tdlut_bytes_per_group); - dml2_printf("DML::%s: tdlut_drain_rate = %f\n", __func__, tdlut_drain_rate); - dml2_printf("DML::%s: tdlut_delivery_cycles = %u\n", __func__, tdlut_delivery_cycles); - dml2_printf("DML::%s: tdlut_opt_time = %f\n", __func__, *p->tdlut_opt_time); - dml2_printf("DML::%s: tdlut_drain_time = %f\n", __func__, *p->tdlut_drain_time); - dml2_printf("DML::%s: tdlut_bytes_to_deliver = %d\n", __func__, *p->tdlut_bytes_to_deliver); - dml2_printf("DML::%s: tdlut_groups_per_2row_ub = %d\n", __func__, *p->tdlut_groups_per_2row_ub); + DML_LOG_VERBOSE("DML::%s: dispclk_mhz = %f\n", __func__, p->dispclk_mhz); + DML_LOG_VERBOSE("DML::%s: tdlut_width = %u\n", __func__, tdlut_width); + DML_LOG_VERBOSE("DML::%s: tdlut_addressing_mode = %s\n", __func__, (p->tdlut_addressing_mode == dml2_tdlut_sw_linear) ? "sw_linear" : "simple_linear"); + DML_LOG_VERBOSE("DML::%s: tdlut_pitch_bytes = %u\n", __func__, tdlut_pitch_bytes); + DML_LOG_VERBOSE("DML::%s: tdlut_footprint_bytes = %u\n", __func__, tdlut_footprint_bytes); + DML_LOG_VERBOSE("DML::%s: tdlut_bytes_per_frame = %u\n", __func__, *p->tdlut_bytes_per_frame); + DML_LOG_VERBOSE("DML::%s: tdlut_bytes_per_line = %u\n", __func__, tdlut_bytes_per_line); + DML_LOG_VERBOSE("DML::%s: tdlut_bytes_per_group = %u\n", __func__, *p->tdlut_bytes_per_group); + DML_LOG_VERBOSE("DML::%s: tdlut_drain_rate = %f\n", __func__, tdlut_drain_rate); + DML_LOG_VERBOSE("DML::%s: tdlut_delivery_cycles = %u\n", __func__, p->tdlut_addressing_mode == dml2_tdlut_sw_linear ? 
(unsigned int)math_ceil2(tdlut_mpc_width/2.0, 1) * tdlut_mpc_width * tdlut_mpc_width : (unsigned int)math_ceil2(tdlut_width/2.0, 1)); + DML_LOG_VERBOSE("DML::%s: tdlut_opt_time = %f\n", __func__, *p->tdlut_opt_time); + DML_LOG_VERBOSE("DML::%s: tdlut_drain_time = %f\n", __func__, *p->tdlut_drain_time); + DML_LOG_VERBOSE("DML::%s: tdlut_bytes_to_deliver = %d\n", __func__, *p->tdlut_bytes_to_deliver); + DML_LOG_VERBOSE("DML::%s: tdlut_groups_per_2row_ub = %d\n", __func__, *p->tdlut_groups_per_2row_ub); #endif } @@ -4820,10 +4758,10 @@ static void CalculateTarb( *Tarb = extra_bytes / ReturnBW; *Tarb_prefetch = extra_bytes_prefetch / ReturnBW; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: PixelChunkSizeInKByte = %d\n", __func__, PixelChunkSizeInKByte); - dml2_printf("DML::%s: MetaChunkSize = %d\n", __func__, MetaChunkSize); - dml2_printf("DML::%s: extra_bytes = %f\n", __func__, extra_bytes); - dml2_printf("DML::%s: extra_bytes_prefetch = %f\n", __func__, extra_bytes_prefetch); + DML_LOG_VERBOSE("DML::%s: PixelChunkSizeInKByte = %d\n", __func__, PixelChunkSizeInKByte); + DML_LOG_VERBOSE("DML::%s: MetaChunkSize = %d\n", __func__, MetaChunkSize); + DML_LOG_VERBOSE("DML::%s: extra_bytes = %f\n", __func__, extra_bytes); + DML_LOG_VERBOSE("DML::%s: extra_bytes_prefetch = %f\n", __func__, extra_bytes_prefetch); #endif } @@ -4838,10 +4776,10 @@ static double CalculateTWait( TWait = math_max2(reserved_vblank_time_ns/1000.0, g6_temp_read_blackout_us) + t_urg_trip; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: reserved_vblank_time_ns = %d\n", __func__, reserved_vblank_time_ns); - dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency); - dml2_printf("DML::%s: Ttrip = %f\n", __func__, Ttrip); - dml2_printf("DML::%s: TWait = %f\n", __func__, TWait); + DML_LOG_VERBOSE("DML::%s: reserved_vblank_time_ns = %ld\n", __func__, reserved_vblank_time_ns); + DML_LOG_VERBOSE("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency); + DML_LOG_VERBOSE("DML::%s: Ttrip = %f\n", __func__, Ttrip); + DML_LOG_VERBOSE("DML::%s: TWait = %f\n", __func__, TWait); #endif return TWait; } @@ -4887,20 +4825,20 @@ static void CalculateVUpdateAndDynamicMetadataParameters( *Tdmsks = *Tdmsks / 2; } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: DynamicMetadataLinesBeforeActiveRequired = %u\n", __func__, DynamicMetadataLinesBeforeActiveRequired); - dml2_printf("DML::%s: VBlank = %u\n", __func__, VBlank); - dml2_printf("DML::%s: HTotal = %u\n", __func__, HTotal); - dml2_printf("DML::%s: PixelClock = %f\n", __func__, PixelClock); - dml2_printf("DML::%s: Dppclk = %f\n", __func__, Dppclk); - dml2_printf("DML::%s: DCFClkDeepSleep = %f\n", __func__, DCFClkDeepSleep); - dml2_printf("DML::%s: MaxInterDCNTileRepeaters = %u\n", __func__, MaxInterDCNTileRepeaters); - dml2_printf("DML::%s: TotalRepeaterDelayTime = %f\n", __func__, TotalRepeaterDelayTime); + DML_LOG_VERBOSE("DML::%s: DynamicMetadataLinesBeforeActiveRequired = %u\n", __func__, DynamicMetadataLinesBeforeActiveRequired); + DML_LOG_VERBOSE("DML::%s: VBlank = %u\n", __func__, VBlank); + DML_LOG_VERBOSE("DML::%s: HTotal = %u\n", __func__, HTotal); + DML_LOG_VERBOSE("DML::%s: PixelClock = %f\n", __func__, PixelClock); + DML_LOG_VERBOSE("DML::%s: Dppclk = %f\n", __func__, Dppclk); + DML_LOG_VERBOSE("DML::%s: DCFClkDeepSleep = %f\n", __func__, DCFClkDeepSleep); + DML_LOG_VERBOSE("DML::%s: MaxInterDCNTileRepeaters = %u\n", __func__, MaxInterDCNTileRepeaters); + DML_LOG_VERBOSE("DML::%s: TotalRepeaterDelayTime = %f\n", __func__, TotalRepeaterDelayTime); - 
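
Two things meet in the CalculateTWait hunk above: the formula itself (the larger of the reserved vblank time, converted from ns to us, and the g6 temp-read blackout, plus the urgent trip term) and a format-string fix, since reserved_vblank_time_ns is evidently a long given the switch from %d to %ld, and printing a long with %d is undefined behavior. A small sketch with made-up numbers:

#include <stdio.h>

static double math_max2(double a, double b) { return a > b ? a : b; }

int main(void)
{
	long reserved_vblank_time_ns = 400000;  /* example: 400 us reserved */
	double g6_temp_read_blackout_us = 95.0; /* example value only */
	double t_urg_trip = 10.0;               /* example value only */

	double TWait = math_max2(reserved_vblank_time_ns / 1000.0,
				 g6_temp_read_blackout_us) + t_urg_trip;

	/* %ld matches the long argument, mirroring the fix in this hunk. */
	printf("reserved_vblank_time_ns = %ld\n", reserved_vblank_time_ns);
	printf("TWait = %f us\n", TWait);
	return 0;
}
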
dml2_printf("DML::%s: VUpdateWidthPix = %u\n", __func__, *VUpdateWidthPix); - dml2_printf("DML::%s: VReadyOffsetPix = %u\n", __func__, *VReadyOffsetPix); - dml2_printf("DML::%s: VUpdateOffsetPix = %u\n", __func__, *VUpdateOffsetPix); + DML_LOG_VERBOSE("DML::%s: VUpdateWidthPix = %u\n", __func__, *VUpdateWidthPix); + DML_LOG_VERBOSE("DML::%s: VReadyOffsetPix = %u\n", __func__, *VReadyOffsetPix); + DML_LOG_VERBOSE("DML::%s: VUpdateOffsetPix = %u\n", __func__, *VUpdateOffsetPix); - dml2_printf("DML::%s: Tdmsks = %f\n", __func__, *Tdmsks); + DML_LOG_VERBOSE("DML::%s: Tdmsks = %f\n", __func__, *Tdmsks); #endif } @@ -4923,7 +4861,7 @@ static double get_urgent_bandwidth_required( double ReadBandwidthChroma[], double PrefetchBandwidthLuma[], double PrefetchBandwidthChroma[], - double PrefetchBandwidthOto[], + double PrefetchBandwidthMax[], double excess_vactive_fill_bw_l[], double excess_vactive_fill_bw_c[], double cursor_bw[], @@ -4962,11 +4900,11 @@ static double get_urgent_bandwidth_required( l->adj_factor_cur_pre = UrgentBurstFactorCursorPre[k]; bool is_phantom = dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]); - bool exclude_this_plane = 0; + bool exclude_this_plane = false; // Exclude phantom pipe in bw calculation for non svp prefetch state if (state_type != dml2_core_internal_soc_state_svp_prefetch && is_phantom) - exclude_this_plane = 1; + exclude_this_plane = true; // The qualified row bandwidth, qual_row_bw, accounts for the regular non-flip row bandwidth when there is no possible immediate flip or HostVM invalidation flip. // The qual_row_bw is zero if HostVM is possible and only non-zero and equal to row_bw(i) if immediate flip is not allowed for that pipe. @@ -4987,20 +4925,20 @@ static double get_urgent_bandwidth_required( l->vm_row_bw = NumberOfDPP[k] * prefetch_vmrow_bw[k]; l->flip_and_active_bw = l->per_plane_flip_bw[k] + ReadBandwidthLuma[k] * l->adj_factor_p0 + ReadBandwidthChroma[k] * l->adj_factor_p1 + cursor_bw[k] * l->adj_factor_cur; l->flip_and_prefetch_bw = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre; - l->flip_and_prefetch_bw_oto = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthOto[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre; + l->flip_and_prefetch_bw_max = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthMax[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre; l->active_and_excess_bw = (ReadBandwidthLuma[k] + excess_vactive_fill_bw_l[k]) * l->tmp_nom_adj_factor_p0 + (ReadBandwidthChroma[k] + excess_vactive_fill_bw_c[k]) * l->tmp_nom_adj_factor_p1 + dpte_row_bw[k] + meta_row_bw[k]; - surface_required_bw[k] = math_max5(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw, l->flip_and_prefetch_bw_oto); + surface_required_bw[k] = math_max5(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw, l->flip_and_prefetch_bw_max); /* export peak required bandwidth for the surface */ surface_peak_required_bw[k] = math_max2(surface_required_bw[k], surface_peak_required_bw[k]); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%d, max1: vm_row_bw=%f\n", __func__, k, l->vm_row_bw); - dml2_printf("DML::%s: k=%d, max2: flip_and_active_bw=%f\n", __func__, k, l->flip_and_active_bw); - 
dml2_printf("DML::%s: k=%d, max3: flip_and_prefetch_bw=%f\n", __func__, k, l->flip_and_prefetch_bw); - dml2_printf("DML::%s: k=%d, max4: active_and_excess_bw=%f\n", __func__, k, l->active_and_excess_bw); - dml2_printf("DML::%s: k=%d, surface_required_bw=%f\n", __func__, k, surface_required_bw[k]); - dml2_printf("DML::%s: k=%d, surface_peak_required_bw=%f\n", __func__, k, surface_peak_required_bw[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, max1: vm_row_bw=%f\n", __func__, k, l->vm_row_bw); + DML_LOG_VERBOSE("DML::%s: k=%d, max2: flip_and_active_bw=%f\n", __func__, k, l->flip_and_active_bw); + DML_LOG_VERBOSE("DML::%s: k=%d, max3: flip_and_prefetch_bw=%f\n", __func__, k, l->flip_and_prefetch_bw); + DML_LOG_VERBOSE("DML::%s: k=%d, max4: active_and_excess_bw=%f\n", __func__, k, l->active_and_excess_bw); + DML_LOG_VERBOSE("DML::%s: k=%d, surface_required_bw=%f\n", __func__, k, surface_required_bw[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, surface_peak_required_bw=%f\n", __func__, k, surface_peak_required_bw[k]); #endif } else { surface_required_bw[k] = 0.0; @@ -5009,34 +4947,34 @@ static double get_urgent_bandwidth_required( l->required_bandwidth_mbps += surface_required_bw[k]; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%d, NumberOfDPP=%d\n", __func__, k, NumberOfDPP[k]); - dml2_printf("DML::%s: k=%d, use_qual_row_bw=%d\n", __func__, k, use_qual_row_bw); - dml2_printf("DML::%s: k=%d, immediate_flip=%d\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip); - dml2_printf("DML::%s: k=%d, mall_svp_prefetch_factor=%f\n", __func__, k, l->mall_svp_prefetch_factor); - dml2_printf("DML::%s: k=%d, adj_factor_p0=%f\n", __func__, k, l->adj_factor_p0); - dml2_printf("DML::%s: k=%d, adj_factor_p1=%f\n", __func__, k, l->adj_factor_p1); - dml2_printf("DML::%s: k=%d, adj_factor_cur=%f\n", __func__, k, l->adj_factor_cur); + DML_LOG_VERBOSE("DML::%s: k=%d, NumberOfDPP=%d\n", __func__, k, NumberOfDPP[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, use_qual_row_bw=%d\n", __func__, k, use_qual_row_bw); + DML_LOG_VERBOSE("DML::%s: k=%d, immediate_flip=%d\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip); + DML_LOG_VERBOSE("DML::%s: k=%d, mall_svp_prefetch_factor=%f\n", __func__, k, l->mall_svp_prefetch_factor); + DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_p0=%f\n", __func__, k, l->adj_factor_p0); + DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_p1=%f\n", __func__, k, l->adj_factor_p1); + DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_cur=%f\n", __func__, k, l->adj_factor_cur); - dml2_printf("DML::%s: k=%d, adj_factor_p0_pre=%f\n", __func__, k, l->adj_factor_p0_pre); - dml2_printf("DML::%s: k=%d, adj_factor_p1_pre=%f\n", __func__, k, l->adj_factor_p1_pre); - dml2_printf("DML::%s: k=%d, adj_factor_cur_pre=%f\n", __func__, k, l->adj_factor_cur_pre); + DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_p0_pre=%f\n", __func__, k, l->adj_factor_p0_pre); + DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_p1_pre=%f\n", __func__, k, l->adj_factor_p1_pre); + DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_cur_pre=%f\n", __func__, k, l->adj_factor_cur_pre); - dml2_printf("DML::%s: k=%d, per_plane_flip_bw=%f\n", __func__, k, l->per_plane_flip_bw[k]); - dml2_printf("DML::%s: k=%d, prefetch_vmrow_bw=%f\n", __func__, k, prefetch_vmrow_bw[k]); - dml2_printf("DML::%s: k=%d, ReadBandwidthLuma=%f\n", __func__, k, ReadBandwidthLuma[k]); - dml2_printf("DML::%s: k=%d, ReadBandwidthChroma=%f\n", __func__, k, ReadBandwidthChroma[k]); - dml2_printf("DML::%s: k=%d, excess_vactive_fill_bw_l=%f\n", __func__, k, excess_vactive_fill_bw_l[k]); - 
dml2_printf("DML::%s: k=%d, excess_vactive_fill_bw_c=%f\n", __func__, k, excess_vactive_fill_bw_c[k]); - dml2_printf("DML::%s: k=%d, cursor_bw=%f\n", __func__, k, cursor_bw[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, per_plane_flip_bw=%f\n", __func__, k, l->per_plane_flip_bw[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, prefetch_vmrow_bw=%f\n", __func__, k, prefetch_vmrow_bw[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, ReadBandwidthLuma=%f\n", __func__, k, ReadBandwidthLuma[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, ReadBandwidthChroma=%f\n", __func__, k, ReadBandwidthChroma[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, excess_vactive_fill_bw_l=%f\n", __func__, k, excess_vactive_fill_bw_l[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, excess_vactive_fill_bw_c=%f\n", __func__, k, excess_vactive_fill_bw_c[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, cursor_bw=%f\n", __func__, k, cursor_bw[k]); - dml2_printf("DML::%s: k=%d, meta_row_bw=%f\n", __func__, k, meta_row_bw[k]); - dml2_printf("DML::%s: k=%d, dpte_row_bw=%f\n", __func__, k, dpte_row_bw[k]); - dml2_printf("DML::%s: k=%d, PrefetchBandwidthLuma=%f\n", __func__, k, PrefetchBandwidthLuma[k]); - dml2_printf("DML::%s: k=%d, PrefetchBandwidthChroma=%f\n", __func__, k, PrefetchBandwidthChroma[k]); - dml2_printf("DML::%s: k=%d, prefetch_cursor_bw=%f\n", __func__, k, prefetch_cursor_bw[k]); - dml2_printf("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane); - dml2_printf("DML::%s: k=%d, required_bandwidth_mbps=%f (total), soc_state=%s, inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, dml2_core_internal_soc_state_type_str(state_type), inc_flip_bw, is_phantom, exclude_this_plane); - dml2_printf("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane); + DML_LOG_VERBOSE("DML::%s: k=%d, meta_row_bw=%f\n", __func__, k, meta_row_bw[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, dpte_row_bw=%f\n", __func__, k, dpte_row_bw[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, PrefetchBandwidthLuma=%f\n", __func__, k, PrefetchBandwidthLuma[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, PrefetchBandwidthChroma=%f\n", __func__, k, PrefetchBandwidthChroma[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, prefetch_cursor_bw=%f\n", __func__, k, prefetch_cursor_bw[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane); + DML_LOG_VERBOSE("DML::%s: k=%d, required_bandwidth_mbps=%f (total), soc_state=%s, inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, dml2_core_internal_soc_state_type_str(state_type), inc_flip_bw, is_phantom, exclude_this_plane); + DML_LOG_VERBOSE("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane); #endif } @@ -5120,19 +5058,19 @@ static void CalculateExtraLatency( *ExtraLatency_sr = *ExtraLatency_sr + Tarb; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: qos_type=%u\n", __func__, qos_type); - dml2_printf("DML::%s: hostvm_mode=%u\n", __func__, hostvm_mode); - dml2_printf("DML::%s: Tex_trips=%u\n", __func__, Tex_trips); - dml2_printf("DML::%s: 
max_outstanding_when_urgent_expected=%u\n", __func__, max_outstanding_when_urgent_expected); - dml2_printf("DML::%s: FabricClock=%f\n", __func__, FabricClock); - dml2_printf("DML::%s: DCFCLK=%f\n", __func__, DCFCLK); - dml2_printf("DML::%s: ReturnBW=%f\n", __func__, ReturnBW); - dml2_printf("DML::%s: RoundTripPingLatencyCycles=%u\n", __func__, RoundTripPingLatencyCycles); - dml2_printf("DML::%s: ReorderingBytes=%u\n", __func__, ReorderingBytes); - dml2_printf("DML::%s: Tarb=%f\n", __func__, Tarb); - dml2_printf("DML::%s: ExtraLatency=%f\n", __func__, *ExtraLatency); - dml2_printf("DML::%s: ExtraLatency_sr=%f\n", __func__, *ExtraLatency_sr); - dml2_printf("DML::%s: ExtraLatencyPrefetch=%f\n", __func__, *ExtraLatencyPrefetch); + DML_LOG_VERBOSE("DML::%s: qos_type=%u\n", __func__, qos_type); + DML_LOG_VERBOSE("DML::%s: hostvm_mode=%u\n", __func__, hostvm_mode); + DML_LOG_VERBOSE("DML::%s: Tex_trips=%f\n", __func__, Tex_trips); + DML_LOG_VERBOSE("DML::%s: max_outstanding_when_urgent_expected=%u\n", __func__, max_outstanding_when_urgent_expected); + DML_LOG_VERBOSE("DML::%s: FabricClock=%f\n", __func__, FabricClock); + DML_LOG_VERBOSE("DML::%s: DCFCLK=%f\n", __func__, DCFCLK); + DML_LOG_VERBOSE("DML::%s: ReturnBW=%f\n", __func__, ReturnBW); + DML_LOG_VERBOSE("DML::%s: RoundTripPingLatencyCycles=%u\n", __func__, RoundTripPingLatencyCycles); + DML_LOG_VERBOSE("DML::%s: ReorderingBytes=%u\n", __func__, ReorderingBytes); + DML_LOG_VERBOSE("DML::%s: Tarb=%f\n", __func__, Tarb); + DML_LOG_VERBOSE("DML::%s: ExtraLatency=%f\n", __func__, *ExtraLatency); + DML_LOG_VERBOSE("DML::%s: ExtraLatency_sr=%f\n", __func__, *ExtraLatency_sr); + DML_LOG_VERBOSE("DML::%s: ExtraLatencyPrefetch=%f\n", __func__, *ExtraLatencyPrefetch); #endif } @@ -5187,7 +5125,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch s->Tsw_est3 = 0.0; s->cursor_prefetch_bytes = 0; *p->prefetch_cursor_bw = 0; - *p->RequiredPrefetchBWOTO = 0.0; + *p->RequiredPrefetchBWMax = 0.0; dcc_mrq_enable = (p->dcc_enable && p->mrq_present); @@ -5199,20 +5137,20 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch s->HostVMDynamicLevelsTrips = 0; } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: dcc_enable = %u\n", __func__, p->dcc_enable); - dml2_printf("DML::%s: mrq_present = %u\n", __func__, p->mrq_present); - dml2_printf("DML::%s: dcc_mrq_enable = %u\n", __func__, dcc_mrq_enable); - dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, p->display_cfg->gpuvm_enable); - dml2_printf("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels); - dml2_printf("DML::%s: DCCEnable = %u\n", __func__, p->myPipe->DCCEnable); - dml2_printf("DML::%s: VStartup = %u\n", __func__, p->VStartup); - dml2_printf("DML::%s: HostVMEnable = %u\n", __func__, p->display_cfg->hostvm_enable); - dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor); - dml2_printf("DML::%s: TWait = %f\n", __func__, p->TWait); - dml2_printf("DML::%s: TWait_p = %f\n", __func__, s->TWait_p); - dml2_printf("DML::%s: Ttrip = %f\n", __func__, p->Ttrip); - dml2_printf("DML::%s: myPipe->Dppclk = %f\n", __func__, p->myPipe->Dppclk); - dml2_printf("DML::%s: myPipe->Dispclk = %f\n", __func__, p->myPipe->Dispclk); + DML_LOG_VERBOSE("DML::%s: dcc_enable = %u\n", __func__, p->dcc_enable); + DML_LOG_VERBOSE("DML::%s: mrq_present = %u\n", __func__, p->mrq_present); + DML_LOG_VERBOSE("DML::%s: dcc_mrq_enable = %u\n", __func__, dcc_mrq_enable); + DML_LOG_VERBOSE("DML::%s: 
GPUVMEnable = %u\n", __func__, p->display_cfg->gpuvm_enable); + DML_LOG_VERBOSE("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels); + DML_LOG_VERBOSE("DML::%s: DCCEnable = %u\n", __func__, p->myPipe->DCCEnable); + DML_LOG_VERBOSE("DML::%s: VStartup = %u\n", __func__, p->VStartup); + DML_LOG_VERBOSE("DML::%s: HostVMEnable = %u\n", __func__, p->display_cfg->hostvm_enable); + DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor); + DML_LOG_VERBOSE("DML::%s: TWait = %f\n", __func__, p->TWait); + DML_LOG_VERBOSE("DML::%s: TWait_p = %f\n", __func__, s->TWait_p); + DML_LOG_VERBOSE("DML::%s: Ttrip = %f\n", __func__, p->Ttrip); + DML_LOG_VERBOSE("DML::%s: myPipe->Dppclk = %f\n", __func__, p->myPipe->Dppclk); + DML_LOG_VERBOSE("DML::%s: myPipe->Dispclk = %f\n", __func__, p->myPipe->Dispclk); #endif CalculateVUpdateAndDynamicMetadataParameters( p->MaxInterDCNTileRepeaters, @@ -5258,11 +5196,11 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch if (p->DynamicMetadataEnable == true) { if (p->VStartup * s->LineTime < *p->TSetup + *p->Tdmdl + s->Tdmbf + s->Tdmec + s->Tdmsks) { *p->NotEnoughTimeForDynamicMetadata = true; - dml2_printf("DML::%s: Not Enough Time for Dynamic Meta!\n", __func__); - dml2_printf("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf); - dml2_printf("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec); - dml2_printf("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks); - dml2_printf("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl); + DML_LOG_VERBOSE("DML::%s: Not Enough Time for Dynamic Meta!\n", __func__); + DML_LOG_VERBOSE("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf); + DML_LOG_VERBOSE("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec); + DML_LOG_VERBOSE("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks); + DML_LOG_VERBOSE("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl); } else { *p->NotEnoughTimeForDynamicMetadata = false; } @@ -5288,21 +5226,21 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch ((p->myPipe->ODMMode == dml2_odm_mode_mso_1to4) ? 
(double)p->myPipe->HActive * 3.0 / 4.0 : 0)); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: DynamicMetadataVMEnabled = %u\n", __func__, p->DynamicMetadataVMEnabled); - dml2_printf("DML::%s: DPPCycles = %u\n", __func__, s->DPPCycles); - dml2_printf("DML::%s: PixelClock = %f\n", __func__, p->myPipe->PixelClock); - dml2_printf("DML::%s: Dppclk = %f\n", __func__, p->myPipe->Dppclk); - dml2_printf("DML::%s: DISPCLKCycles = %u\n", __func__, s->DISPCLKCycles); - dml2_printf("DML::%s: DISPCLK = %f\n", __func__, p->myPipe->Dispclk); - dml2_printf("DML::%s: DSCDelay = %u\n", __func__, p->DSCDelay); - dml2_printf("DML::%s: ODMMode = %u\n", __func__, p->myPipe->ODMMode); - dml2_printf("DML::%s: DPP_RECOUT_WIDTH = %u\n", __func__, p->DPP_RECOUT_WIDTH); - dml2_printf("DML::%s: DSTXAfterScaler = %u\n", __func__, *p->DSTXAfterScaler); + DML_LOG_VERBOSE("DML::%s: DynamicMetadataVMEnabled = %u\n", __func__, p->DynamicMetadataVMEnabled); + DML_LOG_VERBOSE("DML::%s: DPPCycles = %u\n", __func__, s->DPPCycles); + DML_LOG_VERBOSE("DML::%s: PixelClock = %f\n", __func__, p->myPipe->PixelClock); + DML_LOG_VERBOSE("DML::%s: Dppclk = %f\n", __func__, p->myPipe->Dppclk); + DML_LOG_VERBOSE("DML::%s: DISPCLKCycles = %u\n", __func__, s->DISPCLKCycles); + DML_LOG_VERBOSE("DML::%s: DISPCLK = %f\n", __func__, p->myPipe->Dispclk); + DML_LOG_VERBOSE("DML::%s: DSCDelay = %u\n", __func__, p->DSCDelay); + DML_LOG_VERBOSE("DML::%s: ODMMode = %u\n", __func__, p->myPipe->ODMMode); + DML_LOG_VERBOSE("DML::%s: DPP_RECOUT_WIDTH = %u\n", __func__, p->DPP_RECOUT_WIDTH); + DML_LOG_VERBOSE("DML::%s: DSTXAfterScaler = %u\n", __func__, *p->DSTXAfterScaler); - dml2_printf("DML::%s: setup_for_tdlut = %u\n", __func__, p->setup_for_tdlut); - dml2_printf("DML::%s: tdlut_opt_time = %f\n", __func__, p->tdlut_opt_time); - dml2_printf("DML::%s: tdlut_pte_bytes_per_frame = %u\n", __func__, p->tdlut_pte_bytes_per_frame); - dml2_printf("DML::%s: tdlut_drain_time = %f\n", __func__, p->tdlut_drain_time); + DML_LOG_VERBOSE("DML::%s: setup_for_tdlut = %u\n", __func__, p->setup_for_tdlut); + DML_LOG_VERBOSE("DML::%s: tdlut_opt_time = %f\n", __func__, p->tdlut_opt_time); + DML_LOG_VERBOSE("DML::%s: tdlut_pte_bytes_per_frame = %u\n", __func__, p->tdlut_pte_bytes_per_frame); + DML_LOG_VERBOSE("DML::%s: tdlut_drain_time = %f\n", __func__, p->tdlut_drain_time); #endif if (p->OutputFormat == dml2_420 || (p->myPipe->InterlaceEnable && p->myPipe->ProgressiveToInterlaceUnitInOPP)) @@ -5314,17 +5252,17 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch *p->DSTYAfterScaler = (unsigned int)(math_floor2(s->DSTTotalPixelsAfterScaler / p->myPipe->HTotal, 1)); *p->DSTXAfterScaler = (unsigned int)(s->DSTTotalPixelsAfterScaler - ((double)(*p->DSTYAfterScaler * p->myPipe->HTotal))); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: DSTXAfterScaler = %u (final)\n", __func__, *p->DSTXAfterScaler); - dml2_printf("DML::%s: DSTYAfterScaler = %u (final)\n", __func__, *p->DSTYAfterScaler); + DML_LOG_VERBOSE("DML::%s: DSTXAfterScaler = %u (final)\n", __func__, *p->DSTXAfterScaler); + DML_LOG_VERBOSE("DML::%s: DSTYAfterScaler = %u (final)\n", __func__, *p->DSTYAfterScaler); #endif #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips); - dml2_printf("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips); - dml2_printf("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem); - dml2_printf("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch); - dml2_printf("DML::%s: GPUVMPageTableLevels = 
%u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels); - dml2_printf("DML::%s: HostVMDynamicLevelsTrips = %u\n", __func__, s->HostVMDynamicLevelsTrips); + DML_LOG_VERBOSE("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips); + DML_LOG_VERBOSE("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips); + DML_LOG_VERBOSE("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem); + DML_LOG_VERBOSE("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch); + DML_LOG_VERBOSE("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels); + DML_LOG_VERBOSE("DML::%s: HostVMDynamicLevelsTrips = %u\n", __func__, s->HostVMDynamicLevelsTrips); #endif if (p->display_cfg->gpuvm_enable) { s->Tvm_trips_rounded = math_ceil2(4.0 * *p->Tvm_trips / s->LineTime, 1.0) / 4.0 * s->LineTime; @@ -5402,7 +5340,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch } /* oto prefetch bw should be always be less than total vactive bw */ - //DML2_ASSERT(s->prefetch_bw_oto < s->per_pipe_vactive_sw_bw * p->myPipe->DPPPerSurface); + //DML_ASSERT(s->prefetch_bw_oto < s->per_pipe_vactive_sw_bw * p->myPipe->DPPPerSurface); s->prefetch_bw_oto = math_max2(s->per_pipe_vactive_sw_bw, s->prefetch_bw_oto) * p->mall_prefetch_sdp_overhead_factor; @@ -5418,12 +5356,12 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch * mp will fail if ms decides to use equ schedule and mp decides to use oto schedule * and the required bandwidth increases when going from ms to mp */ - *p->RequiredPrefetchBWOTO = s->prefetch_bw_oto; + *p->RequiredPrefetchBWMax = s->prefetch_bw_oto; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: vactive_sw_bw_l = %f\n", __func__, p->vactive_sw_bw_l); - dml2_printf("DML::%s: vactive_sw_bw_c = %f\n", __func__, p->vactive_sw_bw_c); - dml2_printf("DML::%s: per_pipe_vactive_sw_bw = %f\n", __func__, s->per_pipe_vactive_sw_bw); + DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_l = %f\n", __func__, p->vactive_sw_bw_l); + DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_c = %f\n", __func__, p->vactive_sw_bw_c); + DML_LOG_VERBOSE("DML::%s: per_pipe_vactive_sw_bw = %f\n", __func__, s->per_pipe_vactive_sw_bw); #endif if (p->display_cfg->gpuvm_enable == true) { @@ -5433,9 +5371,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch s->LineTime / 4.0); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: Tvm_oto max0 = %f\n", __func__, *p->Tvm_trips); - dml2_printf("DML::%s: Tvm_oto max1 = %f\n", __func__, *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto); - dml2_printf("DML::%s: Tvm_oto max2 = %f\n", __func__, s->LineTime / 4.0); + DML_LOG_VERBOSE("DML::%s: Tvm_oto max0 = %f\n", __func__, *p->Tvm_trips); + DML_LOG_VERBOSE("DML::%s: Tvm_oto max1 = %f\n", __func__, *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto); + DML_LOG_VERBOSE("DML::%s: Tvm_oto max2 = %f\n", __func__, s->LineTime / 4.0); #endif } else { s->Tvm_oto = s->Tvm_trips_rounded; @@ -5447,9 +5385,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto, s->LineTime / 4.0); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: Tr0_oto max0 = %f\n", __func__, *p->Tr0_trips); - dml2_printf("DML::%s: Tr0_oto max1 = %f\n", __func__, (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto); - 
dml2_printf("DML::%s: Tr0_oto max2 = %f\n", __func__, s->LineTime / 4); + DML_LOG_VERBOSE("DML::%s: Tr0_oto max0 = %f\n", __func__, *p->Tr0_trips); + DML_LOG_VERBOSE("DML::%s: Tr0_oto max1 = %f\n", __func__, (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto); + DML_LOG_VERBOSE("DML::%s: Tr0_oto max2 = %f\n", __func__, s->LineTime / 4); #endif } else s->Tr0_oto = s->LineTime / 4.0; @@ -5459,11 +5397,11 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch s->dst_y_prefetch_oto = s->Tvm_oto_lines + 2 * s->Tr0_oto_lines + s->Lsw_oto; #ifdef DML_GLOBAL_PREFETCH_CHECK - dml2_printf("DML::%s: impacted_Tpre = %f\n", __func__, p->impacted_dst_y_pre); + DML_LOG_VERBOSE("DML::%s: impacted_Tpre = %f\n", __func__, p->impacted_dst_y_pre); if (p->impacted_dst_y_pre > 0) { - dml2_printf("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto); + DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto); s->dst_y_prefetch_oto = math_max2(s->dst_y_prefetch_oto, p->impacted_dst_y_pre); - dml2_printf("DML::%s: dst_y_prefetch_oto = %f (impacted)\n", __func__, s->dst_y_prefetch_oto); + DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_oto = %f (impacted)\n", __func__, s->dst_y_prefetch_oto); } #endif *p->Tpre_oto = s->dst_y_prefetch_oto * s->LineTime; @@ -5492,72 +5430,71 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch s->dst_y_prefetch_equ = math_min2(s->dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal); - dml2_printf("DML::%s: min_Lsw_oto = %f\n", __func__, s->min_Lsw_oto); - dml2_printf("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ); - dml2_printf("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw); - dml2_printf("DML::%s: Tno_bw_flip = %f\n", __func__, *p->Tno_bw_flip); - dml2_printf("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch); - dml2_printf("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem); - dml2_printf("DML::%s: mall_prefetch_sdp_overhead_factor = %f\n", __func__, p->mall_prefetch_sdp_overhead_factor); - dml2_printf("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY); - dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY); - dml2_printf("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub); - dml2_printf("DML::%s: BytePerPixelC = %u\n", __func__, p->myPipe->BytePerPixelC); - dml2_printf("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC); - dml2_printf("DML::%s: swath_width_chroma_ub = %u\n", __func__, p->swath_width_chroma_ub); - dml2_printf("DML::%s: prefetch_sw_bytes = %f\n", __func__, *p->prefetch_sw_bytes); - dml2_printf("DML::%s: max_Tsw = %f\n", __func__, s->max_Tsw); - dml2_printf("DML::%s: bytes_pp = %f\n", __func__, s->bytes_pp); - dml2_printf("DML::%s: vm_bytes = %u\n", __func__, vm_bytes); - dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow); - dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor); - dml2_printf("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips); - dml2_printf("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips); - dml2_printf("DML::%s: Tvm_trips_flip = %f\n", __func__, *p->Tvm_trips_flip); - dml2_printf("DML::%s: Tr0_trips_flip = %f\n", __func__, *p->Tr0_trips_flip); - 
dml2_printf("DML::%s: prefetch_bw_pr = %f\n", __func__, s->prefetch_bw_pr); - dml2_printf("DML::%s: prefetch_bw_oto = %f\n", __func__, s->prefetch_bw_oto); - dml2_printf("DML::%s: Tr0_oto = %f\n", __func__, s->Tr0_oto); - dml2_printf("DML::%s: Tvm_oto = %f\n", __func__, s->Tvm_oto); - dml2_printf("DML::%s: Tvm_oto_lines = %f\n", __func__, s->Tvm_oto_lines); - dml2_printf("DML::%s: Tr0_oto_lines = %f\n", __func__, s->Tr0_oto_lines); - dml2_printf("DML::%s: Lsw_oto = %f\n", __func__, s->Lsw_oto); - dml2_printf("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto); - dml2_printf("DML::%s: dst_y_prefetch_equ = %f\n", __func__, s->dst_y_prefetch_equ); - dml2_printf("DML::%s: tdlut_row_bytes = %d\n", __func__, tdlut_row_bytes); - dml2_printf("DML::%s: meta_row_bytes = %d\n", __func__, p->meta_row_bytes); -#endif - double Tpre = s->dst_y_prefetch_equ * s->LineTime; + DML_LOG_VERBOSE("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal); + DML_LOG_VERBOSE("DML::%s: min_Lsw_oto = %f\n", __func__, s->min_Lsw_oto); + DML_LOG_VERBOSE("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ); + DML_LOG_VERBOSE("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw); + DML_LOG_VERBOSE("DML::%s: Tno_bw_flip = %f\n", __func__, *p->Tno_bw_flip); + DML_LOG_VERBOSE("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch); + DML_LOG_VERBOSE("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem); + DML_LOG_VERBOSE("DML::%s: mall_prefetch_sdp_overhead_factor = %f\n", __func__, p->mall_prefetch_sdp_overhead_factor); + DML_LOG_VERBOSE("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY); + DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY); + DML_LOG_VERBOSE("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub); + DML_LOG_VERBOSE("DML::%s: BytePerPixelC = %u\n", __func__, p->myPipe->BytePerPixelC); + DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC); + DML_LOG_VERBOSE("DML::%s: swath_width_chroma_ub = %u\n", __func__, p->swath_width_chroma_ub); + DML_LOG_VERBOSE("DML::%s: prefetch_sw_bytes = %f\n", __func__, *p->prefetch_sw_bytes); + DML_LOG_VERBOSE("DML::%s: max_Tsw = %f\n", __func__, s->max_Tsw); + DML_LOG_VERBOSE("DML::%s: bytes_pp = %f\n", __func__, s->bytes_pp); + DML_LOG_VERBOSE("DML::%s: vm_bytes = %u\n", __func__, vm_bytes); + DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow); + DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor); + DML_LOG_VERBOSE("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips); + DML_LOG_VERBOSE("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips); + DML_LOG_VERBOSE("DML::%s: Tvm_trips_flip = %f\n", __func__, *p->Tvm_trips_flip); + DML_LOG_VERBOSE("DML::%s: Tr0_trips_flip = %f\n", __func__, *p->Tr0_trips_flip); + DML_LOG_VERBOSE("DML::%s: prefetch_bw_pr = %f\n", __func__, s->prefetch_bw_pr); + DML_LOG_VERBOSE("DML::%s: prefetch_bw_oto = %f\n", __func__, s->prefetch_bw_oto); + DML_LOG_VERBOSE("DML::%s: Tr0_oto = %f\n", __func__, s->Tr0_oto); + DML_LOG_VERBOSE("DML::%s: Tvm_oto = %f\n", __func__, s->Tvm_oto); + DML_LOG_VERBOSE("DML::%s: Tvm_oto_lines = %f\n", __func__, s->Tvm_oto_lines); + DML_LOG_VERBOSE("DML::%s: Tr0_oto_lines = %f\n", __func__, s->Tr0_oto_lines); + DML_LOG_VERBOSE("DML::%s: Lsw_oto = %f\n", __func__, s->Lsw_oto); + DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto); + DML_LOG_VERBOSE("DML::%s: 
dst_y_prefetch_equ = %f\n", __func__, s->dst_y_prefetch_equ); + DML_LOG_VERBOSE("DML::%s: tdlut_row_bytes = %d\n", __func__, tdlut_row_bytes); + DML_LOG_VERBOSE("DML::%s: meta_row_bytes = %d\n", __func__, p->meta_row_bytes); +#endif s->dst_y_prefetch_equ = math_floor2(4.0 * (s->dst_y_prefetch_equ + 0.125), 1) / 4.0; *p->Tpre_rounded = s->dst_y_prefetch_equ * s->LineTime; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, s->dst_y_prefetch_equ); - dml2_printf("DML::%s: LineTime: %f\n", __func__, s->LineTime); - dml2_printf("DML::%s: VStartup: %u\n", __func__, p->VStartup); - dml2_printf("DML::%s: Tvstartup: %fus - time between vstartup and first pixel of active\n", __func__, p->VStartup * s->LineTime); - dml2_printf("DML::%s: TSetup: %fus - time from vstartup to vready\n", __func__, *p->TSetup); - dml2_printf("DML::%s: TCalc: %fus - time for calculations in dchub starting at vready\n", __func__, p->TCalc); - dml2_printf("DML::%s: TWait: %fus - time for fabric to become ready max(pstate exit,cstate enter/exit, urgent latency) after TCalc\n", __func__, p->TWait); - dml2_printf("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf); - dml2_printf("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec); - dml2_printf("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks); - dml2_printf("DML::%s: TWait = %f\n", __func__, p->TWait); - dml2_printf("DML::%s: TWait_p = %f\n", __func__, s->TWait_p); - dml2_printf("DML::%s: Ttrip = %f\n", __func__, p->Ttrip); - dml2_printf("DML::%s: Tex = %f\n", __func__, p->ExtraLatencyPrefetch); - dml2_printf("DML::%s: Tdmdl_vm: %fus - time for vm stages of dmd \n", __func__, *p->Tdmdl_vm); - dml2_printf("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl); - dml2_printf("DML::%s: TWait_p: %fus\n", __func__, s->TWait_p); - dml2_printf("DML::%s: Ttrip: %fus\n", __func__, p->Ttrip); - dml2_printf("DML::%s: DSTXAfterScaler: %u pixels - number of pixel clocks pipeline and buffer delay after scaler \n", __func__, *p->DSTXAfterScaler); - dml2_printf("DML::%s: DSTYAfterScaler: %u lines - number of lines of pipeline and buffer delay after scaler \n", __func__, *p->DSTYAfterScaler); - dml2_printf("DML::%s: vm_bytes: %f (hvm inefficiency scaled)\n", __func__, vm_bytes*p->HostVMInefficiencyFactor); - dml2_printf("DML::%s: row_bytes: %f (hvm inefficiency scaled, 1 row)\n", __func__, p->PixelPTEBytesPerRow*p->HostVMInefficiencyFactor+p->meta_row_bytes+tdlut_row_bytes); - dml2_printf("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw); - dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, *p->Tpre_rounded, (*p->Tpre_rounded - Tpre)); - dml2_printf("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips)); + DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, s->dst_y_prefetch_equ); + DML_LOG_VERBOSE("DML::%s: LineTime: %f\n", __func__, s->LineTime); + DML_LOG_VERBOSE("DML::%s: VStartup: %u\n", __func__, p->VStartup); + DML_LOG_VERBOSE("DML::%s: Tvstartup: %fus - time between vstartup and first pixel of active\n", __func__, p->VStartup * s->LineTime); + DML_LOG_VERBOSE("DML::%s: TSetup: %fus - time from vstartup to vready\n", __func__, *p->TSetup); + DML_LOG_VERBOSE("DML::%s: TCalc: %fus - time for calculations in dchub starting at 
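
The two dst_y_prefetch_equ lines nearby encode DST_Y_PREFETCH's register format: U6.2 fixed point, i.e. a 0.25 grid topping out at 63.75, with math_floor2(4.0 * (x + 0.125), 1) / 4.0 acting as round-to-nearest-quarter. A sketch of that quantization on its own, using libm's floor in place of the driver helper:

#include <math.h>
#include <stdio.h>

/* U6.2: 6 integer bits, 2 fractional bits -> step 0.25, max 63.75. */
static double quantize_u6_2(double v)
{
	if (v > 63.75)
		v = 63.75;                       /* register ceiling */
	return floor(4.0 * (v + 0.125)) / 4.0;  /* round to nearest 0.25 */
}

int main(void)
{
	printf("5.60 -> %f\n", quantize_u6_2(5.60));  /* 5.50 */
	printf("5.65 -> %f\n", quantize_u6_2(5.65));  /* 5.75 */
	printf("80.0 -> %f\n", quantize_u6_2(80.0));  /* 63.75 */
	return 0;
}
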
vready\n", __func__, p->TCalc); + DML_LOG_VERBOSE("DML::%s: TWait: %fus - time for fabric to become ready max(pstate exit,cstate enter/exit, urgent latency) after TCalc\n", __func__, p->TWait); + DML_LOG_VERBOSE("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf); + DML_LOG_VERBOSE("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec); + DML_LOG_VERBOSE("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks); + DML_LOG_VERBOSE("DML::%s: TWait = %f\n", __func__, p->TWait); + DML_LOG_VERBOSE("DML::%s: TWait_p = %f\n", __func__, s->TWait_p); + DML_LOG_VERBOSE("DML::%s: Ttrip = %f\n", __func__, p->Ttrip); + DML_LOG_VERBOSE("DML::%s: Tex = %f\n", __func__, p->ExtraLatencyPrefetch); + DML_LOG_VERBOSE("DML::%s: Tdmdl_vm: %fus - time for vm stages of dmd \n", __func__, *p->Tdmdl_vm); + DML_LOG_VERBOSE("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl); + DML_LOG_VERBOSE("DML::%s: TWait_p: %fus\n", __func__, s->TWait_p); + DML_LOG_VERBOSE("DML::%s: Ttrip: %fus\n", __func__, p->Ttrip); + DML_LOG_VERBOSE("DML::%s: DSTXAfterScaler: %u pixels - number of pixel clocks pipeline and buffer delay after scaler \n", __func__, *p->DSTXAfterScaler); + DML_LOG_VERBOSE("DML::%s: DSTYAfterScaler: %u lines - number of lines of pipeline and buffer delay after scaler \n", __func__, *p->DSTYAfterScaler); + DML_LOG_VERBOSE("DML::%s: vm_bytes: %f (hvm inefficiency scaled)\n", __func__, vm_bytes*p->HostVMInefficiencyFactor); + DML_LOG_VERBOSE("DML::%s: row_bytes: %f (hvm inefficiency scaled, 1 row)\n", __func__, p->PixelPTEBytesPerRow*p->HostVMInefficiencyFactor+p->meta_row_bytes+tdlut_row_bytes); + DML_LOG_VERBOSE("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw); + DML_LOG_VERBOSE("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, (s->dst_y_prefetch_equ * s->LineTime), *p->Tpre_rounded, (*p->Tpre_rounded - (s->dst_y_prefetch_equ * s->LineTime))); + DML_LOG_VERBOSE("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips)); #endif *p->dst_y_per_vm_vblank = 0; @@ -5596,19 +5533,19 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch } else s->prefetch_bw1 = 0; - dml2_printf("DML::%s: prefetch_bw1: %f\n", __func__, s->prefetch_bw1); + DML_LOG_VERBOSE("DML::%s: prefetch_bw1: %f\n", __func__, s->prefetch_bw1); if ((s->Tsw_est1 < s->min_Lsw_equ * s->LineTime) && (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw > 0)) { s->prefetch_bw1 = (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) / (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: vm and 2 rows bytes = %f\n", __func__, (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes))); - dml2_printf("DML::%s: Tpre_rounded = %f\n", __func__, *p->Tpre_rounded); - dml2_printf("DML::%s: minus term = %f\n", __func__, s->min_Lsw_equ * s->LineTime + 0.75 * s->LineTime + *p->Tno_bw); - dml2_printf("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ); - dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime); - dml2_printf("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw); - dml2_printf("DML::%s: Time to 
fetch vm and 2 rows = %f\n", __func__, (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw)); - dml2_printf("DML::%s: prefetch_bw1: %f (updated)\n", __func__, s->prefetch_bw1); + DML_LOG_VERBOSE("DML::%s: vm and 2 rows bytes = %f\n", __func__, (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes))); + DML_LOG_VERBOSE("DML::%s: Tpre_rounded = %f\n", __func__, *p->Tpre_rounded); + DML_LOG_VERBOSE("DML::%s: minus term = %f\n", __func__, s->min_Lsw_equ * s->LineTime + 0.75 * s->LineTime + *p->Tno_bw); + DML_LOG_VERBOSE("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ); + DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, s->LineTime); + DML_LOG_VERBOSE("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw); + DML_LOG_VERBOSE("DML::%s: Time to fetch vm and 2 rows = %f\n", __func__, (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw)); + DML_LOG_VERBOSE("DML::%s: prefetch_bw1: %f (updated)\n", __func__, s->prefetch_bw1); #endif } @@ -5620,10 +5557,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch } else s->prefetch_bw2 = 0; - dml2_printf("DML::%s: prefetch_bw2: %f\n", __func__, s->prefetch_bw2); + DML_LOG_VERBOSE("DML::%s: prefetch_bw2: %f\n", __func__, s->prefetch_bw2); if ((s->Tsw_est2 < s->min_Lsw_equ * s->LineTime) && ((*p->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime) > 0)) { s->prefetch_bw2 = vm_bytes * p->HostVMInefficiencyFactor / (*p->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime); - dml2_printf("DML::%s: prefetch_bw2: %f (updated)\n", __func__, s->prefetch_bw2); + DML_LOG_VERBOSE("DML::%s: prefetch_bw2: %f (updated)\n", __func__, s->prefetch_bw2); } // prefetch_bw3: 2*R0 + SW @@ -5634,10 +5571,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch } else s->prefetch_bw3 = 0; - dml2_printf("DML::%s: prefetch_bw3: %f\n", __func__, s->prefetch_bw3); + DML_LOG_VERBOSE("DML::%s: prefetch_bw3: %f\n", __func__, s->prefetch_bw3); if ((s->Tsw_est3 < s->min_Lsw_equ * s->LineTime) && ((*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded) > 0)) { s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) / (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded); - dml2_printf("DML::%s: prefetch_bw3: %f (updated)\n", __func__, s->prefetch_bw3); + DML_LOG_VERBOSE("DML::%s: prefetch_bw3: %f (updated)\n", __func__, s->prefetch_bw3); } // prefetch_bw4: SW @@ -5647,17 +5584,17 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch s->prefetch_bw4 = 0; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw); - dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, *p->Tpre_rounded, (*p->Tpre_rounded - Tpre)); - dml2_printf("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips)); - dml2_printf("DML::%s: Tr0_trips=%f Tr0_trips_rounded: %f, delta=%f\n", __func__, *p->Tr0_trips, s->Tr0_trips_rounded, (s->Tr0_trips_rounded - *p->Tr0_trips)); - dml2_printf("DML::%s: Tsw_est1: %f\n", __func__, s->Tsw_est1); - dml2_printf("DML::%s: Tsw_est2: %f\n", __func__, s->Tsw_est2); - 
dml2_printf("DML::%s: Tsw_est3: %f\n", __func__, s->Tsw_est3); - dml2_printf("DML::%s: prefetch_bw1: %f (final)\n", __func__, s->prefetch_bw1); - dml2_printf("DML::%s: prefetch_bw2: %f (final)\n", __func__, s->prefetch_bw2); - dml2_printf("DML::%s: prefetch_bw3: %f (final)\n", __func__, s->prefetch_bw3); - dml2_printf("DML::%s: prefetch_bw4: %f (final)\n", __func__, s->prefetch_bw4); + DML_LOG_VERBOSE("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw); + DML_LOG_VERBOSE("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, s->dst_y_prefetch_equ * s->LineTime, *p->Tpre_rounded, (*p->Tpre_rounded - (s->dst_y_prefetch_equ * s->LineTime))); + DML_LOG_VERBOSE("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips)); + DML_LOG_VERBOSE("DML::%s: Tr0_trips=%f Tr0_trips_rounded: %f, delta=%f\n", __func__, *p->Tr0_trips, s->Tr0_trips_rounded, (s->Tr0_trips_rounded - *p->Tr0_trips)); + DML_LOG_VERBOSE("DML::%s: Tsw_est1: %f\n", __func__, s->Tsw_est1); + DML_LOG_VERBOSE("DML::%s: Tsw_est2: %f\n", __func__, s->Tsw_est2); + DML_LOG_VERBOSE("DML::%s: Tsw_est3: %f\n", __func__, s->Tsw_est3); + DML_LOG_VERBOSE("DML::%s: prefetch_bw1: %f (final)\n", __func__, s->prefetch_bw1); + DML_LOG_VERBOSE("DML::%s: prefetch_bw2: %f (final)\n", __func__, s->prefetch_bw2); + DML_LOG_VERBOSE("DML::%s: prefetch_bw3: %f (final)\n", __func__, s->prefetch_bw3); + DML_LOG_VERBOSE("DML::%s: prefetch_bw4: %f (final)\n", __func__, s->prefetch_bw4); #endif { bool Case1OK = false; @@ -5676,14 +5613,14 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch double total_row_bytes = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes); - dml2_printf("DML::%s: Tvm_trips_rounded = %f\n", __func__, s->Tvm_trips_rounded); - dml2_printf("DML::%s: Tr0_trips_rounded = %f\n", __func__, s->Tr0_trips_rounded); + DML_LOG_VERBOSE("DML::%s: Tvm_trips_rounded = %f\n", __func__, s->Tvm_trips_rounded); + DML_LOG_VERBOSE("DML::%s: Tr0_trips_rounded = %f\n", __func__, s->Tr0_trips_rounded); if (s->prefetch_bw1 > 0) { double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw1; double row_transfer_time = total_row_bytes / s->prefetch_bw1; - dml2_printf("DML::%s: Case1: vm_transfer_time = %f\n", __func__, vm_transfer_time); - dml2_printf("DML::%s: Case1: row_transfer_time = %f\n", __func__, row_transfer_time); + DML_LOG_VERBOSE("DML::%s: Case1: vm_transfer_time = %f\n", __func__, vm_transfer_time); + DML_LOG_VERBOSE("DML::%s: Case1: row_transfer_time = %f\n", __func__, row_transfer_time); if (vm_transfer_time >= s->Tvm_trips_rounded && row_transfer_time >= s->Tr0_trips_rounded) { Case1OK = true; } @@ -5696,8 +5633,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch if (s->prefetch_bw2 > 0) { double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw2; double row_transfer_time = total_row_bytes / s->prefetch_bw2; - dml2_printf("DML::%s: Case2: vm_transfer_time = %f\n", __func__, vm_transfer_time); - dml2_printf("DML::%s: Case2: row_transfer_time = %f\n", __func__, row_transfer_time); + DML_LOG_VERBOSE("DML::%s: Case2: vm_transfer_time = %f\n", __func__, vm_transfer_time); + DML_LOG_VERBOSE("DML::%s: Case2: row_transfer_time = %f\n", __func__, row_transfer_time); if (vm_transfer_time >= s->Tvm_trips_rounded && row_transfer_time < s->Tr0_trips_rounded) { Case2OK = true; } @@ -5709,8 +5646,8 @@ 
static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch if (s->prefetch_bw3 > 0) { double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw3; double row_transfer_time = total_row_bytes / s->prefetch_bw3; - dml2_printf("DML::%s: Case3: vm_transfer_time = %f\n", __func__, vm_transfer_time); - dml2_printf("DML::%s: Case3: row_transfer_time = %f\n", __func__, row_transfer_time); + DML_LOG_VERBOSE("DML::%s: Case3: vm_transfer_time = %f\n", __func__, vm_transfer_time); + DML_LOG_VERBOSE("DML::%s: Case3: row_transfer_time = %f\n", __func__, row_transfer_time); if (vm_transfer_time < s->Tvm_trips_rounded && row_transfer_time >= s->Tr0_trips_rounded) { Case3OK = true; } @@ -5730,10 +5667,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch p->vm_bytes * p->HostVMInefficiencyFactor / (31 * s->LineTime) - *p->Tno_bw, (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / (15 * s->LineTime)); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: Case1OK: %u\n", __func__, Case1OK); - dml2_printf("DML::%s: Case2OK: %u\n", __func__, Case2OK); - dml2_printf("DML::%s: Case3OK: %u\n", __func__, Case3OK); - dml2_printf("DML::%s: prefetch_bw_equ: %f\n", __func__, s->prefetch_bw_equ); + DML_LOG_VERBOSE("DML::%s: Case1OK: %u\n", __func__, Case1OK); + DML_LOG_VERBOSE("DML::%s: Case2OK: %u\n", __func__, Case2OK); + DML_LOG_VERBOSE("DML::%s: Case3OK: %u\n", __func__, Case3OK); + DML_LOG_VERBOSE("DML::%s: prefetch_bw_equ: %f\n", __func__, s->prefetch_bw_equ); #endif if (s->prefetch_bw_equ > 0) { @@ -5753,12 +5690,12 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch } else { s->Tvm_equ = 0; s->Tr0_equ = 0; - dml2_printf("DML::%s: prefetch_bw_equ equals 0!\n", __func__); + DML_LOG_VERBOSE("DML::%s: prefetch_bw_equ equals 0!\n", __func__); } } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: Tvm_equ = %f\n", __func__, s->Tvm_equ); - dml2_printf("DML::%s: Tr0_equ = %f\n", __func__, s->Tr0_equ); + DML_LOG_VERBOSE("DML::%s: Tvm_equ = %f\n", __func__, s->Tvm_equ); + DML_LOG_VERBOSE("DML::%s: Tr0_equ = %f\n", __func__, s->Tr0_equ); #endif // Use the more stressful prefetch schedule if (s->dst_y_prefetch_oto < s->dst_y_prefetch_equ) { @@ -5769,7 +5706,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0; *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: Using oto scheduling for prefetch\n", __func__); + DML_LOG_VERBOSE("DML::%s: Using oto scheduling for prefetch\n", __func__); #endif } else { @@ -5781,11 +5718,17 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch s->TimeForFetchingVM = s->Tvm_equ; s->TimeForFetchingRowInVBlank = s->Tr0_equ; - *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0; - *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0; + *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0; + *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0; + + /* equ bw should be propagated so a ceiling of the equ bw is accounted for prior to mode programming. 
+ * Overall bandwidth may be lower when going from mode support to mode programming but final pixel data + * bandwidth may end up higher than what was calculated in mode support. + */ + *p->RequiredPrefetchBWMax = math_max2(s->prefetch_bw_equ, *p->RequiredPrefetchBWMax); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: Using equ bw scheduling for prefetch\n", __func__); + DML_LOG_VERBOSE("DML::%s: Using equ bw scheduling for prefetch\n", __func__); #endif } @@ -5797,31 +5740,31 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch *p->prefetch_swath_time_us = (s->LinesToRequestPrefetchPixelData * s->LineTime); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: TimeForFetchingVM = %f\n", __func__, s->TimeForFetchingVM); - dml2_printf("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, s->TimeForFetchingRowInVBlank); - dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime); - dml2_printf("DML::%s: dst_y_prefetch = %f\n", __func__, *p->dst_y_prefetch); - dml2_printf("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank); - dml2_printf("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank); - dml2_printf("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, s->LinesToRequestPrefetchPixelData); - dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY); - dml2_printf("DML::%s: prefetch_swath_time_us = %f\n", __func__, *p->prefetch_swath_time_us); + DML_LOG_VERBOSE("DML::%s: TimeForFetchingVM = %f\n", __func__, s->TimeForFetchingVM); + DML_LOG_VERBOSE("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, s->TimeForFetchingRowInVBlank); + DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, s->LineTime); + DML_LOG_VERBOSE("DML::%s: dst_y_prefetch = %f\n", __func__, *p->dst_y_prefetch); + DML_LOG_VERBOSE("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank); + DML_LOG_VERBOSE("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank); + DML_LOG_VERBOSE("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, s->LinesToRequestPrefetchPixelData); + DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY); + DML_LOG_VERBOSE("DML::%s: prefetch_swath_time_us = %f\n", __func__, *p->prefetch_swath_time_us); - dml2_printf("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, p->cursor_bytes_per_chunk); - dml2_printf("DML::%s: cursor_bytes_per_line = %d\n", __func__, p->cursor_bytes_per_line); - dml2_printf("DML::%s: cursor_prefetch_bytes = %d\n", __func__, s->cursor_prefetch_bytes); - dml2_printf("DML::%s: prefetch_cursor_bw = %f\n", __func__, *p->prefetch_cursor_bw); + DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, p->cursor_bytes_per_chunk); + DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_line = %d\n", __func__, p->cursor_bytes_per_line); + DML_LOG_VERBOSE("DML::%s: cursor_prefetch_bytes = %d\n", __func__, s->cursor_prefetch_bytes); + DML_LOG_VERBOSE("DML::%s: prefetch_cursor_bw = %f\n", __func__, *p->prefetch_cursor_bw); #endif - DML2_ASSERT(*p->dst_y_prefetch < 64); + DML_ASSERT(*p->dst_y_prefetch < 64); unsigned int min_lsw_required = (unsigned int)math_max2(2, p->tdlut_drain_time / s->LineTime); if (s->LinesToRequestPrefetchPixelData >= min_lsw_required && s->prefetch_bw_equ > 0) { *p->VRatioPrefetchY = (double)p->PrefetchSourceLinesY / s->LinesToRequestPrefetchPixelData; *p->VRatioPrefetchY = math_max2(*p->VRatioPrefetchY, 1.0); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: 
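
This new comment and math_max2 line are the point of the Oto-to-Max rename: RequiredPrefetchBWMax starts out as the oto bandwidth and, when the equ schedule is selected, is raised to at least prefetch_bw_equ, so mode programming is never handed a lower prefetch-bandwidth ceiling than mode support assumed. A reduced sketch with example numbers:

#include <stdio.h>

static double math_max2(double a, double b) { return a > b ? a : b; }

int main(void)
{
	double prefetch_bw_oto = 1200.0;  /* example MB/s */
	double prefetch_bw_equ = 1350.0;  /* example MB/s */
	int use_equ = 1;  /* i.e. the equ branch of the schedule selection */

	double RequiredPrefetchBWMax = prefetch_bw_oto;
	if (use_equ)
		RequiredPrefetchBWMax = math_max2(prefetch_bw_equ,
						  RequiredPrefetchBWMax);

	printf("RequiredPrefetchBWMax = %f\n", RequiredPrefetchBWMax);
	return 0;
}
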
VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY); - dml2_printf("DML::%s: SwathHeightY = %u\n", __func__, p->SwathHeightY); - dml2_printf("DML::%s: VInitPreFillY = %u\n", __func__, p->VInitPreFillY); + DML_LOG_VERBOSE("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY); + DML_LOG_VERBOSE("DML::%s: SwathHeightY = %u\n", __func__, p->SwathHeightY); + DML_LOG_VERBOSE("DML::%s: VInitPreFillY = %u\n", __func__, p->VInitPreFillY); #endif if ((p->SwathHeightY > 4) && (p->VInitPreFillY > 3)) { if (s->LinesToRequestPrefetchPixelData > (p->VInitPreFillY - 3.0) / 2.0) { @@ -5829,13 +5772,13 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch (double)p->MaxNumSwathY * p->SwathHeightY / (s->LinesToRequestPrefetchPixelData - (p->VInitPreFillY - 3.0) / 2.0)); } else { s->NoTimeToPrefetch = true; - dml2_printf("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VinitPreFillY=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillY); + DML_LOG_VERBOSE("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VinitPreFillY=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillY); *p->VRatioPrefetchY = 0; } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY); - dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY); - dml2_printf("DML::%s: MaxNumSwathY = %u\n", __func__, p->MaxNumSwathY); + DML_LOG_VERBOSE("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY); + DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY); + DML_LOG_VERBOSE("DML::%s: MaxNumSwathY = %u\n", __func__, p->MaxNumSwathY); #endif } @@ -5843,22 +5786,22 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch *p->VRatioPrefetchC = math_max2(*p->VRatioPrefetchC, 1.0); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC); - dml2_printf("DML::%s: SwathHeightC = %u\n", __func__, p->SwathHeightC); - dml2_printf("DML::%s: VInitPreFillC = %u\n", __func__, p->VInitPreFillC); + DML_LOG_VERBOSE("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC); + DML_LOG_VERBOSE("DML::%s: SwathHeightC = %u\n", __func__, p->SwathHeightC); + DML_LOG_VERBOSE("DML::%s: VInitPreFillC = %u\n", __func__, p->VInitPreFillC); #endif if ((p->SwathHeightC > 4) && (p->VInitPreFillC > 3)) { if (s->LinesToRequestPrefetchPixelData > (p->VInitPreFillC - 3.0) / 2.0) { *p->VRatioPrefetchC = math_max2(*p->VRatioPrefetchC, (double)p->MaxNumSwathC * p->SwathHeightC / (s->LinesToRequestPrefetchPixelData - (p->VInitPreFillC - 3.0) / 2.0)); } else { s->NoTimeToPrefetch = true; - dml2_printf("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VInitPreFillC=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillC); + DML_LOG_VERBOSE("DML::%s: No time to prefetch!. 
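/*
 * Sketch (hypothetical stand-alone form): VRatioPrefetchY is the ratio of
 * source lines to the lines granted for prefetch, clamped to at least 1.0;
 * the VInitPreFill branch below only ever raises it further.
 */
static double vratio_prefetch(double prefetch_source_lines,
                              double lines_to_request_prefetch)
{
        double vratio = prefetch_source_lines / lines_to_request_prefetch;

        return vratio < 1.0 ? 1.0 : vratio;
}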
LinesToRequestPrefetchPixelData=%f VInitPreFillC=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillC); *p->VRatioPrefetchC = 0; } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC); - dml2_printf("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC); - dml2_printf("DML::%s: MaxNumSwathC = %u\n", __func__, p->MaxNumSwathC); + DML_LOG_VERBOSE("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC); + DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC); + DML_LOG_VERBOSE("DML::%s: MaxNumSwathC = %u\n", __func__, p->MaxNumSwathC); #endif } @@ -5866,36 +5809,34 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch *p->RequiredPrefetchPixelDataBWChroma = (double)p->PrefetchSourceLinesC / s->LinesToRequestPrefetchPixelData * p->myPipe->BytePerPixelC * p->swath_width_chroma_ub / s->LineTime; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY); - dml2_printf("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub); - dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime); - dml2_printf("DML::%s: RequiredPrefetchPixelDataBWLuma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWLuma); - dml2_printf("DML::%s: RequiredPrefetchPixelDataBWChroma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWChroma); + DML_LOG_VERBOSE("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY); + DML_LOG_VERBOSE("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub); + DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, s->LineTime); + DML_LOG_VERBOSE("DML::%s: RequiredPrefetchPixelDataBWLuma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWLuma); + DML_LOG_VERBOSE("DML::%s: RequiredPrefetchPixelDataBWChroma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWChroma); #endif } else { s->NoTimeToPrefetch = true; - dml2_printf("DML::%s: No time to prefetch!, LinesToRequestPrefetchPixelData: %f, should be >= %d\n", __func__, s->LinesToRequestPrefetchPixelData, min_lsw_required); - dml2_printf("DML::%s: No time to prefetch!, prefetch_bw_equ: %f, should be > 0\n", __func__, s->prefetch_bw_equ); + DML_LOG_VERBOSE("DML::%s: No time to prefetch!, LinesToRequestPrefetchPixelData: %f, should be >= %d\n", __func__, s->LinesToRequestPrefetchPixelData, min_lsw_required); + DML_LOG_VERBOSE("DML::%s: No time to prefetch!, prefetch_bw_equ: %f, should be > 0\n", __func__, s->prefetch_bw_equ); *p->VRatioPrefetchY = 0; *p->VRatioPrefetchC = 0; *p->RequiredPrefetchPixelDataBWLuma = 0; *p->RequiredPrefetchPixelDataBWChroma = 0; } - dml2_printf("DML: Tpre: %fus - sum of time to request 2 x data pte, swaths\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime + 2.0 * s->TimeForFetchingRowInVBlank + s->TimeForFetchingVM); - dml2_printf("DML: Tvm: %fus - time to fetch vm\n", s->TimeForFetchingVM); - dml2_printf("DML: Tr0: %fus - time to fetch first row of data pagetables\n", s->TimeForFetchingRowInVBlank); - dml2_printf("DML: Tsw: %fus = time to fetch enough pixel data and cursor data to feed the scalers init position and detile\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime); - dml2_printf("DML: To: %fus - time for propagation from scaler to optc\n", (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime); - dml2_printf("DML: Tvstartup - TSetup - Tcalc - TWait - Tpre - To > 0\n"); - dml2_printf("DML: 
Tslack(pre): %fus - time left over in schedule\n", p->VStartup * s->LineTime - s->TimeForFetchingVM - 2 * s->TimeForFetchingRowInVBlank - (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime - p->TWait - p->TCalc - *p->TSetup); - dml2_printf("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %u\n", p->PixelPTEBytesPerRow); + DML_LOG_VERBOSE("DML: Tpre: %fus - sum of time to request 2 x data pte, swaths\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime + 2.0 * s->TimeForFetchingRowInVBlank + s->TimeForFetchingVM); + DML_LOG_VERBOSE("DML: Tvm: %fus - time to fetch vm\n", s->TimeForFetchingVM); + DML_LOG_VERBOSE("DML: Tr0: %fus - time to fetch first row of data pagetables\n", s->TimeForFetchingRowInVBlank); + DML_LOG_VERBOSE("DML: Tsw: %fus = time to fetch enough pixel data and cursor data to feed the scalers init position and detile\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime); + DML_LOG_VERBOSE("DML: To: %fus - time for propagation from scaler to optc\n", (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime); + DML_LOG_VERBOSE("DML: Tvstartup - TSetup - Tcalc - TWait - Tpre - To > 0\n"); + DML_LOG_VERBOSE("DML: Tslack(pre): %fus - time left over in schedule\n", p->VStartup * s->LineTime - s->TimeForFetchingVM - 2 * s->TimeForFetchingRowInVBlank - (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime - p->TWait - p->TCalc - *p->TSetup); + DML_LOG_VERBOSE("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %u\n", p->PixelPTEBytesPerRow); } else { - dml2_printf("DML::%s: No time to prefetch! dst_y_prefetch_equ = %f (should be > 1)\n", __func__, s->dst_y_prefetch_equ); - dml2_printf("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded (%f) should be >= Tvm_trips_rounded (%f) + 2.0*Tr0_trips_rounded (%f) + min_Tsw_equ (%f)\n", + DML_LOG_VERBOSE("DML::%s: No time to prefetch! dst_y_prefetch_equ = %f (should be > 1)\n", __func__, s->dst_y_prefetch_equ); + DML_LOG_VERBOSE("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded (%f) should be >= Tvm_trips_rounded (%f) + 2.0*Tr0_trips_rounded (%f) + min_Tsw_equ (%f)\n", __func__, min_Lsw_equ_ok, *p->Tpre_rounded, s->Tvm_trips_rounded, 2.0*s->Tr0_trips_rounded, s->min_Lsw_equ*s->LineTime); - dml2_printf("DML::%s: No time to prefetch! 
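/*
 * Sketch of the logged schedule budget, not part of the patch: the prefetch
 * fits only if vstartup covers setup, calc, wait, prefetch and the
 * scaler-to-OPTC propagation; whatever is left over is the Tslack(pre)
 * printed above.
 */
static int prefetch_schedule_has_slack(double vstartup_lines, double line_time_us,
                                       double t_setup, double t_calc, double t_wait,
                                       double t_pre, double t_o)
{
        double t_slack = vstartup_lines * line_time_us
                       - t_setup - t_calc - t_wait - t_pre - t_o;

        return t_slack > 0.0;
}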
min_Lsw_equ_ok = %d, Tpre_rounded+Tvm_trips_rounded+2.0*Tr0_trips_rounded+min_Tsw_equ (%f) should be > \n", - __func__, tpre_gt_req_latency, (s->min_Lsw_equ*s->LineTime + s->Tvm_trips_rounded + 2.0*s->Tr0_trips_rounded), p->Turg, s->trip_to_mem, p->ExtraLatencyPrefetch); s->NoTimeToPrefetch = true; s->TimeForFetchingVM = 0; s->TimeForFetchingRowInVBlank = 0; @@ -5916,18 +5857,18 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch prefetch_vm_bw = 0; } else if (*p->dst_y_per_vm_vblank > 0) { #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor); - dml2_printf("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank); - dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime); + DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor); + DML_LOG_VERBOSE("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank); + DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, s->LineTime); #endif prefetch_vm_bw = vm_bytes * p->HostVMInefficiencyFactor / (*p->dst_y_per_vm_vblank * s->LineTime); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw); + DML_LOG_VERBOSE("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw); #endif } else { prefetch_vm_bw = 0; s->NoTimeToPrefetch = true; - dml2_printf("DML::%s: No time to prefetch!. dst_y_per_vm_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_vm_vblank); + DML_LOG_VERBOSE("DML::%s: No time to prefetch!. dst_y_per_vm_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_vm_vblank); } if (p->PixelPTEBytesPerRow == 0 && tdlut_row_bytes == 0) { @@ -5936,14 +5877,14 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch prefetch_row_bw = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + tdlut_row_bytes) / (*p->dst_y_per_row_vblank * s->LineTime); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow); - dml2_printf("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank); - dml2_printf("DML::%s: prefetch_row_bw = %f\n", __func__, prefetch_row_bw); + DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow); + DML_LOG_VERBOSE("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank); + DML_LOG_VERBOSE("DML::%s: prefetch_row_bw = %f\n", __func__, prefetch_row_bw); #endif } else { prefetch_row_bw = 0; s->NoTimeToPrefetch = true; - dml2_printf("DML::%s: No time to prefetch!. dst_y_per_row_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_row_vblank); + DML_LOG_VERBOSE("DML::%s: No time to prefetch!. 
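/*
 * Sketch, not part of the patch: the VM prefetch bandwidth is simply the
 * (inefficiency-scaled) VM bytes divided by the time the schedule grants
 * them, i.e. dst_y_per_vm_vblank lines; the PTE-row term is analogous.
 */
static double vm_prefetch_bw(double vm_bytes, double hostvm_ineff_factor,
                             double dst_y_per_vm_vblank, double line_time_us)
{
        /* caller must guarantee dst_y_per_vm_vblank > 0, as the code here does */
        return vm_bytes * hostvm_ineff_factor /
               (dst_y_per_vm_vblank * line_time_us);
}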
dst_y_per_row_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_row_vblank); } *p->prefetch_vmrow_bw = math_max2(prefetch_vm_bw, prefetch_row_bw); @@ -5963,12 +5904,12 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch *p->prefetch_vmrow_bw = 0; } - dml2_printf("DML::%s: dst_y_per_vm_vblank = %f (final)\n", __func__, *p->dst_y_per_vm_vblank); - dml2_printf("DML::%s: dst_y_per_row_vblank = %f (final)\n", __func__, *p->dst_y_per_row_vblank); - dml2_printf("DML::%s: prefetch_vmrow_bw = %f (final)\n", __func__, *p->prefetch_vmrow_bw); - dml2_printf("DML::%s: RequiredPrefetchPixelDataBWLuma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWLuma); - dml2_printf("DML::%s: RequiredPrefetchPixelDataBWChroma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWChroma); - dml2_printf("DML::%s: NoTimeToPrefetch=%d\n", __func__, s->NoTimeToPrefetch); + DML_LOG_VERBOSE("DML::%s: dst_y_per_vm_vblank = %f (final)\n", __func__, *p->dst_y_per_vm_vblank); + DML_LOG_VERBOSE("DML::%s: dst_y_per_row_vblank = %f (final)\n", __func__, *p->dst_y_per_row_vblank); + DML_LOG_VERBOSE("DML::%s: prefetch_vmrow_bw = %f (final)\n", __func__, *p->prefetch_vmrow_bw); + DML_LOG_VERBOSE("DML::%s: RequiredPrefetchPixelDataBWLuma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWLuma); + DML_LOG_VERBOSE("DML::%s: RequiredPrefetchPixelDataBWChroma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWChroma); + DML_LOG_VERBOSE("DML::%s: NoTimeToPrefetch=%d\n", __func__, s->NoTimeToPrefetch); return s->NoTimeToPrefetch; } @@ -6005,7 +5946,7 @@ static unsigned int find_max_impact_plane(unsigned int this_plane_idx, unsigned } } if (max_idx <= 0) { - DML2_ASSERT(max_idx >= 0); + DML_ASSERT(max_idx >= 0); max_idx = this_plane_idx; } @@ -6037,12 +5978,12 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core // worst case if the rob and cdb is fully hogged s->max_Trpd_dcfclk_cycles = (unsigned int) math_ceil2((p->rob_buffer_size_kbytes*1024 + p->compressed_buffer_size_kbytes*DML_MAX_COMPRESSION_RATIO*1024)/64.0, 1.0); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: num_active_planes = %d\n", __func__, p->num_active_planes); - dml2_printf("DML::%s: rob_buffer_size_kbytes = %d\n", __func__, p->rob_buffer_size_kbytes); - dml2_printf("DML::%s: compressed_buffer_size_kbytes = %d\n", __func__, p->compressed_buffer_size_kbytes); - dml2_printf("DML::%s: estimated_urg_bandwidth_required_mbps = %f\n", __func__, p->estimated_urg_bandwidth_required_mbps); - dml2_printf("DML::%s: estimated_dcfclk_mhz = %f\n", __func__, p->estimated_dcfclk_mhz); - dml2_printf("DML::%s: max_Trpd_dcfclk_cycles = %u\n", __func__, s->max_Trpd_dcfclk_cycles); + DML_LOG_VERBOSE("DML::%s: num_active_planes = %d\n", __func__, p->num_active_planes); + DML_LOG_VERBOSE("DML::%s: rob_buffer_size_kbytes = %d\n", __func__, p->rob_buffer_size_kbytes); + DML_LOG_VERBOSE("DML::%s: compressed_buffer_size_kbytes = %d\n", __func__, p->compressed_buffer_size_kbytes); + DML_LOG_VERBOSE("DML::%s: estimated_urg_bandwidth_required_mbps = %f\n", __func__, p->estimated_urg_bandwidth_required_mbps); + DML_LOG_VERBOSE("DML::%s: estimated_dcfclk_mhz = %f\n", __func__, p->estimated_dcfclk_mhz); + DML_LOG_VERBOSE("DML::%s: max_Trpd_dcfclk_cycles = %u\n", __func__, s->max_Trpd_dcfclk_cycles); #endif // calculate the return impact from each plane, request is 256B per dcfclk @@ -6063,12 +6004,12 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core s->burst_bytes_to_fill_det += 
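/*
 * Sketch of the worst-case return-path bound used by
 * CheckGlobalPrefetchAdmissibility(), not part of the patch: assume the ROB
 * plus the compressed buffer (scaled by the max compression ratio) are fully
 * occupied, and count the drain in 64-byte-per-DCFCLK cycles.
 */
#include <math.h>

static unsigned int max_trpd_dcfclk_cycles(unsigned int rob_kbytes,
                                           unsigned int cdb_kbytes,
                                           double max_compression_ratio)
{
        double bytes = rob_kbytes * 1024.0 +
                       cdb_kbytes * max_compression_ratio * 1024.0;

        return (unsigned int)ceil(bytes / 64.0);
}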
(unsigned int) (math_floor2(p->lb_source_lines_l[i] / p->swath_height_l[i], 1) * s->src_swath_bytes_l[i]); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: i=%u pixel_format = %d\n", __func__, i, p->pixel_format[i]); - dml2_printf("DML::%s: i=%u chunk_bytes_l = %d\n", __func__, i, p->chunk_bytes_l); - dml2_printf("DML::%s: i=%u lb_source_lines_l = %d\n", __func__, i, p->lb_source_lines_l[i]); - dml2_printf("DML::%s: i=%u src_detile_buf_size_bytes_l=%d\n", __func__, i, s->src_detile_buf_size_bytes_l[i]); - dml2_printf("DML::%s: i=%u src_swath_bytes_l=%d\n", __func__, i, s->src_swath_bytes_l[i]); - dml2_printf("DML::%s: i=%u burst_bytes_to_fill_det=%d (luma)\n", __func__, i, s->burst_bytes_to_fill_det); + DML_LOG_VERBOSE("DML::%s: i=%u pixel_format = %d\n", __func__, i, p->pixel_format[i]); + DML_LOG_VERBOSE("DML::%s: i=%u chunk_bytes_l = %d\n", __func__, i, p->chunk_bytes_l); + DML_LOG_VERBOSE("DML::%s: i=%u lb_source_lines_l = %d\n", __func__, i, p->lb_source_lines_l[i]); + DML_LOG_VERBOSE("DML::%s: i=%u src_detile_buf_size_bytes_l=%d\n", __func__, i, s->src_detile_buf_size_bytes_l[i]); + DML_LOG_VERBOSE("DML::%s: i=%u src_swath_bytes_l=%d\n", __func__, i, s->src_swath_bytes_l[i]); + DML_LOG_VERBOSE("DML::%s: i=%u burst_bytes_to_fill_det=%d (luma)\n", __func__, i, s->burst_bytes_to_fill_det); #endif if (s->src_swath_bytes_c[i] > 0) { // dual_plane @@ -6079,10 +6020,10 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: i=%u chunk_bytes_c = %d\n", __func__, i, p->chunk_bytes_c); - dml2_printf("DML::%s: i=%u lb_source_lines_c = %d\n", __func__, i, p->lb_source_lines_c[i]); - dml2_printf("DML::%s: i=%u src_detile_buf_size_bytes_c=%d\n", __func__, i, s->src_detile_buf_size_bytes_c[i]); - dml2_printf("DML::%s: i=%u src_swath_bytes_c=%d\n", __func__, i, s->src_swath_bytes_c[i]); + DML_LOG_VERBOSE("DML::%s: i=%u chunk_bytes_c = %d\n", __func__, i, p->chunk_bytes_c); + DML_LOG_VERBOSE("DML::%s: i=%u lb_source_lines_c = %d\n", __func__, i, p->lb_source_lines_c[i]); + DML_LOG_VERBOSE("DML::%s: i=%u src_detile_buf_size_bytes_c=%d\n", __func__, i, s->src_detile_buf_size_bytes_c[i]); + DML_LOG_VERBOSE("DML::%s: i=%u src_swath_bytes_c=%d\n", __func__, i, s->src_swath_bytes_c[i]); #endif } @@ -6090,9 +6031,9 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core s->accumulated_return_path_dcfclk_cycles[i] = (unsigned int) math_ceil2(((DML_MAX_COMPRESSION_RATIO-1) * 64 * p->estimated_dcfclk_mhz) * s->time_to_fill_det_us / 64.0, 1.0); //for 64B per DCFClk #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: i=%u burst_bytes_to_fill_det=%d\n", __func__, i, s->burst_bytes_to_fill_det); - dml2_printf("DML::%s: i=%u time_to_fill_det_us=%f\n", __func__, i, s->time_to_fill_det_us); - dml2_printf("DML::%s: i=%u accumulated_return_path_dcfclk_cycles=%u\n", __func__, i, s->accumulated_return_path_dcfclk_cycles[i]); + DML_LOG_VERBOSE("DML::%s: i=%u burst_bytes_to_fill_det=%d\n", __func__, i, s->burst_bytes_to_fill_det); + DML_LOG_VERBOSE("DML::%s: i=%u time_to_fill_det_us=%f\n", __func__, i, s->time_to_fill_det_us); + DML_LOG_VERBOSE("DML::%s: i=%u accumulated_return_path_dcfclk_cycles=%u\n", __func__, i, s->accumulated_return_path_dcfclk_cycles[i]); #endif // clamping to worst case delay which is one which occupy the full rob+cdb if (s->accumulated_return_path_dcfclk_cycles[i] > s->max_Trpd_dcfclk_cycles) @@ -6109,7 +6050,7 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core 
p->impacted_dst_y_pre[i] = math_ceil2(p->impacted_dst_y_pre[i] / p->line_time[i], 0.25); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: i=%u impacted_Tpre=%f (k=%u)\n", __func__, i, p->impacted_dst_y_pre[i], k); + DML_LOG_VERBOSE("DML::%s: i=%u impacted_Tpre=%f (k=%u)\n", __func__, i, p->impacted_dst_y_pre[i], k); #endif } @@ -6120,8 +6061,8 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core *p->recalc_prefetch_schedule = 1; } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: i=%u Tpre_rounded=%f\n", __func__, i, p->Tpre_rounded[i]); - dml2_printf("DML::%s: i=%u Tpre_oto=%f\n", __func__, i, p->Tpre_oto[i]); + DML_LOG_VERBOSE("DML::%s: i=%u Tpre_rounded=%f\n", __func__, i, p->Tpre_rounded[i]); + DML_LOG_VERBOSE("DML::%s: i=%u Tpre_oto=%f\n", __func__, i, p->Tpre_oto[i]); #endif } } else { @@ -6131,8 +6072,8 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: prefetch_global_check_passed=%u\n", __func__, s->prefetch_global_check_passed); - dml2_printf("DML::%s: recalc_prefetch_schedule=%u\n", __func__, *p->recalc_prefetch_schedule); + DML_LOG_VERBOSE("DML::%s: prefetch_global_check_passed=%u\n", __func__, s->prefetch_global_check_passed); + DML_LOG_VERBOSE("DML::%s: recalc_prefetch_schedule=%u\n", __func__, *p->recalc_prefetch_schedule); #endif return s->prefetch_global_check_passed; @@ -6150,8 +6091,8 @@ static void calculate_peak_bandwidth_required( memset(l, 0, sizeof(struct dml2_core_shared_calculate_peak_bandwidth_required_locals)); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: inc_flip_bw = %d\n", __func__, p->inc_flip_bw); - dml2_printf("DML::%s: NumberOfActiveSurfaces = %d\n", __func__, p->num_active_planes); + DML_LOG_VERBOSE("DML::%s: inc_flip_bw = %d\n", __func__, p->inc_flip_bw); + DML_LOG_VERBOSE("DML::%s: NumberOfActiveSurfaces = %d\n", __func__, p->num_active_planes); #endif for (unsigned int k = 0; k < p->num_active_planes; ++k) { @@ -6180,7 +6121,7 @@ static void calculate_peak_bandwidth_required( p->surface_read_bandwidth_c, l->zero_array, //PrefetchBandwidthLuma, l->zero_array, //PrefetchBandwidthChroma, - l->zero_array, //PrefetchBWOTO + l->zero_array, //PrefetchBWMax l->zero_array, l->zero_array, l->zero_array, @@ -6217,7 +6158,7 @@ static void calculate_peak_bandwidth_required( p->surface_read_bandwidth_c, l->zero_array, //PrefetchBandwidthLuma, l->zero_array, //PrefetchBandwidthChroma, - l->zero_array, //PrefetchBWOTO + l->zero_array, //PrefetchBWMax p->excess_vactive_fill_bw_l, p->excess_vactive_fill_bw_c, p->cursor_bw, @@ -6254,7 +6195,7 @@ static void calculate_peak_bandwidth_required( p->surface_read_bandwidth_c, p->prefetch_bandwidth_l, p->prefetch_bandwidth_c, - p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw + p->prefetch_bandwidth_max, // to prevent ms/mp mismatches where mp prefetch bw > ms prefetch bw p->excess_vactive_fill_bw_l, p->excess_vactive_fill_bw_c, p->cursor_bw, @@ -6291,7 +6232,7 @@ static void calculate_peak_bandwidth_required( p->surface_read_bandwidth_c, p->prefetch_bandwidth_l, p->prefetch_bandwidth_c, - p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw + p->prefetch_bandwidth_max, // to prevent ms/mp mismatch where mp prefetch bw > ms prefetch bw p->excess_vactive_fill_bw_l, p->excess_vactive_fill_bw_c, p->cursor_bw, @@ -6328,7 +6269,7 @@ static void calculate_peak_bandwidth_required( p->surface_read_bandwidth_c, p->prefetch_bandwidth_l, 
p->prefetch_bandwidth_c, - p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw + p->prefetch_bandwidth_max, // to prevent ms/mp mismatches where mp prefetch bw > ms prefetch bw p->excess_vactive_fill_bw_l, p->excess_vactive_fill_bw_c, p->cursor_bw, @@ -6347,12 +6288,12 @@ static void calculate_peak_bandwidth_required( p->surface_peak_required_bw[m][n]); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: urg_vactive_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_vactive_bandwidth_required[m][n]); - dml2_printf("DML::%s: urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]); - dml2_printf("DML::%s: urg_bandwidth_required_qual[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]); - dml2_printf("DML::%s: non_urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->non_urg_bandwidth_required[m][n]); + DML_LOG_VERBOSE("DML::%s: urg_vactive_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_vactive_bandwidth_required[m][n]); + DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]); + DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required_qual[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]); + DML_LOG_VERBOSE("DML::%s: non_urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? 
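/*
 * Sketch, names hypothetical: with prefetch_bandwidth_max replacing the old
 * oto-only term, each surface contributes the larger of its vactive and
 * prefetch bandwidths, so mode programming can never require more than mode
 * support admitted. Simplified per-surface accumulation:
 */
static double peak_required_bw(const double *vactive_bw,
                               const double *prefetch_bw_max,
                               unsigned int num_surfaces)
{
        double total = 0.0;

        for (unsigned int i = 0; i < num_surfaces; i++)
                total += vactive_bw[i] > prefetch_bw_max[i] ?
                         vactive_bw[i] : prefetch_bw_max[i];
        return total;
}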
"_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->non_urg_bandwidth_required[m][n]); #endif - DML2_ASSERT(p->urg_bandwidth_required[m][n] >= p->non_urg_bandwidth_required[m][n]); + DML_ASSERT(p->urg_bandwidth_required[m][n] >= p->non_urg_bandwidth_required[m][n]); } } } @@ -6414,18 +6355,18 @@ static void check_urgent_bandwidth_support( } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: frac_urg_bandwidth_nom_sdp = %f\n", __func__, frac_urg_bandwidth_nom_sdp); - dml2_printf("DML::%s: frac_urg_bandwidth_nom_dram = %f\n", __func__, frac_urg_bandwidth_nom_dram); - dml2_printf("DML::%s: frac_urg_bandwidth_nom = %f\n", __func__, *frac_urg_bandwidth_nom); + DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_nom_sdp = %f\n", __func__, frac_urg_bandwidth_nom_sdp); + DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_nom_dram = %f\n", __func__, frac_urg_bandwidth_nom_dram); + DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_nom = %f\n", __func__, *frac_urg_bandwidth_nom); - dml2_printf("DML::%s: frac_urg_bandwidth_mall_sdp = %f\n", __func__, frac_urg_bandwidth_mall_sdp); - dml2_printf("DML::%s: frac_urg_bandwidth_mall_dram = %f\n", __func__, frac_urg_bandwidth_mall_dram); - dml2_printf("DML::%s: frac_urg_bandwidth_mall = %f\n", __func__, *frac_urg_bandwidth_mall); - dml2_printf("DML::%s: bandwidth_support_ok = %d\n", __func__, *bandwidth_support_ok); + DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_mall_sdp = %f\n", __func__, frac_urg_bandwidth_mall_sdp); + DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_mall_dram = %f\n", __func__, frac_urg_bandwidth_mall_dram); + DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_mall = %f\n", __func__, *frac_urg_bandwidth_mall); + DML_LOG_VERBOSE("DML::%s: bandwidth_support_ok = %d\n", __func__, *bandwidth_support_ok); for (unsigned int m = 0; m < dml2_core_internal_soc_state_max; m++) { for (unsigned int n = 0; n < dml2_core_internal_bw_max; n++) { - dml2_printf("DML::%s: state:%s bw_type:%s urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n", + DML_LOG_VERBOSE("DML::%s: state:%s bw_type:%s urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), urg_bandwidth_available[m][n], (urg_bandwidth_available[m][n] < urg_bandwidth_required[m][n]) ? "<" : ">=", urg_bandwidth_required[m][n]); } @@ -6446,14 +6387,14 @@ static double get_bandwidth_available_for_immediate_flip(enum dml2_core_internal flip_bw_available_mbps = flip_bw_available_sdp_mbps < flip_bw_available_dram_mbps ? 
flip_bw_available_sdp_mbps : flip_bw_available_dram_mbps; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state)); - dml2_printf("DML::%s: urg_bandwidth_available_sdp_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_sdp]); - dml2_printf("DML::%s: urg_bandwidth_available_dram_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_dram]); - dml2_printf("DML::%s: urg_bandwidth_required_sdp_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_sdp]); - dml2_printf("DML::%s: urg_bandwidth_required_dram_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_dram]); - dml2_printf("DML::%s: flip_bw_available_sdp_mbps = %f\n", __func__, flip_bw_available_sdp_mbps); - dml2_printf("DML::%s: flip_bw_available_dram_mbps = %f\n", __func__, flip_bw_available_dram_mbps); - dml2_printf("DML::%s: flip_bw_available_mbps = %f\n", __func__, flip_bw_available_mbps); + DML_LOG_VERBOSE("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state)); + DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available_sdp_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_sdp]); + DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available_dram_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_dram]); + DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required_sdp_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_sdp]); + DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required_dram_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_dram]); + DML_LOG_VERBOSE("DML::%s: flip_bw_available_sdp_mbps = %f\n", __func__, flip_bw_available_sdp_mbps); + DML_LOG_VERBOSE("DML::%s: flip_bw_available_dram_mbps = %f\n", __func__, flip_bw_available_dram_mbps); + DML_LOG_VERBOSE("DML::%s: flip_bw_available_mbps = %f\n", __func__, flip_bw_available_mbps); #endif return flip_bw_available_mbps; @@ -6478,28 +6419,28 @@ static void calculate_immediate_flip_bandwidth_support( *flip_bandwidth_support_ok &= urg_bandwidth_available[eval_state][n] >= urg_bandwidth_required_flip[eval_state][n]; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: n = %s\n", __func__, dml2_core_internal_bw_type_str(n)); - dml2_printf("DML::%s: urg_bandwidth_available = %f\n", __func__, urg_bandwidth_available[eval_state][n]); - dml2_printf("DML::%s: non_urg_bandwidth_required_flip = %f\n", __func__, non_urg_bandwidth_required_flip[eval_state][n]); - dml2_printf("DML::%s: urg_bandwidth_required_flip = %f\n", __func__, urg_bandwidth_required_flip[eval_state][n]); - dml2_printf("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok); + DML_LOG_VERBOSE("DML::%s: n = %s\n", __func__, dml2_core_internal_bw_type_str(n)); + DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available = %f\n", __func__, urg_bandwidth_available[eval_state][n]); + DML_LOG_VERBOSE("DML::%s: non_urg_bandwidth_required_flip = %f\n", __func__, non_urg_bandwidth_required_flip[eval_state][n]); + DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required_flip = %f\n", __func__, urg_bandwidth_required_flip[eval_state][n]); + DML_LOG_VERBOSE("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok); #endif - DML2_ASSERT(urg_bandwidth_required_flip[eval_state][n] >= non_urg_bandwidth_required_flip[eval_state][n]); + DML_ASSERT(urg_bandwidth_required_flip[eval_state][n] >= 
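/*
 * Sketch, assuming each headroom is available minus already-required urgent
 * bandwidth (as the debug output above suggests): immediate flips get the
 * smaller of the SDP and DRAM headrooms.
 */
static double flip_bw_available_mbps(double sdp_avail, double sdp_required,
                                     double dram_avail, double dram_required)
{
        double sdp_headroom = sdp_avail - sdp_required;
        double dram_headroom = dram_avail - dram_required;

        return sdp_headroom < dram_headroom ? sdp_headroom : dram_headroom;
}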
non_urg_bandwidth_required_flip[eval_state][n]); } *frac_urg_bandwidth_flip = (frac_urg_bw_flip_sdp > frac_urg_bw_flip_dram) ? frac_urg_bw_flip_sdp : frac_urg_bw_flip_dram; *flip_bandwidth_support_ok &= (*frac_urg_bandwidth_flip <= 1.0); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state)); - dml2_printf("DML::%s: frac_urg_bw_flip_sdp = %f\n", __func__, frac_urg_bw_flip_sdp); - dml2_printf("DML::%s: frac_urg_bw_flip_dram = %f\n", __func__, frac_urg_bw_flip_dram); - dml2_printf("DML::%s: frac_urg_bandwidth_flip = %f\n", __func__, *frac_urg_bandwidth_flip); - dml2_printf("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok); + DML_LOG_VERBOSE("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state)); + DML_LOG_VERBOSE("DML::%s: frac_urg_bw_flip_sdp = %f\n", __func__, frac_urg_bw_flip_sdp); + DML_LOG_VERBOSE("DML::%s: frac_urg_bw_flip_dram = %f\n", __func__, frac_urg_bw_flip_dram); + DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_flip = %f\n", __func__, *frac_urg_bandwidth_flip); + DML_LOG_VERBOSE("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok); for (unsigned int m = 0; m < dml2_core_internal_soc_state_max; m++) { for (unsigned int n = 0; n < dml2_core_internal_bw_max; n++) { - dml2_printf("DML::%s: state:%s bw_type:%s, urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n", + DML_LOG_VERBOSE("DML::%s: state:%s bw_type:%s, urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), urg_bandwidth_available[m][n], (urg_bandwidth_available[m][n] < urg_bandwidth_required_flip[m][n]) ? "<" : ">=", urg_bandwidth_required_flip[m][n]); } @@ -6549,27 +6490,27 @@ static void CalculateFlipSchedule( l->dpte_row_bytes = DPTEBytesPerRow; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, GPUVMEnable); - dml2_printf("DML::%s: ip.max_flip_time_us = %d\n", __func__, max_flip_time_us); - dml2_printf("DML::%s: ip.max_flip_time_lines = %d\n", __func__, max_flip_time_lines); - dml2_printf("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip); - dml2_printf("DML::%s: TotImmediateFlipBytes = %u\n", __func__, TotImmediateFlipBytes); - dml2_printf("DML::%s: use_lb_flip_bw = %u\n", __func__, use_lb_flip_bw); - dml2_printf("DML::%s: iflip_enable = %u\n", __func__, iflip_enable); - dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor); - dml2_printf("DML::%s: LineTime = %f\n", __func__, LineTime); - dml2_printf("DML::%s: Tno_bw_flip = %f\n", __func__, Tno_bw_flip); - dml2_printf("DML::%s: Tvm_trips_flip = %f\n", __func__, Tvm_trips_flip); - dml2_printf("DML::%s: Tr0_trips_flip = %f\n", __func__, Tr0_trips_flip); - dml2_printf("DML::%s: Tvm_trips_flip_rounded = %f\n", __func__, Tvm_trips_flip_rounded); - dml2_printf("DML::%s: Tr0_trips_flip_rounded = %f\n", __func__, Tr0_trips_flip_rounded); - dml2_printf("DML::%s: vm_bytes = %f\n", __func__, vm_bytes); - dml2_printf("DML::%s: DPTEBytesPerRow = %f\n", __func__, DPTEBytesPerRow); - dml2_printf("DML::%s: meta_row_bytes = %d\n", __func__, meta_row_bytes); - dml2_printf("DML::%s: dpte_row_bytes = %f\n", __func__, l->dpte_row_bytes); - dml2_printf("DML::%s: dpte_row_height = %d\n", __func__, dpte_row_height); - dml2_printf("DML::%s: meta_row_height = %d\n", __func__, meta_row_height); - 
dml2_printf("DML::%s: VRatio = %f\n", __func__, VRatio); + DML_LOG_VERBOSE("DML::%s: GPUVMEnable = %u\n", __func__, GPUVMEnable); + DML_LOG_VERBOSE("DML::%s: ip.max_flip_time_us = %d\n", __func__, max_flip_time_us); + DML_LOG_VERBOSE("DML::%s: ip.max_flip_time_lines = %d\n", __func__, max_flip_time_lines); + DML_LOG_VERBOSE("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip); + DML_LOG_VERBOSE("DML::%s: TotImmediateFlipBytes = %u\n", __func__, TotImmediateFlipBytes); + DML_LOG_VERBOSE("DML::%s: use_lb_flip_bw = %u\n", __func__, use_lb_flip_bw); + DML_LOG_VERBOSE("DML::%s: iflip_enable = %u\n", __func__, iflip_enable); + DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor); + DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, LineTime); + DML_LOG_VERBOSE("DML::%s: Tno_bw_flip = %f\n", __func__, Tno_bw_flip); + DML_LOG_VERBOSE("DML::%s: Tvm_trips_flip = %f\n", __func__, Tvm_trips_flip); + DML_LOG_VERBOSE("DML::%s: Tr0_trips_flip = %f\n", __func__, Tr0_trips_flip); + DML_LOG_VERBOSE("DML::%s: Tvm_trips_flip_rounded = %f\n", __func__, Tvm_trips_flip_rounded); + DML_LOG_VERBOSE("DML::%s: Tr0_trips_flip_rounded = %f\n", __func__, Tr0_trips_flip_rounded); + DML_LOG_VERBOSE("DML::%s: vm_bytes = %f\n", __func__, vm_bytes); + DML_LOG_VERBOSE("DML::%s: DPTEBytesPerRow = %f\n", __func__, DPTEBytesPerRow); + DML_LOG_VERBOSE("DML::%s: meta_row_bytes = %d\n", __func__, meta_row_bytes); + DML_LOG_VERBOSE("DML::%s: dpte_row_bytes = %f\n", __func__, l->dpte_row_bytes); + DML_LOG_VERBOSE("DML::%s: dpte_row_height = %d\n", __func__, dpte_row_height); + DML_LOG_VERBOSE("DML::%s: meta_row_height = %d\n", __func__, meta_row_height); + DML_LOG_VERBOSE("DML::%s: VRatio = %f\n", __func__, VRatio); #endif if (TotImmediateFlipBytes > 0 && (GPUVMEnable || dcc_mrq_enable)) { @@ -6596,9 +6537,9 @@ static void CalculateFlipSchedule( l->min_row_time = l->min_row_height * LineTime / VRatio; } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: min_row_time = %f\n", __func__, l->min_row_time); + DML_LOG_VERBOSE("DML::%s: min_row_time = %f\n", __func__, l->min_row_time); #endif - DML2_ASSERT(l->min_row_time > 0); + DML_ASSERT(l->min_row_time > 0); if (use_lb_flip_bw) { // For mode check, calculation the flip bw requirement with worst case flip time @@ -6619,20 +6560,20 @@ static void CalculateFlipSchedule( l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded), l->hvm_scaled_row_bytes / (l->max_flip_time - Tvm_trips_flip_rounded)); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: max_flip_time = %f\n", __func__, l->max_flip_time); - dml2_printf("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_bytes); - dml2_printf("DML::%s: total row bytes (%d row, hvm ineff scaled) = %f\n", __func__, l->num_rows, l->hvm_scaled_row_bytes); - dml2_printf("DML::%s: total vm+row bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_row_bytes); - dml2_printf("DML::%s: lb_flip_bw for vm and row = %f\n", __func__, l->hvm_scaled_vm_row_bytes / (l->max_flip_time - Tno_bw_flip)); - dml2_printf("DML::%s: lb_flip_bw for vm = %f\n", __func__, l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded)); - dml2_printf("DML::%s: lb_flip_bw for row = %f\n", __func__, l->hvm_scaled_row_bytes / (l->max_flip_time - Tvm_trips_flip_rounded)); + DML_LOG_VERBOSE("DML::%s: max_flip_time = %f\n", __func__, l->max_flip_time); + DML_LOG_VERBOSE("DML::%s: total vm bytes (hvm ineff 
scaled) = %f\n", __func__, l->hvm_scaled_vm_bytes); + DML_LOG_VERBOSE("DML::%s: total row bytes (%f row, hvm ineff scaled) = %f\n", __func__, l->num_rows, l->hvm_scaled_row_bytes); + DML_LOG_VERBOSE("DML::%s: total vm+row bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_row_bytes); + DML_LOG_VERBOSE("DML::%s: lb_flip_bw for vm and row = %f\n", __func__, l->hvm_scaled_vm_row_bytes / (l->max_flip_time - Tno_bw_flip)); + DML_LOG_VERBOSE("DML::%s: lb_flip_bw for vm = %f\n", __func__, l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded)); + DML_LOG_VERBOSE("DML::%s: lb_flip_bw for row = %f\n", __func__, l->hvm_scaled_row_bytes / (l->max_flip_time - Tvm_trips_flip_rounded)); if (l->lb_flip_bw > 0) { - dml2_printf("DML::%s: mode_support est Tvm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw); - dml2_printf("DML::%s: mode_support est Tr0_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / l->num_rows); - dml2_printf("DML::%s: mode_support est dst_y_per_vm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw / LineTime); - dml2_printf("DML::%s: mode_support est dst_y_per_row_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / LineTime / l->num_rows); - dml2_printf("DML::%s: Tvm_trips_flip_rounded + 2*Tr0_trips_flip_rounded = %f\n", __func__, (Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded)); + DML_LOG_VERBOSE("DML::%s: mode_support est Tvm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw); + DML_LOG_VERBOSE("DML::%s: mode_support est Tr0_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / l->num_rows); + DML_LOG_VERBOSE("DML::%s: mode_support est dst_y_per_vm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw / LineTime); + DML_LOG_VERBOSE("DML::%s: mode_support est dst_y_per_row_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / LineTime / l->num_rows); + DML_LOG_VERBOSE("DML::%s: Tvm_trips_flip_rounded + 2*Tr0_trips_flip_rounded = %f\n", __func__, (Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded)); } #endif l->lb_flip_bw = math_max3(l->lb_flip_bw, @@ -6640,8 +6581,8 @@ static void CalculateFlipSchedule( (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / (15 * LineTime)); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: lb_flip_bw for vm reg limit = %f\n", __func__, l->hvm_scaled_vm_bytes / (31 * LineTime) - Tno_bw_flip); - dml2_printf("DML::%s: lb_flip_bw for row reg limit = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / (15 * LineTime)); + DML_LOG_VERBOSE("DML::%s: lb_flip_bw for vm reg limit = %f\n", __func__, l->hvm_scaled_vm_bytes / (31 * LineTime) - Tno_bw_flip); + DML_LOG_VERBOSE("DML::%s: lb_flip_bw for row reg limit = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / (15 * LineTime)); #endif } @@ -6653,13 +6594,12 @@ static void CalculateFlipSchedule( } else { if (iflip_enable) { l->ImmediateFlipBW = (double)per_pipe_flip_bytes * BandwidthAvailableForImmediateFlip / (double)TotImmediateFlipBytes; // flip_bw(i) - double portion = (double)per_pipe_flip_bytes / (double)TotImmediateFlipBytes; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: per_pipe_flip_bytes = %d\n", __func__, per_pipe_flip_bytes); - dml2_printf("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, 
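/*
 * Sketch of the flip_bw(i) line above, not part of the patch: each pipe
 * receives a share of the flip budget proportional to its flip bytes.
 */
static double immediate_flip_bw(double per_pipe_flip_bytes,
                                double total_flip_bytes, double budget_bw)
{
        return total_flip_bytes > 0.0 ?
               per_pipe_flip_bytes * budget_bw / total_flip_bytes : 0.0;
}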
BandwidthAvailableForImmediateFlip); - dml2_printf("DML::%s: ImmediateFlipBW = %f\n", __func__, l->ImmediateFlipBW); - dml2_printf("DML::%s: portion of flip bw = %f\n", __func__, portion); + DML_LOG_VERBOSE("DML::%s: per_pipe_flip_bytes = %d\n", __func__, per_pipe_flip_bytes); + DML_LOG_VERBOSE("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip); + DML_LOG_VERBOSE("DML::%s: ImmediateFlipBW = %f\n", __func__, l->ImmediateFlipBW); + DML_LOG_VERBOSE("DML::%s: portion of flip bw = %f\n", __func__, (double)per_pipe_flip_bytes / (double)TotImmediateFlipBytes); #endif if (l->ImmediateFlipBW == 0) { l->Tvm_flip = 0; @@ -6674,11 +6614,11 @@ static void CalculateFlipSchedule( LineTime / 4.0); } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, vm_bytes * HostVMInefficiencyFactor); - dml2_printf("DML::%s: total row bytes (hvm ineff scaled, one row) = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes)); + DML_LOG_VERBOSE("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, vm_bytes * HostVMInefficiencyFactor); + DML_LOG_VERBOSE("DML::%s: total row bytes (hvm ineff scaled, one row) = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes)); - dml2_printf("DML::%s: Tvm_flip = %f (bw-based), Tvm_trips_flip = %f (latency-based)\n", __func__, Tno_bw_flip + vm_bytes * HostVMInefficiencyFactor / l->ImmediateFlipBW, Tvm_trips_flip); - dml2_printf("DML::%s: Tr0_flip = %f (bw-based), Tr0_trips_flip = %f (latency-based)\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / l->ImmediateFlipBW, Tr0_trips_flip); + DML_LOG_VERBOSE("DML::%s: Tvm_flip = %f (bw-based), Tvm_trips_flip = %f (latency-based)\n", __func__, Tno_bw_flip + vm_bytes * HostVMInefficiencyFactor / l->ImmediateFlipBW, Tvm_trips_flip); + DML_LOG_VERBOSE("DML::%s: Tr0_flip = %f (bw-based), Tr0_trips_flip = %f (latency-based)\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / l->ImmediateFlipBW, Tr0_trips_flip); #endif *dst_y_per_vm_flip = math_ceil2(4.0 * (l->Tvm_flip / LineTime), 1.0) / 4.0; *dst_y_per_row_flip = math_ceil2(4.0 * (l->Tr0_flip / LineTime), 1.0) / 4.0; @@ -6711,14 +6651,14 @@ static void CalculateFlipSchedule( #ifdef __DML_VBA_DEBUG__ if (!use_lb_flip_bw) { - dml2_printf("DML::%s: dst_y_per_vm_flip = %f (should be < 32)\n", __func__, *dst_y_per_vm_flip); - dml2_printf("DML::%s: dst_y_per_row_flip = %f (should be < 16)\n", __func__, *dst_y_per_row_flip); - dml2_printf("DML::%s: Tvm_flip = %f (final)\n", __func__, l->Tvm_flip); - dml2_printf("DML::%s: Tr0_flip = %f (final)\n", __func__, l->Tr0_flip); - dml2_printf("DML::%s: Tvm_flip + 2*Tr0_flip = %f (should be <= min_row_time=%f)\n", __func__, l->Tvm_flip + 2 * l->Tr0_flip, l->min_row_time); + DML_LOG_VERBOSE("DML::%s: dst_y_per_vm_flip = %f (should be < 32)\n", __func__, *dst_y_per_vm_flip); + DML_LOG_VERBOSE("DML::%s: dst_y_per_row_flip = %f (should be < 16)\n", __func__, *dst_y_per_row_flip); + DML_LOG_VERBOSE("DML::%s: Tvm_flip = %f (final)\n", __func__, l->Tvm_flip); + DML_LOG_VERBOSE("DML::%s: Tr0_flip = %f (final)\n", __func__, l->Tr0_flip); + DML_LOG_VERBOSE("DML::%s: Tvm_flip + 2*Tr0_flip = %f (should be <= min_row_time=%f)\n", __func__, l->Tvm_flip + 2 * l->Tr0_flip, l->min_row_time); } - dml2_printf("DML::%s: final_flip_bw = %f\n", __func__, *final_flip_bw); - dml2_printf("DML::%s: ImmediateFlipSupportedForPipe = %u\n", __func__, 
*ImmediateFlipSupportedForPipe); + DML_LOG_VERBOSE("DML::%s: final_flip_bw = %f\n", __func__, *final_flip_bw); + DML_LOG_VERBOSE("DML::%s: ImmediateFlipSupportedForPipe = %u\n", __func__, *ImmediateFlipSupportedForPipe); #endif } @@ -6736,7 +6676,7 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( p->Watermark->UrgentWatermark = p->mmSOCParameters.UrgentLatency + p->mmSOCParameters.ExtraLatency; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark); + DML_LOG_VERBOSE("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark); #endif p->Watermark->USRRetrainingWatermark = p->mmSOCParameters.UrgentLatency + p->mmSOCParameters.ExtraLatency + p->mmSOCParameters.USRRetrainingLatency + p->mmSOCParameters.SMNLatency; @@ -6755,20 +6695,20 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( p->Watermark->temp_read_or_ppt_watermark_us = p->mmSOCParameters.g6_temp_read_blackout_us + p->Watermark->UrgentWatermark; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, p->mmSOCParameters.UrgentLatency); - dml2_printf("DML::%s: ExtraLatency = %f\n", __func__, p->mmSOCParameters.ExtraLatency); - dml2_printf("DML::%s: DRAMClockChangeLatency = %f\n", __func__, p->mmSOCParameters.DRAMClockChangeLatency); - dml2_printf("DML::%s: SREnterPlusExitZ8Time = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitZ8Time); - dml2_printf("DML::%s: SREnterPlusExitTime = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitTime); - dml2_printf("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark); - dml2_printf("DML::%s: USRRetrainingWatermark = %f\n", __func__, p->Watermark->USRRetrainingWatermark); - dml2_printf("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, p->Watermark->DRAMClockChangeWatermark); - dml2_printf("DML::%s: FCLKChangeWatermark = %f\n", __func__, p->Watermark->FCLKChangeWatermark); - dml2_printf("DML::%s: StutterExitWatermark = %f\n", __func__, p->Watermark->StutterExitWatermark); - dml2_printf("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->StutterEnterPlusExitWatermark); - dml2_printf("DML::%s: Z8StutterExitWatermark = %f\n", __func__, p->Watermark->Z8StutterExitWatermark); - dml2_printf("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->Z8StutterEnterPlusExitWatermark); - dml2_printf("DML::%s: temp_read_or_ppt_watermark_us = %f\n", __func__, p->Watermark->temp_read_or_ppt_watermark_us); + DML_LOG_VERBOSE("DML::%s: UrgentLatency = %f\n", __func__, p->mmSOCParameters.UrgentLatency); + DML_LOG_VERBOSE("DML::%s: ExtraLatency = %f\n", __func__, p->mmSOCParameters.ExtraLatency); + DML_LOG_VERBOSE("DML::%s: DRAMClockChangeLatency = %f\n", __func__, p->mmSOCParameters.DRAMClockChangeLatency); + DML_LOG_VERBOSE("DML::%s: SREnterPlusExitZ8Time = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitZ8Time); + DML_LOG_VERBOSE("DML::%s: SREnterPlusExitTime = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitTime); + DML_LOG_VERBOSE("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark); + DML_LOG_VERBOSE("DML::%s: USRRetrainingWatermark = %f\n", __func__, p->Watermark->USRRetrainingWatermark); + DML_LOG_VERBOSE("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, p->Watermark->DRAMClockChangeWatermark); + DML_LOG_VERBOSE("DML::%s: FCLKChangeWatermark = %f\n", __func__, p->Watermark->FCLKChangeWatermark); + DML_LOG_VERBOSE("DML::%s: StutterExitWatermark = %f\n", __func__, 
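/*
 * Sketch of the watermark composition shown above, not part of the patch:
 * the USR retraining watermark stacks the retraining and SMN latencies on
 * top of the urgent terms.
 */
static double usr_retraining_watermark(double urgent_latency, double extra_latency,
                                       double usr_retraining_latency,
                                       double smn_latency)
{
        return urgent_latency + extra_latency + usr_retraining_latency + smn_latency;
}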
p->Watermark->StutterExitWatermark); + DML_LOG_VERBOSE("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->StutterEnterPlusExitWatermark); + DML_LOG_VERBOSE("DML::%s: Z8StutterExitWatermark = %f\n", __func__, p->Watermark->Z8StutterExitWatermark); + DML_LOG_VERBOSE("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->Z8StutterEnterPlusExitWatermark); + DML_LOG_VERBOSE("DML::%s: temp_read_or_ppt_watermark_us = %f\n", __func__, p->Watermark->temp_read_or_ppt_watermark_us); #endif s->TotalActiveWriteback = 0; @@ -6801,11 +6741,11 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( p->Watermark->WritebackFCLKChangeWatermark = p->Watermark->WritebackFCLKChangeWatermark + p->mmSOCParameters.USRRetrainingLatency; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: WritebackDRAMClockChangeWatermark = %f\n", __func__, p->Watermark->WritebackDRAMClockChangeWatermark); - dml2_printf("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, p->Watermark->WritebackFCLKChangeWatermark); - dml2_printf("DML::%s: WritebackUrgentWatermark = %f\n", __func__, p->Watermark->WritebackUrgentWatermark); - dml2_printf("DML::%s: USRRetrainingRequired = %u\n", __func__, p->USRRetrainingRequired); - dml2_printf("DML::%s: USRRetrainingLatency = %f\n", __func__, p->mmSOCParameters.USRRetrainingLatency); + DML_LOG_VERBOSE("DML::%s: WritebackDRAMClockChangeWatermark = %f\n", __func__, p->Watermark->WritebackDRAMClockChangeWatermark); + DML_LOG_VERBOSE("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, p->Watermark->WritebackFCLKChangeWatermark); + DML_LOG_VERBOSE("DML::%s: WritebackUrgentWatermark = %f\n", __func__, p->Watermark->WritebackUrgentWatermark); + DML_LOG_VERBOSE("DML::%s: USRRetrainingRequired = %u\n", __func__, p->USRRetrainingRequired); + DML_LOG_VERBOSE("DML::%s: USRRetrainingLatency = %f\n", __func__, p->mmSOCParameters.USRRetrainingLatency); #endif s->TotalPixelBW = 0.0; @@ -6836,11 +6776,11 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( s->LBLatencyHidingSourceLinesC[k] = (unsigned int)(math_min2((double)p->MaxLineBufferLines, math_floor2((double)p->LineBufferSize / LBBitPerPixel / ((double)p->SwathWidthC[k] / math_max2(h_ratio_c, 1.0)), 1)) - (v_taps_c - 1)); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, MaxLineBufferLines = %u\n", __func__, k, p->MaxLineBufferLines); - dml2_printf("DML::%s: k=%u, LineBufferSize = %u\n", __func__, k, p->LineBufferSize); - dml2_printf("DML::%s: k=%u, LBBitPerPixel = %u\n", __func__, k, LBBitPerPixel); - dml2_printf("DML::%s: k=%u, HRatio = %f\n", __func__, k, h_ratio); - dml2_printf("DML::%s: k=%u, VTaps = %f\n", __func__, k, v_taps); + DML_LOG_VERBOSE("DML::%s: k=%u, MaxLineBufferLines = %u\n", __func__, k, p->MaxLineBufferLines); + DML_LOG_VERBOSE("DML::%s: k=%u, LineBufferSize = %u\n", __func__, k, p->LineBufferSize); + DML_LOG_VERBOSE("DML::%s: k=%u, LBBitPerPixel = %f\n", __func__, k, LBBitPerPixel); + DML_LOG_VERBOSE("DML::%s: k=%u, HRatio = %f\n", __func__, k, h_ratio); + DML_LOG_VERBOSE("DML::%s: k=%u, VTaps = %f\n", __func__, k, v_taps); #endif s->EffectiveLBLatencyHidingY = s->LBLatencyHidingSourceLinesY[k] / v_ratio * (h_total / pixel_clock_mhz); @@ -6943,16 +6883,16 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( s->sub_vp_lines_l = s->src_y_pstate_l + s->src_y_ahead_l + p->meta_row_height_l[k]; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]); - dml2_printf("DML::%s: k=%u, 
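/*
 * Sketch of the LBLatencyHidingSourceLines terms above, not part of the
 * patch: line-buffer capacity expressed in source lines, capped by the
 * hardware maximum, minus the (vtaps - 1) lines the scaler holds back.
 */
#include <math.h>

static unsigned int lb_latency_hiding_lines(double max_lb_lines, double lb_size_bits,
                                            double lb_bits_per_pixel,
                                            double swath_width, double h_ratio,
                                            double v_taps)
{
        double lines = floor(lb_size_bits / lb_bits_per_pixel /
                             (swath_width / fmax(h_ratio, 1.0)));

        return (unsigned int)(fmin(max_lb_lines, lines) - (v_taps - 1));
}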
BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]); - dml2_printf("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]); - dml2_printf("DML::%s: k=%u, SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]); - dml2_printf("DML::%s: k=%u, LBLatencyHidingSourceLinesY = %u\n", __func__, k, s->LBLatencyHidingSourceLinesY[k]); - dml2_printf("DML::%s: k=%u, dst_y_pstate = %u\n", __func__, k, s->dst_y_pstate); - dml2_printf("DML::%s: k=%u, src_y_pstate_l = %u\n", __func__, k, s->src_y_pstate_l); - dml2_printf("DML::%s: k=%u, src_y_ahead_l = %u\n", __func__, k, s->src_y_ahead_l); - dml2_printf("DML::%s: k=%u, meta_row_height_l = %u\n", __func__, k, p->meta_row_height_l[k]); - dml2_printf("DML::%s: k=%u, sub_vp_lines_l = %u\n", __func__, k, s->sub_vp_lines_l); + DML_LOG_VERBOSE("DML::%s: k=%u, DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, LBLatencyHidingSourceLinesY = %u\n", __func__, k, s->LBLatencyHidingSourceLinesY[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_pstate = %u\n", __func__, k, s->dst_y_pstate); + DML_LOG_VERBOSE("DML::%s: k=%u, src_y_pstate_l = %u\n", __func__, k, s->src_y_pstate_l); + DML_LOG_VERBOSE("DML::%s: k=%u, src_y_ahead_l = %u\n", __func__, k, s->src_y_ahead_l); + DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_height_l = %u\n", __func__, k, p->meta_row_height_l[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, sub_vp_lines_l = %u\n", __func__, k, s->sub_vp_lines_l); #endif p->SubViewportLinesNeededInMALL[k] = s->sub_vp_lines_l; @@ -6967,10 +6907,10 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( p->SubViewportLinesNeededInMALL[k] = (unsigned int)(math_max2(s->sub_vp_lines_l, s->sub_vp_lines_c)); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, meta_row_height_c = %u\n", __func__, k, p->meta_row_height_c[k]); - dml2_printf("DML::%s: k=%u, src_y_pstate_c = %u\n", __func__, k, s->src_y_pstate_c); - dml2_printf("DML::%s: k=%u, src_y_ahead_c = %u\n", __func__, k, s->src_y_ahead_c); - dml2_printf("DML::%s: k=%u, sub_vp_lines_c = %u\n", __func__, k, s->sub_vp_lines_c); + DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_height_c = %u\n", __func__, k, p->meta_row_height_c[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, src_y_pstate_c = %u\n", __func__, k, s->src_y_pstate_c); + DML_LOG_VERBOSE("DML::%s: k=%u, src_y_ahead_c = %u\n", __func__, k, s->src_y_ahead_c); + DML_LOG_VERBOSE("DML::%s: k=%u, sub_vp_lines_c = %u\n", __func__, k, s->sub_vp_lines_c); #endif } } @@ -6992,10 +6932,10 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: DRAMClockChangeSupport = %u\n", __func__, *p->global_dram_clock_change_supported); - dml2_printf("DML::%s: FCLKChangeSupport = %u\n", __func__, *p->global_fclk_change_supported); - dml2_printf("DML::%s: MaxActiveFCLKChangeLatencySupported = %f\n", __func__, *p->MaxActiveFCLKChangeLatencySupported); - dml2_printf("DML::%s: USRRetrainingSupport = %u\n", __func__, *p->USRRetrainingSupport); + DML_LOG_VERBOSE("DML::%s: DRAMClockChangeSupport = %u\n", __func__, *p->global_dram_clock_change_supported); + DML_LOG_VERBOSE("DML::%s: FCLKChangeSupport = %u\n", __func__, *p->global_fclk_change_supported); + DML_LOG_VERBOSE("DML::%s: 
MaxActiveFCLKChangeLatencySupported = %f\n", __func__, *p->MaxActiveFCLKChangeLatencySupported); + DML_LOG_VERBOSE("DML::%s: USRRetrainingSupport = %u\n", __func__, *p->USRRetrainingSupport); #endif } @@ -7141,7 +7081,7 @@ static unsigned int get_qos_param_index(unsigned long uclk_freq_khz, const struc unsigned int index = 0; for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) { - dml2_printf("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %d\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz); + DML_LOG_VERBOSE("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %ld\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz); if (i == 0) index = 0; @@ -7153,32 +7093,30 @@ static unsigned int get_qos_param_index(unsigned long uclk_freq_khz, const struc break; } } -#if defined(__DML_VBA_DEBUG__) - dml2_printf("DML::%s: uclk_freq_khz = %d\n", __func__, uclk_freq_khz); - dml2_printf("DML::%s: index = %d\n", __func__, index); -#endif + DML_LOG_VERBOSE("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz); + DML_LOG_VERBOSE("DML::%s: index = %d\n", __func__, index); return index; } static unsigned int get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table) { unsigned int i; - bool clk_entry_found = 0; + bool clk_entry_found = false; for (i = 0; i < clk_table->uclk.num_clk_values; i++) { - dml2_printf("DML::%s: clk_table.uclk.clk_values_khz[%d] = %d\n", __func__, i, clk_table->uclk.clk_values_khz[i]); + DML_LOG_VERBOSE("DML::%s: clk_table.uclk.clk_values_khz[%d] = %ld\n", __func__, i, clk_table->uclk.clk_values_khz[i]); if (uclk_freq_khz == clk_table->uclk.clk_values_khz[i]) { - clk_entry_found = 1; + clk_entry_found = true; break; } } if (!clk_entry_found) - DML2_ASSERT(clk_entry_found); + DML_ASSERT(clk_entry_found); #if defined(__DML_VBA_DEBUG__) - dml2_printf("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz); - dml2_printf("DML::%s: index = %d\n", __func__, i); + DML_LOG_VERBOSE("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz); + DML_LOG_VERBOSE("DML::%s: index = %d\n", __func__, i); #endif return i; } @@ -7218,10 +7156,10 @@ static void calculate_hostvm_inefficiency_factor( if ((*HostVMInefficiencyFactorPrefetch < 4) && (remote_iommu_outstanding_translations < max_outstanding_reqs)) *HostVMInefficiencyFactorPrefetch = 4; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: urg_bandwidth_avail_active_pixel_and_vm = %f\n", __func__, urg_bandwidth_avail_active_pixel_and_vm); - dml2_printf("DML::%s: urg_bandwidth_avail_active_vm_only = %f\n", __func__, urg_bandwidth_avail_active_vm_only); - dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, *HostVMInefficiencyFactor); - dml2_printf("DML::%s: HostVMInefficiencyFactorPrefetch = %f\n", __func__, *HostVMInefficiencyFactorPrefetch); + DML_LOG_VERBOSE("DML::%s: urg_bandwidth_avail_active_pixel_and_vm = %f\n", __func__, urg_bandwidth_avail_active_pixel_and_vm); + DML_LOG_VERBOSE("DML::%s: urg_bandwidth_avail_active_vm_only = %f\n", __func__, urg_bandwidth_avail_active_vm_only); + DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, *HostVMInefficiencyFactor); + DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactorPrefetch = %f\n", __func__, *HostVMInefficiencyFactorPrefetch); #endif } } @@ -7335,30 +7273,660 @@ static void calculate_pstate_keepout_dst_lines( } } +static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_internal_display_mode_lib *mode_lib, + const struct dml2_display_cfg *display_cfg) +{ + struct 
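/*
 * Sketch of the DPM lookup pattern above, not part of the patch: scan the
 * clock table for an exact match and report "not found" for the caller to
 * assert on, as get_active_min_uclk_dpm_index() now does with a proper bool.
 */
static unsigned int find_uclk_index(const unsigned long *clk_values_khz,
                                    unsigned int num_clk_values,
                                    unsigned long uclk_freq_khz)
{
        for (unsigned int i = 0; i < num_clk_values; i++)
                if (clk_values_khz[i] == uclk_freq_khz)
                        return i;
        /* == num_clk_values means not found; upstream hits DML_ASSERT */
        return num_clk_values;
}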
dml2_core_calcs_mode_support_locals *s = &mode_lib->scratch.dml_core_mode_support_locals; + struct dml2_core_calcs_calculate_tdlut_setting_params *calculate_tdlut_setting_params = &mode_lib->scratch.calculate_tdlut_setting_params; + struct dml2_core_calcs_CalculatePrefetchSchedule_params *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params; + struct dml2_core_calcs_calculate_peak_bandwidth_required_params *calculate_peak_bandwidth_params = &mode_lib->scratch.calculate_peak_bandwidth_params; +#ifdef DML_GLOBAL_PREFETCH_CHECK + struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params *CheckGlobalPrefetchAdmissibility_params = &mode_lib->scratch.CheckGlobalPrefetchAdmissibility_params; +#endif + struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params *CalculateWatermarks_params = &mode_lib->scratch.CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params; + + double min_return_bw_for_latency; + unsigned int k; + + mode_lib->ms.TimeCalc = 24 / mode_lib->ms.dcfclk_deepsleep; + + calculate_hostvm_inefficiency_factor( + &s->HostVMInefficiencyFactor, + &s->HostVMInefficiencyFactorPrefetch, + + display_cfg->gpuvm_enable, + display_cfg->hostvm_enable, + mode_lib->ip.remote_iommu_outstanding_translations, + mode_lib->soc.max_outstanding_reqs, + mode_lib->ms.support.urg_bandwidth_available_pixel_and_vm[dml2_core_internal_soc_state_sys_active], + mode_lib->ms.support.urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_sys_active]); + + mode_lib->ms.Total3dlutActive = 0; + for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) { + if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut) + mode_lib->ms.Total3dlutActive = mode_lib->ms.Total3dlutActive + 1; + + // Calculate tdlut schedule related terms + calculate_tdlut_setting_params->dispclk_mhz = mode_lib->ms.RequiredDISPCLK; + calculate_tdlut_setting_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut; + calculate_tdlut_setting_params->tdlut_width_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_width_mode; + calculate_tdlut_setting_params->tdlut_addressing_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_addressing_mode; + calculate_tdlut_setting_params->cursor_buffer_size = mode_lib->ip.cursor_buffer_size; + calculate_tdlut_setting_params->gpuvm_enable = display_cfg->gpuvm_enable; + calculate_tdlut_setting_params->gpuvm_page_size_kbytes = display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes; + calculate_tdlut_setting_params->tdlut_mpc_width_flag = display_cfg->plane_descriptors[k].tdlut.tdlut_mpc_width_flag; + calculate_tdlut_setting_params->is_gfx11 = dml_get_gfx_version(display_cfg->plane_descriptors[k].surface.tiling); + + // output + calculate_tdlut_setting_params->tdlut_pte_bytes_per_frame = &s->tdlut_pte_bytes_per_frame[k]; + calculate_tdlut_setting_params->tdlut_bytes_per_frame = &s->tdlut_bytes_per_frame[k]; + calculate_tdlut_setting_params->tdlut_groups_per_2row_ub = &s->tdlut_groups_per_2row_ub[k]; + calculate_tdlut_setting_params->tdlut_opt_time = &s->tdlut_opt_time[k]; + calculate_tdlut_setting_params->tdlut_drain_time = &s->tdlut_drain_time[k]; + calculate_tdlut_setting_params->tdlut_bytes_to_deliver = &s->tdlut_bytes_to_deliver[k]; + calculate_tdlut_setting_params->tdlut_bytes_per_group = &s->tdlut_bytes_per_group[k]; + + calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params); + } + + min_return_bw_for_latency = 
mode_lib->ms.support.urg_bandwidth_available_min_latency[dml2_core_internal_soc_state_sys_active]; + + if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3) + s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes, + mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes)); + + CalculateExtraLatency( + display_cfg, + mode_lib->ip.rob_buffer_size_kbytes, + mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles, + s->ReorderingBytes, + mode_lib->ms.DCFCLK, + mode_lib->ms.FabricClock, + mode_lib->ip.pixel_chunk_size_kbytes, + min_return_bw_for_latency, + mode_lib->ms.num_active_planes, + mode_lib->ms.NoOfDPP, + mode_lib->ms.dpte_group_bytes, + s->tdlut_bytes_per_group, + s->HostVMInefficiencyFactor, + s->HostVMInefficiencyFactorPrefetch, + mode_lib->soc.hostvm_min_page_size_kbytes, + mode_lib->soc.qos_parameters.qos_type, + !(display_cfg->overrides.max_outstanding_when_urgent_expected_disable), + mode_lib->soc.max_outstanding_reqs, + mode_lib->ms.support.request_size_bytes_luma, + mode_lib->ms.support.request_size_bytes_chroma, + mode_lib->ip.meta_chunk_size_kbytes, + mode_lib->ip.dchub_arb_to_ret_delay, + mode_lib->ms.TripToMemory, + mode_lib->ip.hostvm_mode, + + // output + &mode_lib->ms.ExtraLatency, + &mode_lib->ms.ExtraLatency_sr, + &mode_lib->ms.ExtraLatencyPrefetch); + + for (k = 0; k < mode_lib->ms.num_active_planes; k++) + s->impacted_dst_y_pre[k] = 0; + + s->recalc_prefetch_schedule = 0; + s->recalc_prefetch_done = 0; + do { + mode_lib->ms.support.PrefetchSupported = true; + + for (k = 0; k < mode_lib->ms.num_active_planes; k++) { + s->line_times[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000); + s->pixel_format[k] = display_cfg->plane_descriptors[k].pixel_format; + + s->lb_source_lines_l[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits, + mode_lib->ms.NoOfDPP[k], + display_cfg->plane_descriptors[k].composition.viewport.plane0.width, + display_cfg->plane_descriptors[k].composition.viewport.plane0.height, + display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio, + display_cfg->plane_descriptors[k].composition.rotation_angle); + + s->lb_source_lines_c[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits, + mode_lib->ms.NoOfDPP[k], + display_cfg->plane_descriptors[k].composition.viewport.plane1.width, + display_cfg->plane_descriptors[k].composition.viewport.plane1.height, + display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio, + display_cfg->plane_descriptors[k].composition.rotation_angle); + + struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe; + + mode_lib->ms.TWait[k] = CalculateTWait( + display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns, + mode_lib->ms.UrgLatency, + mode_lib->ms.TripToMemory, + !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ? 
+ get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), mode_lib->ms.state_idx) : 0.0); + + myPipe->Dppclk = mode_lib->ms.RequiredDPPCLK[k]; + myPipe->Dispclk = mode_lib->ms.RequiredDISPCLK; + myPipe->PixelClock = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000); + myPipe->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep; + myPipe->DPPPerSurface = mode_lib->ms.NoOfDPP[k]; + myPipe->ScalerEnabled = display_cfg->plane_descriptors[k].composition.scaler_info.enabled; + myPipe->VRatio = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio; + myPipe->VRatioChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio; + myPipe->VTaps = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps; + myPipe->VTapsChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps; + myPipe->RotationAngle = display_cfg->plane_descriptors[k].composition.rotation_angle; + myPipe->mirrored = display_cfg->plane_descriptors[k].composition.mirrored; + myPipe->BlockWidth256BytesY = mode_lib->ms.Read256BlockWidthY[k]; + myPipe->BlockHeight256BytesY = mode_lib->ms.Read256BlockHeightY[k]; + myPipe->BlockWidth256BytesC = mode_lib->ms.Read256BlockWidthC[k]; + myPipe->BlockHeight256BytesC = mode_lib->ms.Read256BlockHeightC[k]; + myPipe->InterlaceEnable = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced; + myPipe->NumberOfCursors = display_cfg->plane_descriptors[k].cursor.num_cursors; + myPipe->VBlank = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active; + myPipe->HTotal = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total; + myPipe->HActive = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active; + myPipe->DCCEnable = display_cfg->plane_descriptors[k].surface.dcc.enable; + myPipe->ODMMode = mode_lib->ms.ODMMode[k]; + myPipe->SourcePixelFormat = display_cfg->plane_descriptors[k].pixel_format; + myPipe->BytePerPixelY = mode_lib->ms.BytePerPixelY[k]; + myPipe->BytePerPixelC = mode_lib->ms.BytePerPixelC[k]; + myPipe->ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported; + +#ifdef __DML_VBA_DEBUG__ + DML_LOG_VERBOSE("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k); + DML_LOG_VERBOSE("DML::%s: MaximumVStartup = %u\n", __func__, s->MaximumVStartup[k]); +#endif + CalculatePrefetchSchedule_params->display_cfg = display_cfg; + CalculatePrefetchSchedule_params->HostVMInefficiencyFactor = s->HostVMInefficiencyFactorPrefetch; + CalculatePrefetchSchedule_params->myPipe = myPipe; + CalculatePrefetchSchedule_params->DSCDelay = mode_lib->ms.DSCDelay[k]; + CalculatePrefetchSchedule_params->DPPCLKDelaySubtotalPlusCNVCFormater = mode_lib->ip.dppclk_delay_subtotal + mode_lib->ip.dppclk_delay_cnvc_formatter; + CalculatePrefetchSchedule_params->DPPCLKDelaySCL = mode_lib->ip.dppclk_delay_scl; + CalculatePrefetchSchedule_params->DPPCLKDelaySCLLBOnly = mode_lib->ip.dppclk_delay_scl_lb_only; + CalculatePrefetchSchedule_params->DPPCLKDelayCNVCCursor = mode_lib->ip.dppclk_delay_cnvc_cursor; + CalculatePrefetchSchedule_params->DISPCLKDelaySubtotal = mode_lib->ip.dispclk_delay_subtotal; + CalculatePrefetchSchedule_params->DPP_RECOUT_WIDTH = (unsigned 
int)(mode_lib->ms.SwathWidthY[k] / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio); + CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format; + CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters; + CalculatePrefetchSchedule_params->VStartup = s->MaximumVStartup[k]; + CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes; + CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable; + CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled; + CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = display_cfg->plane_descriptors[k].dynamic_meta_data.lines_before_active_required; + CalculatePrefetchSchedule_params->DynamicMetadataTransmittedBytes = display_cfg->plane_descriptors[k].dynamic_meta_data.transmitted_bytes; + CalculatePrefetchSchedule_params->UrgentLatency = mode_lib->ms.UrgLatency; + CalculatePrefetchSchedule_params->ExtraLatencyPrefetch = mode_lib->ms.ExtraLatencyPrefetch; + CalculatePrefetchSchedule_params->TCalc = mode_lib->ms.TimeCalc; + CalculatePrefetchSchedule_params->vm_bytes = mode_lib->ms.vm_bytes[k]; + CalculatePrefetchSchedule_params->PixelPTEBytesPerRow = mode_lib->ms.DPTEBytesPerRow[k]; + CalculatePrefetchSchedule_params->PrefetchSourceLinesY = mode_lib->ms.PrefetchLinesY[k]; + CalculatePrefetchSchedule_params->VInitPreFillY = mode_lib->ms.PrefillY[k]; + CalculatePrefetchSchedule_params->MaxNumSwathY = mode_lib->ms.MaxNumSwathY[k]; + CalculatePrefetchSchedule_params->PrefetchSourceLinesC = mode_lib->ms.PrefetchLinesC[k]; + CalculatePrefetchSchedule_params->VInitPreFillC = mode_lib->ms.PrefillC[k]; + CalculatePrefetchSchedule_params->MaxNumSwathC = mode_lib->ms.MaxNumSwathC[k]; + CalculatePrefetchSchedule_params->swath_width_luma_ub = mode_lib->ms.swath_width_luma_ub[k]; + CalculatePrefetchSchedule_params->swath_width_chroma_ub = mode_lib->ms.swath_width_chroma_ub[k]; + CalculatePrefetchSchedule_params->SwathHeightY = mode_lib->ms.SwathHeightY[k]; + CalculatePrefetchSchedule_params->SwathHeightC = mode_lib->ms.SwathHeightC[k]; + CalculatePrefetchSchedule_params->TWait = mode_lib->ms.TWait[k]; + CalculatePrefetchSchedule_params->Ttrip = mode_lib->ms.TripToMemory; + CalculatePrefetchSchedule_params->Turg = mode_lib->ms.UrgLatency; + CalculatePrefetchSchedule_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut; + CalculatePrefetchSchedule_params->tdlut_pte_bytes_per_frame = s->tdlut_pte_bytes_per_frame[k]; + CalculatePrefetchSchedule_params->tdlut_bytes_per_frame = s->tdlut_bytes_per_frame[k]; + CalculatePrefetchSchedule_params->tdlut_opt_time = s->tdlut_opt_time[k]; + CalculatePrefetchSchedule_params->tdlut_drain_time = s->tdlut_drain_time[k]; + CalculatePrefetchSchedule_params->num_cursors = (display_cfg->plane_descriptors[k].cursor.cursor_width > 0); + CalculatePrefetchSchedule_params->cursor_bytes_per_chunk = s->cursor_bytes_per_chunk[k]; + CalculatePrefetchSchedule_params->cursor_bytes_per_line = s->cursor_bytes_per_line[k]; + CalculatePrefetchSchedule_params->dcc_enable = display_cfg->plane_descriptors[k].surface.dcc.enable; + CalculatePrefetchSchedule_params->mrq_present = mode_lib->ip.dcn_mrq_present; + CalculatePrefetchSchedule_params->meta_row_bytes = mode_lib->ms.meta_row_bytes[k]; + 
CalculatePrefetchSchedule_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor[k]; + CalculatePrefetchSchedule_params->impacted_dst_y_pre = s->impacted_dst_y_pre[k]; + CalculatePrefetchSchedule_params->vactive_sw_bw_l = mode_lib->ms.vactive_sw_bw_l[k]; + CalculatePrefetchSchedule_params->vactive_sw_bw_c = mode_lib->ms.vactive_sw_bw_c[k]; + + // output + CalculatePrefetchSchedule_params->DSTXAfterScaler = &s->DSTXAfterScaler[k]; + CalculatePrefetchSchedule_params->DSTYAfterScaler = &s->DSTYAfterScaler[k]; + CalculatePrefetchSchedule_params->dst_y_prefetch = &mode_lib->ms.dst_y_prefetch[k]; + CalculatePrefetchSchedule_params->dst_y_per_vm_vblank = &mode_lib->ms.LinesForVM[k]; + CalculatePrefetchSchedule_params->dst_y_per_row_vblank = &mode_lib->ms.LinesForDPTERow[k]; + CalculatePrefetchSchedule_params->VRatioPrefetchY = &mode_lib->ms.VRatioPreY[k]; + CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->ms.VRatioPreC[k]; + CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->ms.RequiredPrefetchPixelDataBWLuma[k]; // prefetch_sw_bw_l + CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->ms.RequiredPrefetchPixelDataBWChroma[k]; // prefetch_sw_bw_c + CalculatePrefetchSchedule_params->RequiredPrefetchBWMax = &mode_lib->ms.RequiredPrefetchBWMax[k]; + CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->ms.NoTimeForDynamicMetadata[k]; + CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k]; + CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->ms.Tno_bw_flip[k]; + CalculatePrefetchSchedule_params->prefetch_vmrow_bw = &mode_lib->ms.prefetch_vmrow_bw[k]; + CalculatePrefetchSchedule_params->Tdmdl_vm = &s->dummy_single[0]; + CalculatePrefetchSchedule_params->Tdmdl = &s->dummy_single[1]; + CalculatePrefetchSchedule_params->TSetup = &s->dummy_single[2]; + CalculatePrefetchSchedule_params->Tvm_trips = &s->Tvm_trips[k]; + CalculatePrefetchSchedule_params->Tr0_trips = &s->Tr0_trips[k]; + CalculatePrefetchSchedule_params->Tvm_trips_flip = &s->Tvm_trips_flip[k]; + CalculatePrefetchSchedule_params->Tr0_trips_flip = &s->Tr0_trips_flip[k]; + CalculatePrefetchSchedule_params->Tvm_trips_flip_rounded = &s->Tvm_trips_flip_rounded[k]; + CalculatePrefetchSchedule_params->Tr0_trips_flip_rounded = &s->Tr0_trips_flip_rounded[k]; + CalculatePrefetchSchedule_params->VUpdateOffsetPix = &s->dummy_integer[0]; + CalculatePrefetchSchedule_params->VUpdateWidthPix = &s->dummy_integer[1]; + CalculatePrefetchSchedule_params->VReadyOffsetPix = &s->dummy_integer[2]; + CalculatePrefetchSchedule_params->prefetch_cursor_bw = &mode_lib->ms.prefetch_cursor_bw[k]; + CalculatePrefetchSchedule_params->prefetch_sw_bytes = &s->prefetch_sw_bytes[k]; + CalculatePrefetchSchedule_params->Tpre_rounded = &s->Tpre_rounded[k]; + CalculatePrefetchSchedule_params->Tpre_oto = &s->Tpre_oto[k]; + CalculatePrefetchSchedule_params->prefetch_swath_time_us = &s->prefetch_swath_time_us[k]; + + mode_lib->ms.NoTimeForPrefetch[k] = CalculatePrefetchSchedule(&mode_lib->scratch, CalculatePrefetchSchedule_params); + + mode_lib->ms.support.PrefetchSupported &= !mode_lib->ms.NoTimeForPrefetch[k]; + DML_LOG_VERBOSE("DML::%s: k=%d, dst_y_per_vm_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_vm_vblank); + DML_LOG_VERBOSE("DML::%s: k=%d, dst_y_per_row_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_row_vblank); + } // for k num_planes + + CalculateDCFCLKDeepSleepTdlut( + 
display_cfg, + mode_lib->ms.num_active_planes, + mode_lib->ms.BytePerPixelY, + mode_lib->ms.BytePerPixelC, + mode_lib->ms.SwathWidthY, + mode_lib->ms.SwathWidthC, + mode_lib->ms.NoOfDPP, + mode_lib->ms.PSCL_FACTOR, + mode_lib->ms.PSCL_FACTOR_CHROMA, + mode_lib->ms.RequiredDPPCLK, + mode_lib->ms.vactive_sw_bw_l, + mode_lib->ms.vactive_sw_bw_c, + mode_lib->soc.return_bus_width_bytes, + mode_lib->ms.RequiredDISPCLK, + s->tdlut_bytes_to_deliver, + s->prefetch_swath_time_us, + + /* Output */ + &mode_lib->ms.dcfclk_deepsleep); + + for (k = 0; k < mode_lib->ms.num_active_planes; k++) { + if (mode_lib->ms.dst_y_prefetch[k] < 2.0 + || mode_lib->ms.LinesForVM[k] >= 32.0 + || mode_lib->ms.LinesForDPTERow[k] >= 16.0 + || mode_lib->ms.NoTimeForPrefetch[k] == true + || s->DSTYAfterScaler[k] > 8) { + mode_lib->ms.support.PrefetchSupported = false; + DML_LOG_VERBOSE("DML::%s: k=%d, dst_y_prefetch=%f (should not be < 2)\n", __func__, k, mode_lib->ms.dst_y_prefetch[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, LinesForVM=%f (should not be >= 32)\n", __func__, k, mode_lib->ms.LinesForVM[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, LinesForDPTERow=%f (should not be >= 16)\n", __func__, k, mode_lib->ms.LinesForDPTERow[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, DSTYAfterScaler=%d (should be <= 8)\n", __func__, k, s->DSTYAfterScaler[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, NoTimeForPrefetch=%d\n", __func__, k, mode_lib->ms.NoTimeForPrefetch[k]); + } + } + + mode_lib->ms.support.DynamicMetadataSupported = true; + for (k = 0; k < mode_lib->ms.num_active_planes; ++k) { + if (mode_lib->ms.NoTimeForDynamicMetadata[k] == true) { + mode_lib->ms.support.DynamicMetadataSupported = false; + } + } + + mode_lib->ms.support.VRatioInPrefetchSupported = true; + for (k = 0; k < mode_lib->ms.num_active_planes; k++) { + if (mode_lib->ms.VRatioPreY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ || + mode_lib->ms.VRatioPreC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) { + mode_lib->ms.support.VRatioInPrefetchSupported = false; + DML_LOG_VERBOSE("DML::%s: k=%d VRatioPreY = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreY[k], __DML2_CALCS_MAX_VRATIO_PRE__); + DML_LOG_VERBOSE("DML::%s: k=%d VRatioPreC = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreC[k], __DML2_CALCS_MAX_VRATIO_PRE__); + DML_LOG_VERBOSE("DML::%s: VRatioInPrefetchSupported = %u\n", __func__, mode_lib->ms.support.VRatioInPrefetchSupported); + } + } + + mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.VRatioInPrefetchSupported; + + // By default, do not recalc prefetch schedule + s->recalc_prefetch_schedule = 0; + + // Only do urg vs prefetch bandwidth check, flip schedule check, power saving feature support check IF the Prefetch Schedule Check is ok + if (mode_lib->ms.support.PrefetchSupported) { + for (k = 0; k < mode_lib->ms.num_active_planes; k++) { + // Calculate Urgent burst factor for prefetch +#ifdef __DML_VBA_DEBUG__ + DML_LOG_VERBOSE("DML::%s: k=%d, Calling CalculateUrgentBurstFactor (for prefetch)\n", __func__, k); + DML_LOG_VERBOSE("DML::%s: k=%d, VRatioPreY=%f\n", __func__, k, mode_lib->ms.VRatioPreY[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, VRatioPreC=%f\n", __func__, k, mode_lib->ms.VRatioPreC[k]); +#endif + CalculateUrgentBurstFactor( + &display_cfg->plane_descriptors[k], + mode_lib->ms.swath_width_luma_ub[k], + mode_lib->ms.swath_width_chroma_ub[k], + mode_lib->ms.SwathHeightY[k], + mode_lib->ms.SwathHeightC[k], + s->line_times[k], + mode_lib->ms.UrgLatency, + mode_lib->ms.VRatioPreY[k], + mode_lib->ms.VRatioPreC[k], + mode_lib->ms.BytePerPixelInDETY[k], + 
mode_lib->ms.BytePerPixelInDETC[k], + mode_lib->ms.DETBufferSizeY[k], + mode_lib->ms.DETBufferSizeC[k], + /* Output */ + &mode_lib->ms.UrgentBurstFactorLumaPre[k], + &mode_lib->ms.UrgentBurstFactorChromaPre[k], + &mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]); + } + + // Calculate urgent bandwidth required, both urg and non urg peak bandwidth + // assume flip bw is 0 at this point + for (k = 0; k < mode_lib->ms.num_active_planes; k++) + mode_lib->ms.final_flip_bw[k] = 0; + + calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = mode_lib->ms.support.urg_vactive_bandwidth_required; + calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required; + calculate_peak_bandwidth_params->urg_bandwidth_required_qual = mode_lib->ms.support.urg_bandwidth_required_qual; + calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required; + calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = mode_lib->ms.surface_avg_vactive_required_bw; + calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw; + + calculate_peak_bandwidth_params->display_cfg = display_cfg; + calculate_peak_bandwidth_params->inc_flip_bw = 0; + calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes; + calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP; + calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0; + calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1; + calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0; + calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1; + calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor; + calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor; + + calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l; + calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c; + calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma; + calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma; + calculate_peak_bandwidth_params->prefetch_bandwidth_max = mode_lib->ms.RequiredPrefetchBWMax; + calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l; + calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c; + calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw; + calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw; + calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw; + calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw; + calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw; + calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw; + calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma; + calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma; + 
calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor; + calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre; + calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre; + calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre; + + calculate_peak_bandwidth_required( + &mode_lib->scratch, + calculate_peak_bandwidth_params); + + // Check urg peak bandwidth against available urg bw + // check at SDP and DRAM, for all soc states (SVP prefetch and Sys Active) + check_urgent_bandwidth_support( + &s->dummy_single[0], // double* frac_urg_bandwidth + &s->dummy_single[1], // double* frac_urg_bandwidth_mall + &mode_lib->ms.support.UrgVactiveBandwidthSupport, + &mode_lib->ms.support.PrefetchBandwidthSupported, + + mode_lib->soc.mall_allocated_for_dcn_mbytes, + mode_lib->ms.support.non_urg_bandwidth_required, + mode_lib->ms.support.urg_vactive_bandwidth_required, + mode_lib->ms.support.urg_bandwidth_required, + mode_lib->ms.support.urg_bandwidth_available); + + mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.PrefetchBandwidthSupported; + DML_LOG_VERBOSE("DML::%s: PrefetchBandwidthSupported=%0d\n", __func__, mode_lib->ms.support.PrefetchBandwidthSupported); + + for (k = 0; k < mode_lib->ms.num_active_planes; k++) { + if (mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]) { + mode_lib->ms.support.PrefetchSupported = false; + DML_LOG_VERBOSE("DML::%s: k=%d, NotEnoughUrgentLatencyHidingPre=%d\n", __func__, k, mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]); + } + } + +#ifdef DML_GLOBAL_PREFETCH_CHECK + if (mode_lib->ms.support.PrefetchSupported && mode_lib->ms.num_active_planes > 1 && s->recalc_prefetch_done == 0) { + CheckGlobalPrefetchAdmissibility_params->num_active_planes = mode_lib->ms.num_active_planes; + CheckGlobalPrefetchAdmissibility_params->pixel_format = s->pixel_format; + CheckGlobalPrefetchAdmissibility_params->chunk_bytes_l = mode_lib->ip.pixel_chunk_size_kbytes * 1024; + CheckGlobalPrefetchAdmissibility_params->chunk_bytes_c = mode_lib->ip.pixel_chunk_size_kbytes * 1024; + CheckGlobalPrefetchAdmissibility_params->lb_source_lines_l = s->lb_source_lines_l; + CheckGlobalPrefetchAdmissibility_params->lb_source_lines_c = s->lb_source_lines_c; + CheckGlobalPrefetchAdmissibility_params->swath_height_l = mode_lib->ms.SwathHeightY; + CheckGlobalPrefetchAdmissibility_params->swath_height_c = mode_lib->ms.SwathHeightC; + CheckGlobalPrefetchAdmissibility_params->rob_buffer_size_kbytes = mode_lib->ip.rob_buffer_size_kbytes; + CheckGlobalPrefetchAdmissibility_params->compressed_buffer_size_kbytes = mode_lib->ms.CompressedBufferSizeInkByte; + CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_l = mode_lib->ms.DETBufferSizeY; + CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_c = mode_lib->ms.DETBufferSizeC; + CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_l = s->full_swath_bytes_l; + CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_c = s->full_swath_bytes_c; + CheckGlobalPrefetchAdmissibility_params->prefetch_sw_bytes = s->prefetch_sw_bytes; + CheckGlobalPrefetchAdmissibility_params->Tpre_rounded = s->Tpre_rounded; + CheckGlobalPrefetchAdmissibility_params->Tpre_oto = s->Tpre_oto; + CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = 
mode_lib->ms.support.urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]; + CheckGlobalPrefetchAdmissibility_params->line_time = s->line_times; + CheckGlobalPrefetchAdmissibility_params->dst_y_prefetch = mode_lib->ms.dst_y_prefetch; + if (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps < 10 * 1024) + CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = 10 * 1024; + + CheckGlobalPrefetchAdmissibility_params->estimated_dcfclk_mhz = (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps / (double) mode_lib->soc.return_bus_width_bytes) / + ((double)mode_lib->soc.qos_parameters.derate_table.system_active_urgent.dcfclk_derate_percent / 100.0); + + // if recalc_prefetch_schedule is set, recalculate the prefetch schedule with the new impacted_Tpre, prefetch should be possible + CheckGlobalPrefetchAdmissibility_params->recalc_prefetch_schedule = &s->recalc_prefetch_schedule; + CheckGlobalPrefetchAdmissibility_params->impacted_dst_y_pre = s->impacted_dst_y_pre; + mode_lib->ms.support.PrefetchSupported = CheckGlobalPrefetchAdmissibility(&mode_lib->scratch, CheckGlobalPrefetchAdmissibility_params); + s->recalc_prefetch_done = 1; + s->recalc_prefetch_schedule = 1; + } +#endif + } // prefetch schedule ok, do urg bw and flip schedule + } while (s->recalc_prefetch_schedule); + + // Flip Schedule + // Both prefetch schedule and BW okay + if (mode_lib->ms.support.PrefetchSupported == true) { + mode_lib->ms.BandwidthAvailableForImmediateFlip = + get_bandwidth_available_for_immediate_flip( + dml2_core_internal_soc_state_sys_active, + mode_lib->ms.support.urg_bandwidth_required_qual, // no flip + mode_lib->ms.support.urg_bandwidth_available); + + mode_lib->ms.TotImmediateFlipBytes = 0; + for (k = 0; k < mode_lib->ms.num_active_planes; k++) { + if (display_cfg->plane_descriptors[k].immediate_flip) { + s->per_pipe_flip_bytes[k] = get_pipe_flip_bytes( + s->HostVMInefficiencyFactor, + mode_lib->ms.vm_bytes[k], + mode_lib->ms.DPTEBytesPerRow[k], + mode_lib->ms.meta_row_bytes[k]); + } else { + s->per_pipe_flip_bytes[k] = 0; + } + mode_lib->ms.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->ms.NoOfDPP[k]; + + } + + for (k = 0; k < mode_lib->ms.num_active_planes; k++) { + CalculateFlipSchedule( + &mode_lib->scratch, + display_cfg->plane_descriptors[k].immediate_flip, + 1, // use_lb_flip_bw + s->HostVMInefficiencyFactor, + s->Tvm_trips_flip[k], + s->Tr0_trips_flip[k], + s->Tvm_trips_flip_rounded[k], + s->Tr0_trips_flip_rounded[k], + display_cfg->gpuvm_enable, + mode_lib->ms.vm_bytes[k], + mode_lib->ms.DPTEBytesPerRow[k], + mode_lib->ms.BandwidthAvailableForImmediateFlip, + mode_lib->ms.TotImmediateFlipBytes, + display_cfg->plane_descriptors[k].pixel_format, + (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)), + display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio, + display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio, + mode_lib->ms.Tno_bw_flip[k], + mode_lib->ms.dpte_row_height[k], + mode_lib->ms.dpte_row_height_chroma[k], + mode_lib->ms.use_one_row_for_frame_flip[k], + mode_lib->ip.max_flip_time_us, + mode_lib->ip.max_flip_time_lines, + s->per_pipe_flip_bytes[k], + mode_lib->ms.meta_row_bytes[k], + s->meta_row_height_luma[k], + s->meta_row_height_chroma[k], + 
mode_lib->ip.dcn_mrq_present && display_cfg->plane_descriptors[k].surface.dcc.enable, + + /* Output */ + &mode_lib->ms.dst_y_per_vm_flip[k], + &mode_lib->ms.dst_y_per_row_flip[k], + &mode_lib->ms.final_flip_bw[k], + &mode_lib->ms.ImmediateFlipSupportedForPipe[k]); + } + + calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = s->dummy_bw; + calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required_flip; + calculate_peak_bandwidth_params->urg_bandwidth_required_qual = s->dummy_bw; + calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required_flip; + calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = s->surface_dummy_bw; + calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw; + + calculate_peak_bandwidth_params->display_cfg = display_cfg; + calculate_peak_bandwidth_params->inc_flip_bw = 1; + calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes; + calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP; + calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0; + calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1; + calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0; + calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1; + calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor; + calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor; + + calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l; + calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c; + calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma; + calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma; + calculate_peak_bandwidth_params->prefetch_bandwidth_max = mode_lib->ms.RequiredPrefetchBWMax; + calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l; + calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c; + calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw; + calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw; + calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw; + calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw; + calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw; + calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw; + calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma; + calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma; + calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor; + calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre; + calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre; + 
calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre; + + calculate_peak_bandwidth_required( + &mode_lib->scratch, + calculate_peak_bandwidth_params); + + calculate_immediate_flip_bandwidth_support( + &s->dummy_single[0], // double* frac_urg_bandwidth_flip + &mode_lib->ms.support.ImmediateFlipSupport, + + dml2_core_internal_soc_state_sys_active, + mode_lib->ms.support.urg_bandwidth_required_flip, + mode_lib->ms.support.non_urg_bandwidth_required_flip, + mode_lib->ms.support.urg_bandwidth_available); + + for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) { + if (display_cfg->plane_descriptors[k].immediate_flip == true && mode_lib->ms.ImmediateFlipSupportedForPipe[k] == false) + mode_lib->ms.support.ImmediateFlipSupport = false; + } + + } else { // if prefetch is not supported, assume iflip is not supported too + mode_lib->ms.support.ImmediateFlipSupport = false; + } + + s->mSOCParameters.UrgentLatency = mode_lib->ms.UrgLatency; + s->mSOCParameters.ExtraLatency = mode_lib->ms.ExtraLatency; + s->mSOCParameters.ExtraLatency_sr = mode_lib->ms.ExtraLatency_sr; + s->mSOCParameters.WritebackLatency = mode_lib->soc.qos_parameters.writeback.base_latency_us; + s->mSOCParameters.DRAMClockChangeLatency = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us; + s->mSOCParameters.FCLKChangeLatency = mode_lib->soc.power_management_parameters.fclk_change_blackout_us; + s->mSOCParameters.SRExitTime = mode_lib->soc.power_management_parameters.stutter_exit_latency_us; + s->mSOCParameters.SREnterPlusExitTime = mode_lib->soc.power_management_parameters.stutter_enter_plus_exit_latency_us; + s->mSOCParameters.SRExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_exit_latency_us; + s->mSOCParameters.SREnterPlusExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_enter_plus_exit_latency_us; + s->mSOCParameters.USRRetrainingLatency = 0; + s->mSOCParameters.SMNLatency = 0; + s->mSOCParameters.g6_temp_read_blackout_us = get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), mode_lib->ms.state_idx); + s->mSOCParameters.max_urgent_latency_us = get_max_urgent_latency_us(&mode_lib->soc.qos_parameters.qos_params.dcn4x, mode_lib->ms.uclk_freq_mhz, mode_lib->ms.FabricClock, mode_lib->ms.state_idx); + s->mSOCParameters.df_response_time_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles / mode_lib->ms.FabricClock; + s->mSOCParameters.qos_type = mode_lib->soc.qos_parameters.qos_type; + + CalculateWatermarks_params->display_cfg = display_cfg; + CalculateWatermarks_params->USRRetrainingRequired = false; + CalculateWatermarks_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes; + CalculateWatermarks_params->MaxLineBufferLines = mode_lib->ip.max_line_buffer_lines; + CalculateWatermarks_params->LineBufferSize = mode_lib->ip.line_buffer_size_bits; + CalculateWatermarks_params->WritebackInterfaceBufferSize = mode_lib->ip.writeback_interface_buffer_size_kbytes; + CalculateWatermarks_params->DCFCLK = mode_lib->ms.DCFCLK; + CalculateWatermarks_params->SynchronizeTimings = display_cfg->overrides.synchronize_timings; + CalculateWatermarks_params->SynchronizeDRRDisplaysForUCLKPStateChange = display_cfg->overrides.synchronize_ddr_displays_for_uclk_pstate_change; + CalculateWatermarks_params->dpte_group_bytes = mode_lib->ms.dpte_group_bytes; + CalculateWatermarks_params->mmSOCParameters = s->mSOCParameters; + CalculateWatermarks_params->WritebackChunkSize = 
mode_lib->ip.writeback_chunk_size_kbytes; + CalculateWatermarks_params->SOCCLK = mode_lib->ms.SOCCLK; + CalculateWatermarks_params->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep; + CalculateWatermarks_params->DETBufferSizeY = mode_lib->ms.DETBufferSizeY; + CalculateWatermarks_params->DETBufferSizeC = mode_lib->ms.DETBufferSizeC; + CalculateWatermarks_params->SwathHeightY = mode_lib->ms.SwathHeightY; + CalculateWatermarks_params->SwathHeightC = mode_lib->ms.SwathHeightC; + CalculateWatermarks_params->SwathWidthY = mode_lib->ms.SwathWidthY; + CalculateWatermarks_params->SwathWidthC = mode_lib->ms.SwathWidthC; + CalculateWatermarks_params->DPPPerSurface = mode_lib->ms.NoOfDPP; + CalculateWatermarks_params->BytePerPixelDETY = mode_lib->ms.BytePerPixelInDETY; + CalculateWatermarks_params->BytePerPixelDETC = mode_lib->ms.BytePerPixelInDETC; + CalculateWatermarks_params->DSTXAfterScaler = s->DSTXAfterScaler; + CalculateWatermarks_params->DSTYAfterScaler = s->DSTYAfterScaler; + CalculateWatermarks_params->UnboundedRequestEnabled = mode_lib->ms.UnboundedRequestEnabled; + CalculateWatermarks_params->CompressedBufferSizeInkByte = mode_lib->ms.CompressedBufferSizeInkByte; + CalculateWatermarks_params->meta_row_height_l = s->meta_row_height_luma; + CalculateWatermarks_params->meta_row_height_c = s->meta_row_height_chroma; + + // Output + CalculateWatermarks_params->Watermark = &mode_lib->ms.support.watermarks; // Watermarks *Watermark + CalculateWatermarks_params->DRAMClockChangeSupport = mode_lib->ms.support.DRAMClockChangeSupport; + CalculateWatermarks_params->global_dram_clock_change_supported = &mode_lib->ms.support.global_dram_clock_change_supported; + CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = &s->dummy_single_array[0]; // double *MaxActiveDRAMClockChangeLatencySupported[] + CalculateWatermarks_params->SubViewportLinesNeededInMALL = mode_lib->ms.SubViewportLinesNeededInMALL; // unsigned int SubViewportLinesNeededInMALL[] + CalculateWatermarks_params->FCLKChangeSupport = mode_lib->ms.support.FCLKChangeSupport; + CalculateWatermarks_params->global_fclk_change_supported = &mode_lib->ms.support.global_fclk_change_supported; + CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &s->dummy_single[0]; // double *MaxActiveFCLKChangeLatencySupported + CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->ms.support.USRRetrainingSupport; + CalculateWatermarks_params->g6_temp_read_support = &mode_lib->ms.support.g6_temp_read_support; + CalculateWatermarks_params->VActiveLatencyHidingMargin = mode_lib->ms.VActiveLatencyHidingMargin; + CalculateWatermarks_params->VActiveLatencyHidingUs = mode_lib->ms.VActiveLatencyHidingUs; + + CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(&mode_lib->scratch, CalculateWatermarks_params); + + calculate_pstate_keepout_dst_lines(display_cfg, &mode_lib->ms.support.watermarks, s->dummy_integer_array[0]); + DML_LOG_VERBOSE("DML::%s: Done prefetch calculation\n", __func__); + +} + + static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out_params) { struct dml2_core_internal_display_mode_lib *mode_lib = in_out_params->mode_lib; const struct dml2_display_cfg *display_cfg = in_out_params->in_display_cfg; const struct dml2_mcg_min_clock_table *min_clk_table = in_out_params->min_clk_table; -#if defined(__DML_VBA_DEBUG__) - double old_ReadBandwidthLuma; - double old_ReadBandwidthChroma; -#endif double outstanding_latency_us = 0; - double min_return_bw_for_latency; struct dml2_core_calcs_mode_support_locals *s 
= &mode_lib->scratch.dml_core_mode_support_locals; - struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params *CalculateWatermarks_params = &mode_lib->scratch.CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params; struct dml2_core_calcs_CalculateVMRowAndSwath_params *CalculateVMRowAndSwath_params = &mode_lib->scratch.CalculateVMRowAndSwath_params; struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params *CalculateSwathAndDETConfiguration_params = &mode_lib->scratch.CalculateSwathAndDETConfiguration_params; - struct dml2_core_calcs_CalculatePrefetchSchedule_params *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params; -#ifdef DML_GLOBAL_PREFETCH_CHECK - struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params *CheckGlobalPrefetchAdmissibility_params = &mode_lib->scratch.CheckGlobalPrefetchAdmissibility_params; -#endif - struct dml2_core_calcs_calculate_tdlut_setting_params *calculate_tdlut_setting_params = &mode_lib->scratch.calculate_tdlut_setting_params; struct dml2_core_calcs_calculate_mcache_setting_params *calculate_mcache_setting_params = &mode_lib->scratch.calculate_mcache_setting_params; - struct dml2_core_calcs_calculate_peak_bandwidth_required_params *calculate_peak_bandwidth_params = &mode_lib->scratch.calculate_peak_bandwidth_params; struct dml2_core_calcs_calculate_bytes_to_fetch_required_to_hide_latency_params *calculate_bytes_to_fetch_required_to_hide_latency_params = &mode_lib->scratch.calculate_bytes_to_fetch_required_to_hide_latency_params; unsigned int k, m, n; @@ -7374,9 +7942,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.FabricClock = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_fclk_khz / 1000); mode_lib->ms.MaxDCFCLK = (double)min_clk_table->max_clocks_khz.dcfclk / 1000; mode_lib->ms.MaxFabricClock = (double)min_clk_table->max_clocks_khz.fclk / 1000; - mode_lib->ms.max_dispclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dispclk / 1000; + mode_lib->ms.max_dispclk_freq_mhz = (double)min_clk_table->max_ss_clocks_khz.dispclk / 1000; mode_lib->ms.max_dscclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dscclk / 1000; - mode_lib->ms.max_dppclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dppclk / 1000; + mode_lib->ms.max_dppclk_freq_mhz = (double)min_clk_table->max_ss_clocks_khz.dppclk / 1000; mode_lib->ms.uclk_freq_mhz = dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config); mode_lib->ms.dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps / 1000); mode_lib->ms.max_dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[min_clk_table->dram_bw_table.num_entries - 1].pre_derate_dram_bw_kbps / 1000); @@ -7384,25 +7952,25 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index((unsigned int) (mode_lib->ms.uclk_freq_mhz * 1000.0), &mode_lib->soc.clk_table); #if defined(__DML_VBA_DEBUG__) - dml2_printf("DML::%s: --- START --- \n", __func__); - dml2_printf("DML::%s: num_active_planes = %u\n", __func__, mode_lib->ms.num_active_planes); - dml2_printf("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index); - dml2_printf("DML::%s: qos_param_index = %0d\n", __func__, mode_lib->ms.qos_param_index); - dml2_printf("DML::%s: SOCCLK = 
%f\n", __func__, mode_lib->ms.SOCCLK); - dml2_printf("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->ms.dram_bw_mbps); - dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz); - dml2_printf("DML::%s: DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK); - dml2_printf("DML::%s: FabricClock = %f\n", __func__, mode_lib->ms.FabricClock); - dml2_printf("DML::%s: MaxDCFCLK = %f\n", __func__, mode_lib->ms.MaxDCFCLK); - dml2_printf("DML::%s: max_dispclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dispclk_freq_mhz); - dml2_printf("DML::%s: max_dscclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dscclk_freq_mhz); - dml2_printf("DML::%s: max_dppclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dppclk_freq_mhz); - dml2_printf("DML::%s: MaxFabricClock = %f\n", __func__, mode_lib->ms.MaxFabricClock); - dml2_printf("DML::%s: ip.compressed_buffer_segment_size_in_kbytes = %u\n", __func__, mode_lib->ip.compressed_buffer_segment_size_in_kbytes); - dml2_printf("DML::%s: ip.dcn_mrq_present = %u\n", __func__, mode_lib->ip.dcn_mrq_present); + DML_LOG_VERBOSE("DML::%s: --- START --- \n", __func__); + DML_LOG_VERBOSE("DML::%s: num_active_planes = %u\n", __func__, mode_lib->ms.num_active_planes); + DML_LOG_VERBOSE("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index); + DML_LOG_VERBOSE("DML::%s: qos_param_index = %0d\n", __func__, mode_lib->ms.qos_param_index); + DML_LOG_VERBOSE("DML::%s: SOCCLK = %f\n", __func__, mode_lib->ms.SOCCLK); + DML_LOG_VERBOSE("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->ms.dram_bw_mbps); + DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz); + DML_LOG_VERBOSE("DML::%s: DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK); + DML_LOG_VERBOSE("DML::%s: FabricClock = %f\n", __func__, mode_lib->ms.FabricClock); + DML_LOG_VERBOSE("DML::%s: MaxDCFCLK = %f\n", __func__, mode_lib->ms.MaxDCFCLK); + DML_LOG_VERBOSE("DML::%s: max_dispclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dispclk_freq_mhz); + DML_LOG_VERBOSE("DML::%s: max_dscclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dscclk_freq_mhz); + DML_LOG_VERBOSE("DML::%s: max_dppclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dppclk_freq_mhz); + DML_LOG_VERBOSE("DML::%s: MaxFabricClock = %f\n", __func__, mode_lib->ms.MaxFabricClock); + DML_LOG_VERBOSE("DML::%s: ip.compressed_buffer_segment_size_in_kbytes = %u\n", __func__, mode_lib->ip.compressed_buffer_segment_size_in_kbytes); + DML_LOG_VERBOSE("DML::%s: ip.dcn_mrq_present = %u\n", __func__, mode_lib->ip.dcn_mrq_present); for (k = 0; k < mode_lib->ms.num_active_planes; k++) - dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns); + DML_LOG_VERBOSE("DML::%s: plane_%d: reserved_vblank_time_ns = %lu\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns); #endif CalculateMaxDETAndMinCompressedBufferSize( @@ -7504,12 +8072,10 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out display_cfg->plane_descriptors[k].cursor.cursor_bpp / 8.0 / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)); #ifdef __DML_VBA_DEBUG__ - old_ReadBandwidthLuma = mode_lib->ms.SwathWidthYSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelInDETY[k], 1.0) / 
(display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio; - old_ReadBandwidthChroma = mode_lib->ms.SwathWidthYSingleDPP[k] / 2 * math_ceil2(mode_lib->ms.BytePerPixelInDETC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio / 2.0; - dml2_printf("DML::%s: k=%u, old_ReadBandwidthLuma = %f\n", __func__, k, old_ReadBandwidthLuma); - dml2_printf("DML::%s: k=%u, old_ReadBandwidthChroma = %f\n", __func__, k, old_ReadBandwidthChroma); - dml2_printf("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_l[k]); - dml2_printf("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_c[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, old_ReadBandwidthLuma = %f\n", __func__, k, mode_lib->ms.SwathWidthYSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelInDETY[k], 1.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio); + DML_LOG_VERBOSE("DML::%s: k=%u, old_ReadBandwidthChroma = %f\n", __func__, k, mode_lib->ms.SwathWidthYSingleDPP[k] / 2 * math_ceil2(mode_lib->ms.BytePerPixelInDETC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio / 2.0); + DML_LOG_VERBOSE("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_l[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_c[k]); #endif } @@ -7629,13 +8195,13 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.MaximumSwathWidthLuma[k] = math_min2(s->MaximumSwathWidthSupportLuma, mode_lib->ms.MaximumSwathWidthInLineBufferLuma); mode_lib->ms.MaximumSwathWidthChroma[k] = math_min2(s->MaximumSwathWidthSupportChroma, mode_lib->ms.MaximumSwathWidthInLineBufferChroma); - dml2_printf("DML::%s: k=%u MaximumSwathWidthLuma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthLuma[k]); - dml2_printf("DML::%s: k=%u MaximumSwathWidthSupportLuma=%u\n", __func__, k, s->MaximumSwathWidthSupportLuma); - dml2_printf("DML::%s: k=%u MaximumSwathWidthInLineBufferLuma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthInLineBufferLuma); + DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthLuma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthLuma[k]); + DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthSupportLuma=%u\n", __func__, k, s->MaximumSwathWidthSupportLuma); + DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthInLineBufferLuma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthInLineBufferLuma); - dml2_printf("DML::%s: k=%u MaximumSwathWidthChroma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthChroma[k]); - dml2_printf("DML::%s: k=%u 
MaximumSwathWidthSupportChroma=%u\n", __func__, k, s->MaximumSwathWidthSupportChroma); - dml2_printf("DML::%s: k=%u MaximumSwathWidthInLineBufferChroma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthInLineBufferChroma); + DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthChroma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthChroma[k]); + DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthSupportChroma=%u\n", __func__, k, s->MaximumSwathWidthSupportChroma); + DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthInLineBufferChroma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthInLineBufferChroma); } /* Cursor Support Check */ @@ -7672,11 +8238,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.support.AlignedCPitch[k] > display_cfg->plane_descriptors[k].surface.plane1.pitch) { mode_lib->ms.support.PitchSupport = false; #if defined(__DML_VBA_DEBUG__) - dml2_printf("DML::%s: k=%u AlignedYPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedYPitch[k]); - dml2_printf("DML::%s: k=%u PitchY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.pitch); - dml2_printf("DML::%s: k=%u AlignedCPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedCPitch[k]); - dml2_printf("DML::%s: k=%u PitchC = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane1.pitch); - dml2_printf("DML::%s: k=%u PitchSupport = %d\n", __func__, k, mode_lib->ms.support.PitchSupport); + DML_LOG_VERBOSE("DML::%s: k=%u AlignedYPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedYPitch[k]); + DML_LOG_VERBOSE("DML::%s: k=%u PitchY = %ld\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.pitch); + DML_LOG_VERBOSE("DML::%s: k=%u AlignedCPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedCPitch[k]); + DML_LOG_VERBOSE("DML::%s: k=%u PitchC = %ld\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane1.pitch); + DML_LOG_VERBOSE("DML::%s: k=%u PitchSupport = %d\n", __func__, k, mode_lib->ms.support.PitchSupport); #endif } @@ -7708,11 +8274,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out display_cfg->plane_descriptors[k].composition.viewport.plane0.height > display_cfg->plane_descriptors[k].surface.plane0.height) { mode_lib->ms.support.ViewportExceedsSurface = true; #if defined(__DML_VBA_DEBUG__) - dml2_printf("DML::%s: k=%u ViewportWidth = %d\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width); - dml2_printf("DML::%s: k=%u SurfaceWidthY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.width); - dml2_printf("DML::%s: k=%u ViewportHeight = %d\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height); - dml2_printf("DML::%s: k=%u SurfaceHeightY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.height); - dml2_printf("DML::%s: k=%u ViewportExceedsSurface = %d\n", __func__, k, mode_lib->ms.support.ViewportExceedsSurface); + DML_LOG_VERBOSE("DML::%s: k=%u ViewportWidth = %ld\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width); + DML_LOG_VERBOSE("DML::%s: k=%u SurfaceWidthY = %ld\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.width); + DML_LOG_VERBOSE("DML::%s: k=%u ViewportHeight = %ld\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height); + DML_LOG_VERBOSE("DML::%s: k=%u SurfaceHeightY = %ld\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.height); + DML_LOG_VERBOSE("DML::%s: k=%u 
ViewportExceedsSurface = %d\n", __func__, k, mode_lib->ms.support.ViewportExceedsSurface); #endif } if (dml_is_420(display_cfg->plane_descriptors[k].pixel_format) || display_cfg->plane_descriptors[k].pixel_format == dml2_rgbe_alpha) { @@ -7894,8 +8460,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.TotalNumberOfActiveDPP = mode_lib->ms.TotalNumberOfActiveDPP + s->NumberOfDPPDSC; } #if defined(__DML_VBA_DEBUG__) - dml2_printf("DML::%s: k=%d RequiresDSC = %d\n", __func__, k, mode_lib->ms.RequiresDSC[k]); - dml2_printf("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]); + DML_LOG_VERBOSE("DML::%s: k=%d RequiresDSC = %d\n", __func__, k, mode_lib->ms.RequiresDSC[k]); + DML_LOG_VERBOSE("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]); #endif // ensure the number dsc slices is integer multiple based on ODM mode @@ -7911,9 +8477,9 @@ mode_lib->ms.support.DSCSlicesODMModeSupported = ((mode_lib->ms.support.NumberOfDSCSlices[k] % 4) == 0); #if defined(__DML_VBA_DEBUG__) if (!mode_lib->ms.support.DSCSlicesODMModeSupported) { - dml2_printf("DML::%s: k=%d Invalid dsc num_slices and ODM mode setting\n", __func__, k); - dml2_printf("DML::%s: k=%d num_slices = %d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.overrides.num_slices); - dml2_printf("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]); + DML_LOG_VERBOSE("DML::%s: k=%d Invalid dsc num_slices and ODM mode setting\n", __func__, k); + DML_LOG_VERBOSE("DML::%s: k=%d num_slices = %d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.overrides.num_slices); + DML_LOG_VERBOSE("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]); } #endif } else { @@ -7958,7 +8524,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.MPCCombine[k] = false; mode_lib->ms.NoOfDPP[k] = 1; if (!mode_lib->ms.SingleDPPViewportSizeSupportPerSurface[k]) { - dml2_printf("WARNING: DML::%s: MPCC is override to disable but viewport is too large to be supported with single pipe!\n", __func__); + DML_LOG_VERBOSE("WARNING: DML::%s: MPCC override is set to disable, but the viewport is too large to be supported with a single pipe!\n", __func__); } } else { if ((mode_lib->ms.MinDPPCLKUsingSingleDPP[k] > mode_lib->ms.max_dppclk_freq_mhz) || !mode_lib->ms.SingleDPPViewportSizeSupportPerSurface[k]) { @@ -7968,7 +8534,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out } } #if defined(__DML_VBA_DEBUG__) - dml2_printf("DML::%s: k=%d, NoOfDPP = %d\n", __func__, k, mode_lib->ms.NoOfDPP[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, NoOfDPP = %d\n", __func__, k, mode_lib->ms.NoOfDPP[k]); #endif } @@ -8138,7 +8704,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.audio_sample_rate, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.audio_sample_layout); - if (mode_lib->ms.RequiredDTBCLK[k] > ((double)min_clk_table->max_clocks_khz.dtbclk / 1000)) { + if (mode_lib->ms.RequiredDTBCLK[k] > ((double)min_clk_table->max_ss_clocks_khz.dtbclk / 1000)) { mode_lib->ms.support.DTBCLKRequiredMoreThanSupported = true; } } else { @@ -8167,7 +8733,7 @@ static bool 
dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out s->DSCFormatFactor = 1; } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, RequiresDSC = %u\n", __func__, k, mode_lib->ms.RequiresDSC[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, RequiresDSC = %u\n", __func__, k, mode_lib->ms.RequiresDSC[k]); #endif if (mode_lib->ms.RequiresDSC[k] == true) { s->PixelClockBackEndFactor = 3.0; @@ -8185,10 +8751,10 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, PixelClockBackEnd = %f\n", __func__, k, s->PixelClockBackEnd[k]); - dml2_printf("DML::%s: k=%u, required_dscclk_freq_mhz = %f\n", __func__, k, mode_lib->ms.required_dscclk_freq_mhz[k]); - dml2_printf("DML::%s: k=%u, DSCFormatFactor = %u\n", __func__, k, s->DSCFormatFactor); - dml2_printf("DML::%s: k=%u, DSCCLKRequiredMoreThanSupported = %u\n", __func__, k, mode_lib->ms.support.DSCCLKRequiredMoreThanSupported); + DML_LOG_VERBOSE("DML::%s: k=%u, PixelClockBackEnd = %f\n", __func__, k, s->PixelClockBackEnd[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, required_dscclk_freq_mhz = %f\n", __func__, k, mode_lib->ms.required_dscclk_freq_mhz[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, DSCFormatFactor = %u\n", __func__, k, s->DSCFormatFactor); + DML_LOG_VERBOSE("DML::%s: k=%u, DSCCLKRequiredMoreThanSupported = %u\n", __func__, k, mode_lib->ms.support.DSCCLKRequiredMoreThanSupported); #endif } } @@ -8423,13 +8989,13 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.support.DCCMetaBufferSizeNotExceeded = false; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.PTEBufferSizeNotExceeded[k]); - dml2_printf("DML::%s: k=%u, DCCMetaBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.DCCMetaBufferSizeNotExceeded[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.PTEBufferSizeNotExceeded[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, DCCMetaBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.DCCMetaBufferSizeNotExceeded[k]); #endif } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: PTEBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.PTEBufferSizeNotExceeded); - dml2_printf("DML::%s: DCCMetaBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.DCCMetaBufferSizeNotExceeded); + DML_LOG_VERBOSE("DML::%s: PTEBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.PTEBufferSizeNotExceeded); + DML_LOG_VERBOSE("DML::%s: DCCMetaBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.DCCMetaBufferSizeNotExceeded); #endif /* VActive bytes to fetch for UCLK P-State */ @@ -8502,7 +9068,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out for (k = 0; k < mode_lib->ms.num_active_planes; ++k) { double line_time_us = (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000); - bool cursor_not_enough_urgent_latency_hiding = 0; + bool cursor_not_enough_urgent_latency_hiding = false; if (display_cfg->plane_descriptors[k].cursor.num_cursors > 0) { calculate_cursor_req_attributes( @@ -8531,9 +9097,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.UrgentBurstFactorCursorPre[k] = mode_lib->ms.UrgentBurstFactorCursor[k]; #ifdef __DML_VBA_DEBUG__ - 
dml2_printf("DML::%s: k=%d, Calling CalculateUrgentBurstFactor\n", __func__, k); - dml2_printf("DML::%s: k=%d, VRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio); - dml2_printf("DML::%s: k=%d, VRatioChroma=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio); + DML_LOG_VERBOSE("DML::%s: k=%d, Calling CalculateUrgentBurstFactor\n", __func__, k); + DML_LOG_VERBOSE("DML::%s: k=%d, VRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio); + DML_LOG_VERBOSE("DML::%s: k=%d, VRatioChroma=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio); #endif CalculateUrgentBurstFactor( @@ -8605,7 +9171,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, MaximumVStartup = %u\n", __func__, k, s->MaximumVStartup[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, MaximumVStartup = %u\n", __func__, k, s->MaximumVStartup[k]); #endif /* Immediate Flip and MALL parameters */ @@ -8654,16 +9220,15 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out (s->SubViewportMALLPStateMethod && s->FullFrameMALLPStateMethod) || s->SubViewportMALLRefreshGreaterThan120Hz; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: SubViewportMALLPStateMethod = %u\n", __func__, s->SubViewportMALLPStateMethod); - dml2_printf("DML::%s: PhantomPipeMALLPStateMethod = %u\n", __func__, s->PhantomPipeMALLPStateMethod); - dml2_printf("DML::%s: FullFrameMALLPStateMethod = %u\n", __func__, s->FullFrameMALLPStateMethod); - dml2_printf("DML::%s: SubViewportMALLRefreshGreaterThan120Hz = %u\n", __func__, s->SubViewportMALLRefreshGreaterThan120Hz); - dml2_printf("DML::%s: InvalidCombinationOfMALLUseForPState = %u\n", __func__, mode_lib->ms.support.InvalidCombinationOfMALLUseForPState); - dml2_printf("DML::%s: in_out_params->min_clk_index = %u\n", __func__, in_out_params->min_clk_index); - dml2_printf("DML::%s: mode_lib->ms.DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK); - dml2_printf("DML::%s: mode_lib->ms.FabricClock = %f\n", __func__, mode_lib->ms.FabricClock); - dml2_printf("DML::%s: mode_lib->ms.uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz); - dml2_printf("DML::%s: urgent latency tolarance = %f\n", __func__, ((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024 / (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes))); + DML_LOG_VERBOSE("DML::%s: SubViewportMALLPStateMethod = %u\n", __func__, s->SubViewportMALLPStateMethod); + DML_LOG_VERBOSE("DML::%s: PhantomPipeMALLPStateMethod = %u\n", __func__, s->PhantomPipeMALLPStateMethod); + DML_LOG_VERBOSE("DML::%s: FullFrameMALLPStateMethod = %u\n", __func__, s->FullFrameMALLPStateMethod); + DML_LOG_VERBOSE("DML::%s: SubViewportMALLRefreshGreaterThan120Hz = %u\n", __func__, s->SubViewportMALLRefreshGreaterThan120Hz); + DML_LOG_VERBOSE("DML::%s: InvalidCombinationOfMALLUseForPState = %u\n", __func__, mode_lib->ms.support.InvalidCombinationOfMALLUseForPState); + DML_LOG_VERBOSE("DML::%s: in_out_params->min_clk_index = %u\n", __func__, in_out_params->min_clk_index); + DML_LOG_VERBOSE("DML::%s: mode_lib->ms.DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK); + DML_LOG_VERBOSE("DML::%s: mode_lib->ms.FabricClock = %f\n", __func__, mode_lib->ms.FabricClock); + DML_LOG_VERBOSE("DML::%s: mode_lib->ms.uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz); #endif 
mode_lib->ms.support.OutstandingRequestsSupport = true; @@ -8703,10 +9268,10 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: avg_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_urgent_latency_us); - dml2_printf("DML::%s: avg_non_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_non_urgent_latency_us); - dml2_printf("DML::%s: k=%d, request_size_bytes_luma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_luma[k]); - dml2_printf("DML::%s: k=%d, outstanding_latency_us = %f (luma)\n", __func__, k, outstanding_latency_us); + DML_LOG_VERBOSE("DML::%s: avg_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_urgent_latency_us); + DML_LOG_VERBOSE("DML::%s: avg_non_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_non_urgent_latency_us); + DML_LOG_VERBOSE("DML::%s: k=%d, request_size_bytes_luma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_luma[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, outstanding_latency_us = %f (luma)\n", __func__, k, outstanding_latency_us); #endif } @@ -8722,8 +9287,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.support.OutstandingRequestsUrgencyAvoidance = false; } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%d, request_size_bytes_chroma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_chroma[k]); - dml2_printf("DML::%s: k=%d, outstanding_latency_us = %f (chroma)\n", __func__, k, outstanding_latency_us); + DML_LOG_VERBOSE("DML::%s: k=%d, request_size_bytes_chroma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_chroma[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, outstanding_latency_us = %f (chroma)\n", __func__, k, outstanding_latency_us); #endif } } @@ -8869,7 +9434,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out for (k = 0; k < mode_lib->ms.num_active_planes; ++k) { if (mode_lib->ms.NotEnoughUrgentLatencyHiding[k]) { mode_lib->ms.support.EnoughUrgentLatencyHidingSupport = false; - dml2_printf("DML::%s: k=%u NotEnoughUrgentLatencyHiding set\n", __func__, k); + DML_LOG_VERBOSE("DML::%s: k=%u NotEnoughUrgentLatencyHiding set\n", __func__, k); } } @@ -8878,639 +9443,13 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out if (!mode_lib->ms.support.avg_bandwidth_support_ok[m][n] && (m == dml2_core_internal_soc_state_sys_active || mode_lib->soc.mall_allocated_for_dcn_mbytes > 0)) { mode_lib->ms.support.AvgBandwidthSupport = false; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: avg_bandwidth_support_ok[%s][%s] not ok\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n)); + DML_LOG_VERBOSE("DML::%s: avg_bandwidth_support_ok[%s][%s] not ok\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n)); #endif } } } - /* Prefetch Check */ - { - mode_lib->ms.TimeCalc = 24 / mode_lib->ms.dcfclk_deepsleep; - - calculate_hostvm_inefficiency_factor( - &s->HostVMInefficiencyFactor, - &s->HostVMInefficiencyFactorPrefetch, - - display_cfg->gpuvm_enable, - display_cfg->hostvm_enable, - mode_lib->ip.remote_iommu_outstanding_translations, - mode_lib->soc.max_outstanding_reqs, - mode_lib->ms.support.urg_bandwidth_available_pixel_and_vm[dml2_core_internal_soc_state_sys_active], - mode_lib->ms.support.urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_sys_active]); - - mode_lib->ms.Total3dlutActive = 0; - for (k = 0; k <= 
mode_lib->ms.num_active_planes - 1; k++) { - if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut) - mode_lib->ms.Total3dlutActive = mode_lib->ms.Total3dlutActive + 1; - - // Calculate tdlut schedule related terms - calculate_tdlut_setting_params->dispclk_mhz = mode_lib->ms.RequiredDISPCLK; - calculate_tdlut_setting_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut; - calculate_tdlut_setting_params->tdlut_width_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_width_mode; - calculate_tdlut_setting_params->tdlut_addressing_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_addressing_mode; - calculate_tdlut_setting_params->cursor_buffer_size = mode_lib->ip.cursor_buffer_size; - calculate_tdlut_setting_params->gpuvm_enable = display_cfg->gpuvm_enable; - calculate_tdlut_setting_params->gpuvm_page_size_kbytes = display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes; - calculate_tdlut_setting_params->tdlut_mpc_width_flag = display_cfg->plane_descriptors[k].tdlut.tdlut_mpc_width_flag; - calculate_tdlut_setting_params->is_gfx11 = dml_get_gfx_version(display_cfg->plane_descriptors[k].surface.tiling); - - // output - calculate_tdlut_setting_params->tdlut_pte_bytes_per_frame = &s->tdlut_pte_bytes_per_frame[k]; - calculate_tdlut_setting_params->tdlut_bytes_per_frame = &s->tdlut_bytes_per_frame[k]; - calculate_tdlut_setting_params->tdlut_groups_per_2row_ub = &s->tdlut_groups_per_2row_ub[k]; - calculate_tdlut_setting_params->tdlut_opt_time = &s->tdlut_opt_time[k]; - calculate_tdlut_setting_params->tdlut_drain_time = &s->tdlut_drain_time[k]; - calculate_tdlut_setting_params->tdlut_bytes_to_deliver = &s->tdlut_bytes_to_deliver[k]; - calculate_tdlut_setting_params->tdlut_bytes_per_group = &s->tdlut_bytes_per_group[k]; - - calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params); - } - - min_return_bw_for_latency = mode_lib->ms.support.urg_bandwidth_available_min_latency[dml2_core_internal_soc_state_sys_active]; - - if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3) - s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes, - mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes, - mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes)); - - CalculateExtraLatency( - display_cfg, - mode_lib->ip.rob_buffer_size_kbytes, - mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles, - s->ReorderingBytes, - mode_lib->ms.DCFCLK, - mode_lib->ms.FabricClock, - mode_lib->ip.pixel_chunk_size_kbytes, - min_return_bw_for_latency, - mode_lib->ms.num_active_planes, - mode_lib->ms.NoOfDPP, - mode_lib->ms.dpte_group_bytes, - s->tdlut_bytes_per_group, - s->HostVMInefficiencyFactor, - s->HostVMInefficiencyFactorPrefetch, - mode_lib->soc.hostvm_min_page_size_kbytes, - mode_lib->soc.qos_parameters.qos_type, - !(display_cfg->overrides.max_outstanding_when_urgent_expected_disable), - mode_lib->soc.max_outstanding_reqs, - mode_lib->ms.support.request_size_bytes_luma, - mode_lib->ms.support.request_size_bytes_chroma, - mode_lib->ip.meta_chunk_size_kbytes, - mode_lib->ip.dchub_arb_to_ret_delay, - mode_lib->ms.TripToMemory, - mode_lib->ip.hostvm_mode, - - // output - &mode_lib->ms.ExtraLatency, - &mode_lib->ms.ExtraLatency_sr, - &mode_lib->ms.ExtraLatencyPrefetch); - - for 
(k = 0; k < mode_lib->ms.num_active_planes; k++) - s->impacted_dst_y_pre[k] = 0; - - s->recalc_prefetch_schedule = 0; - s->recalc_prefetch_done = 0; - do { - mode_lib->ms.support.PrefetchSupported = true; - - for (k = 0; k < mode_lib->ms.num_active_planes; k++) { - s->line_times[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000); - s->pixel_format[k] = display_cfg->plane_descriptors[k].pixel_format; - - s->lb_source_lines_l[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits, - mode_lib->ms.NoOfDPP[k], - display_cfg->plane_descriptors[k].composition.viewport.plane0.width, - display_cfg->plane_descriptors[k].composition.viewport.plane0.height, - display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio, - display_cfg->plane_descriptors[k].composition.rotation_angle); - - s->lb_source_lines_c[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits, - mode_lib->ms.NoOfDPP[k], - display_cfg->plane_descriptors[k].composition.viewport.plane1.width, - display_cfg->plane_descriptors[k].composition.viewport.plane1.height, - display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio, - display_cfg->plane_descriptors[k].composition.rotation_angle); - - struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe; - - mode_lib->ms.TWait[k] = CalculateTWait( - display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns, - mode_lib->ms.UrgLatency, - mode_lib->ms.TripToMemory, - !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ? 
- get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), in_out_params->min_clk_index) : 0.0); - - myPipe->Dppclk = mode_lib->ms.RequiredDPPCLK[k]; - myPipe->Dispclk = mode_lib->ms.RequiredDISPCLK; - myPipe->PixelClock = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000); - myPipe->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep; - myPipe->DPPPerSurface = mode_lib->ms.NoOfDPP[k]; - myPipe->ScalerEnabled = display_cfg->plane_descriptors[k].composition.scaler_info.enabled; - myPipe->VRatio = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio; - myPipe->VRatioChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio; - myPipe->VTaps = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps; - myPipe->VTapsChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps; - myPipe->RotationAngle = display_cfg->plane_descriptors[k].composition.rotation_angle; - myPipe->mirrored = display_cfg->plane_descriptors[k].composition.mirrored; - myPipe->BlockWidth256BytesY = mode_lib->ms.Read256BlockWidthY[k]; - myPipe->BlockHeight256BytesY = mode_lib->ms.Read256BlockHeightY[k]; - myPipe->BlockWidth256BytesC = mode_lib->ms.Read256BlockWidthC[k]; - myPipe->BlockHeight256BytesC = mode_lib->ms.Read256BlockHeightC[k]; - myPipe->InterlaceEnable = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced; - myPipe->NumberOfCursors = display_cfg->plane_descriptors[k].cursor.num_cursors; - myPipe->VBlank = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active; - myPipe->HTotal = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total; - myPipe->HActive = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active; - myPipe->DCCEnable = display_cfg->plane_descriptors[k].surface.dcc.enable; - myPipe->ODMMode = mode_lib->ms.ODMMode[k]; - myPipe->SourcePixelFormat = display_cfg->plane_descriptors[k].pixel_format; - myPipe->BytePerPixelY = mode_lib->ms.BytePerPixelY[k]; - myPipe->BytePerPixelC = mode_lib->ms.BytePerPixelC[k]; - myPipe->ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported; - -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k); - dml2_printf("DML::%s: MaximumVStartup = %u\n", __func__, s->MaximumVStartup[k]); -#endif - CalculatePrefetchSchedule_params->display_cfg = display_cfg; - CalculatePrefetchSchedule_params->HostVMInefficiencyFactor = s->HostVMInefficiencyFactorPrefetch; - CalculatePrefetchSchedule_params->myPipe = myPipe; - CalculatePrefetchSchedule_params->DSCDelay = mode_lib->ms.DSCDelay[k]; - CalculatePrefetchSchedule_params->DPPCLKDelaySubtotalPlusCNVCFormater = mode_lib->ip.dppclk_delay_subtotal + mode_lib->ip.dppclk_delay_cnvc_formatter; - CalculatePrefetchSchedule_params->DPPCLKDelaySCL = mode_lib->ip.dppclk_delay_scl; - CalculatePrefetchSchedule_params->DPPCLKDelaySCLLBOnly = mode_lib->ip.dppclk_delay_scl_lb_only; - CalculatePrefetchSchedule_params->DPPCLKDelayCNVCCursor = mode_lib->ip.dppclk_delay_cnvc_cursor; - CalculatePrefetchSchedule_params->DISPCLKDelaySubtotal = mode_lib->ip.dispclk_delay_subtotal; - CalculatePrefetchSchedule_params->DPP_RECOUT_WIDTH = (unsigned 
int)(mode_lib->ms.SwathWidthY[k] / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio); - CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format; - CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters; - CalculatePrefetchSchedule_params->VStartup = s->MaximumVStartup[k]; - CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes; - CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable; - CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled; - CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = display_cfg->plane_descriptors[k].dynamic_meta_data.lines_before_active_required; - CalculatePrefetchSchedule_params->DynamicMetadataTransmittedBytes = display_cfg->plane_descriptors[k].dynamic_meta_data.transmitted_bytes; - CalculatePrefetchSchedule_params->UrgentLatency = mode_lib->ms.UrgLatency; - CalculatePrefetchSchedule_params->ExtraLatencyPrefetch = mode_lib->ms.ExtraLatencyPrefetch; - CalculatePrefetchSchedule_params->TCalc = mode_lib->ms.TimeCalc; - CalculatePrefetchSchedule_params->vm_bytes = mode_lib->ms.vm_bytes[k]; - CalculatePrefetchSchedule_params->PixelPTEBytesPerRow = mode_lib->ms.DPTEBytesPerRow[k]; - CalculatePrefetchSchedule_params->PrefetchSourceLinesY = mode_lib->ms.PrefetchLinesY[k]; - CalculatePrefetchSchedule_params->VInitPreFillY = mode_lib->ms.PrefillY[k]; - CalculatePrefetchSchedule_params->MaxNumSwathY = mode_lib->ms.MaxNumSwathY[k]; - CalculatePrefetchSchedule_params->PrefetchSourceLinesC = mode_lib->ms.PrefetchLinesC[k]; - CalculatePrefetchSchedule_params->VInitPreFillC = mode_lib->ms.PrefillC[k]; - CalculatePrefetchSchedule_params->MaxNumSwathC = mode_lib->ms.MaxNumSwathC[k]; - CalculatePrefetchSchedule_params->swath_width_luma_ub = mode_lib->ms.swath_width_luma_ub[k]; - CalculatePrefetchSchedule_params->swath_width_chroma_ub = mode_lib->ms.swath_width_chroma_ub[k]; - CalculatePrefetchSchedule_params->SwathHeightY = mode_lib->ms.SwathHeightY[k]; - CalculatePrefetchSchedule_params->SwathHeightC = mode_lib->ms.SwathHeightC[k]; - CalculatePrefetchSchedule_params->TWait = mode_lib->ms.TWait[k]; - CalculatePrefetchSchedule_params->Ttrip = mode_lib->ms.TripToMemory; - CalculatePrefetchSchedule_params->Turg = mode_lib->ms.UrgLatency; - CalculatePrefetchSchedule_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut; - CalculatePrefetchSchedule_params->tdlut_pte_bytes_per_frame = s->tdlut_pte_bytes_per_frame[k]; - CalculatePrefetchSchedule_params->tdlut_bytes_per_frame = s->tdlut_bytes_per_frame[k]; - CalculatePrefetchSchedule_params->tdlut_opt_time = s->tdlut_opt_time[k]; - CalculatePrefetchSchedule_params->tdlut_drain_time = s->tdlut_drain_time[k]; - CalculatePrefetchSchedule_params->num_cursors = (display_cfg->plane_descriptors[k].cursor.cursor_width > 0); - CalculatePrefetchSchedule_params->cursor_bytes_per_chunk = s->cursor_bytes_per_chunk[k]; - CalculatePrefetchSchedule_params->cursor_bytes_per_line = s->cursor_bytes_per_line[k]; - CalculatePrefetchSchedule_params->dcc_enable = display_cfg->plane_descriptors[k].surface.dcc.enable; - CalculatePrefetchSchedule_params->mrq_present = mode_lib->ip.dcn_mrq_present; - CalculatePrefetchSchedule_params->meta_row_bytes = mode_lib->ms.meta_row_bytes[k]; - 
CalculatePrefetchSchedule_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor[k]; - CalculatePrefetchSchedule_params->impacted_dst_y_pre = s->impacted_dst_y_pre[k]; - CalculatePrefetchSchedule_params->vactive_sw_bw_l = mode_lib->ms.vactive_sw_bw_l[k]; - CalculatePrefetchSchedule_params->vactive_sw_bw_c = mode_lib->ms.vactive_sw_bw_c[k]; - - // output - CalculatePrefetchSchedule_params->DSTXAfterScaler = &s->DSTXAfterScaler[k]; - CalculatePrefetchSchedule_params->DSTYAfterScaler = &s->DSTYAfterScaler[k]; - CalculatePrefetchSchedule_params->dst_y_prefetch = &mode_lib->ms.dst_y_prefetch[k]; - CalculatePrefetchSchedule_params->dst_y_per_vm_vblank = &mode_lib->ms.LinesForVM[k]; - CalculatePrefetchSchedule_params->dst_y_per_row_vblank = &mode_lib->ms.LinesForDPTERow[k]; - CalculatePrefetchSchedule_params->VRatioPrefetchY = &mode_lib->ms.VRatioPreY[k]; - CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->ms.VRatioPreC[k]; - CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->ms.RequiredPrefetchPixelDataBWLuma[k]; // prefetch_sw_bw_l - CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->ms.RequiredPrefetchPixelDataBWChroma[k]; // prefetch_sw_bw_c - CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &mode_lib->ms.RequiredPrefetchBWOTO[k]; - CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->ms.NoTimeForDynamicMetadata[k]; - CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k]; - CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->ms.Tno_bw_flip[k]; - CalculatePrefetchSchedule_params->prefetch_vmrow_bw = &mode_lib->ms.prefetch_vmrow_bw[k]; - CalculatePrefetchSchedule_params->Tdmdl_vm = &s->dummy_single[0]; - CalculatePrefetchSchedule_params->Tdmdl = &s->dummy_single[1]; - CalculatePrefetchSchedule_params->TSetup = &s->dummy_single[2]; - CalculatePrefetchSchedule_params->Tvm_trips = &s->Tvm_trips[k]; - CalculatePrefetchSchedule_params->Tr0_trips = &s->Tr0_trips[k]; - CalculatePrefetchSchedule_params->Tvm_trips_flip = &s->Tvm_trips_flip[k]; - CalculatePrefetchSchedule_params->Tr0_trips_flip = &s->Tr0_trips_flip[k]; - CalculatePrefetchSchedule_params->Tvm_trips_flip_rounded = &s->Tvm_trips_flip_rounded[k]; - CalculatePrefetchSchedule_params->Tr0_trips_flip_rounded = &s->Tr0_trips_flip_rounded[k]; - CalculatePrefetchSchedule_params->VUpdateOffsetPix = &s->dummy_integer[0]; - CalculatePrefetchSchedule_params->VUpdateWidthPix = &s->dummy_integer[1]; - CalculatePrefetchSchedule_params->VReadyOffsetPix = &s->dummy_integer[2]; - CalculatePrefetchSchedule_params->prefetch_cursor_bw = &mode_lib->ms.prefetch_cursor_bw[k]; - CalculatePrefetchSchedule_params->prefetch_sw_bytes = &s->prefetch_sw_bytes[k]; - CalculatePrefetchSchedule_params->Tpre_rounded = &s->Tpre_rounded[k]; - CalculatePrefetchSchedule_params->Tpre_oto = &s->Tpre_oto[k]; - CalculatePrefetchSchedule_params->prefetch_swath_time_us = &s->prefetch_swath_time_us[k]; - - mode_lib->ms.NoTimeForPrefetch[k] = CalculatePrefetchSchedule(&mode_lib->scratch, CalculatePrefetchSchedule_params); - - mode_lib->ms.support.PrefetchSupported &= !mode_lib->ms.NoTimeForPrefetch[k]; - dml2_printf("DML::%s: k=%d, dst_y_per_vm_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_vm_vblank); - dml2_printf("DML::%s: k=%d, dst_y_per_row_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_row_vblank); - } // for k num_planes - - CalculateDCFCLKDeepSleepTdlut( - 
display_cfg, - mode_lib->ms.num_active_planes, - mode_lib->ms.BytePerPixelY, - mode_lib->ms.BytePerPixelC, - mode_lib->ms.SwathWidthY, - mode_lib->ms.SwathWidthC, - mode_lib->ms.NoOfDPP, - mode_lib->ms.PSCL_FACTOR, - mode_lib->ms.PSCL_FACTOR_CHROMA, - mode_lib->ms.RequiredDPPCLK, - mode_lib->ms.vactive_sw_bw_l, - mode_lib->ms.vactive_sw_bw_c, - mode_lib->soc.return_bus_width_bytes, - mode_lib->ms.RequiredDISPCLK, - s->tdlut_bytes_to_deliver, - s->prefetch_swath_time_us, - - /* Output */ - &mode_lib->ms.dcfclk_deepsleep); - - for (k = 0; k < mode_lib->ms.num_active_planes; k++) { - if (mode_lib->ms.dst_y_prefetch[k] < 2.0 - || mode_lib->ms.LinesForVM[k] >= 32.0 - || mode_lib->ms.LinesForDPTERow[k] >= 16.0 - || mode_lib->ms.NoTimeForPrefetch[k] == true - || s->DSTYAfterScaler[k] > 8) { - mode_lib->ms.support.PrefetchSupported = false; - dml2_printf("DML::%s: k=%d, dst_y_prefetch=%f (should not be < 2)\n", __func__, k, mode_lib->ms.dst_y_prefetch[k]); - dml2_printf("DML::%s: k=%d, LinesForVM=%f (should not be >= 32)\n", __func__, k, mode_lib->ms.LinesForVM[k]); - dml2_printf("DML::%s: k=%d, LinesForDPTERow=%f (should not be >= 16)\n", __func__, k, mode_lib->ms.LinesForDPTERow[k]); - dml2_printf("DML::%s: k=%d, DSTYAfterScaler=%d (should be <= 8)\n", __func__, k, s->DSTYAfterScaler[k]); - dml2_printf("DML::%s: k=%d, NoTimeForPrefetch=%d\n", __func__, k, mode_lib->ms.NoTimeForPrefetch[k]); - } - } - - mode_lib->ms.support.DynamicMetadataSupported = true; - for (k = 0; k < mode_lib->ms.num_active_planes; ++k) { - if (mode_lib->ms.NoTimeForDynamicMetadata[k] == true) { - mode_lib->ms.support.DynamicMetadataSupported = false; - } - } - - mode_lib->ms.support.VRatioInPrefetchSupported = true; - for (k = 0; k < mode_lib->ms.num_active_planes; k++) { - if (mode_lib->ms.VRatioPreY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ || - mode_lib->ms.VRatioPreC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) { - mode_lib->ms.support.VRatioInPrefetchSupported = false; - dml2_printf("DML::%s: k=%d VRatioPreY = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreY[k], __DML2_CALCS_MAX_VRATIO_PRE__); - dml2_printf("DML::%s: k=%d VRatioPreC = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreC[k], __DML2_CALCS_MAX_VRATIO_PRE__); - dml2_printf("DML::%s: VRatioInPrefetchSupported = %u\n", __func__, mode_lib->ms.support.VRatioInPrefetchSupported); - } - } - - mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.VRatioInPrefetchSupported; - - // By default, do not recalc prefetch schedule - s->recalc_prefetch_schedule = 0; - - // Only do urg vs prefetch bandwidth check, flip schedule check, power saving feature support check IF the Prefetch Schedule Check is ok - if (mode_lib->ms.support.PrefetchSupported) { - for (k = 0; k < mode_lib->ms.num_active_planes; k++) { - // Calculate Urgent burst factor for prefetch -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%d, Calling CalculateUrgentBurstFactor (for prefetch)\n", __func__, k); - dml2_printf("DML::%s: k=%d, VRatioPreY=%f\n", __func__, k, mode_lib->ms.VRatioPreY[k]); - dml2_printf("DML::%s: k=%d, VRatioPreC=%f\n", __func__, k, mode_lib->ms.VRatioPreC[k]); -#endif - CalculateUrgentBurstFactor( - &display_cfg->plane_descriptors[k], - mode_lib->ms.swath_width_luma_ub[k], - mode_lib->ms.swath_width_chroma_ub[k], - mode_lib->ms.SwathHeightY[k], - mode_lib->ms.SwathHeightC[k], - s->line_times[k], - mode_lib->ms.UrgLatency, - mode_lib->ms.VRatioPreY[k], - mode_lib->ms.VRatioPreC[k], - mode_lib->ms.BytePerPixelInDETY[k], - mode_lib->ms.BytePerPixelInDETC[k], - 
mode_lib->ms.DETBufferSizeY[k], - mode_lib->ms.DETBufferSizeC[k], - /* Output */ - &mode_lib->ms.UrgentBurstFactorLumaPre[k], - &mode_lib->ms.UrgentBurstFactorChromaPre[k], - &mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]); - } - - // Calculate urgent bandwidth required, both urg and non urg peak bandwidth - // assume flip bw is 0 at this point - for (k = 0; k < mode_lib->ms.num_active_planes; k++) - mode_lib->ms.final_flip_bw[k] = 0; - - calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = mode_lib->ms.support.urg_vactive_bandwidth_required; - calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required; - calculate_peak_bandwidth_params->urg_bandwidth_required_qual = mode_lib->ms.support.urg_bandwidth_required_qual; - calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required; - calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = mode_lib->ms.surface_avg_vactive_required_bw; - calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw; - - calculate_peak_bandwidth_params->display_cfg = display_cfg; - calculate_peak_bandwidth_params->inc_flip_bw = 0; - calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes; - calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP; - calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0; - calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1; - calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0; - calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1; - calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor; - calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor; - - calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l; - calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c; - calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma; - calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma; - calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO; - calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l; - calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c; - calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw; - calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw; - calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw; - calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw; - calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw; - calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw; - calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma; - calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma; - calculate_peak_bandwidth_params->urgent_burst_factor_cursor = 
mode_lib->ms.UrgentBurstFactorCursor; - calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre; - calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre; - calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre; - - calculate_peak_bandwidth_required( - &mode_lib->scratch, - calculate_peak_bandwidth_params); - - // Check urg peak bandwidth against available urg bw - // check at SDP and DRAM, for all soc states (SVP prefetch an Sys Active) - check_urgent_bandwidth_support( - &s->dummy_single[0], // double* frac_urg_bandwidth - &s->dummy_single[1], // double* frac_urg_bandwidth_mall - &mode_lib->ms.support.UrgVactiveBandwidthSupport, - &mode_lib->ms.support.PrefetchBandwidthSupported, - - mode_lib->soc.mall_allocated_for_dcn_mbytes, - mode_lib->ms.support.non_urg_bandwidth_required, - mode_lib->ms.support.urg_vactive_bandwidth_required, - mode_lib->ms.support.urg_bandwidth_required, - mode_lib->ms.support.urg_bandwidth_available); - - mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.PrefetchBandwidthSupported; - dml2_printf("DML::%s: PrefetchBandwidthSupported=%0d\n", __func__, mode_lib->ms.support.PrefetchBandwidthSupported); - - for (k = 0; k < mode_lib->ms.num_active_planes; k++) { - if (mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]) { - mode_lib->ms.support.PrefetchSupported = false; - dml2_printf("DML::%s: k=%d, NotEnoughUrgentLatencyHidingPre=%d\n", __func__, k, mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]); - } - } - -#ifdef DML_GLOBAL_PREFETCH_CHECK - if (mode_lib->ms.support.PrefetchSupported && mode_lib->ms.num_active_planes > 1 && s->recalc_prefetch_done == 0) { - CheckGlobalPrefetchAdmissibility_params->num_active_planes = mode_lib->ms.num_active_planes; - CheckGlobalPrefetchAdmissibility_params->pixel_format = s->pixel_format; - CheckGlobalPrefetchAdmissibility_params->chunk_bytes_l = mode_lib->ip.pixel_chunk_size_kbytes * 1024; - CheckGlobalPrefetchAdmissibility_params->chunk_bytes_c = mode_lib->ip.pixel_chunk_size_kbytes * 1024; - CheckGlobalPrefetchAdmissibility_params->lb_source_lines_l = s->lb_source_lines_l; - CheckGlobalPrefetchAdmissibility_params->lb_source_lines_c = s->lb_source_lines_c; - CheckGlobalPrefetchAdmissibility_params->swath_height_l = mode_lib->ms.SwathHeightY; - CheckGlobalPrefetchAdmissibility_params->swath_height_c = mode_lib->ms.SwathHeightC; - CheckGlobalPrefetchAdmissibility_params->rob_buffer_size_kbytes = mode_lib->ip.rob_buffer_size_kbytes; - CheckGlobalPrefetchAdmissibility_params->compressed_buffer_size_kbytes = mode_lib->ms.CompressedBufferSizeInkByte; - CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_l = mode_lib->ms.DETBufferSizeY; - CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_c = mode_lib->ms.DETBufferSizeC; - CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_l = s->full_swath_bytes_l; - CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_c = s->full_swath_bytes_c; - CheckGlobalPrefetchAdmissibility_params->prefetch_sw_bytes = s->prefetch_sw_bytes; - CheckGlobalPrefetchAdmissibility_params->Tpre_rounded = s->Tpre_rounded; - CheckGlobalPrefetchAdmissibility_params->Tpre_oto = s->Tpre_oto; - CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = mode_lib->ms.support.urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]; - 
CheckGlobalPrefetchAdmissibility_params->line_time = s->line_times; - CheckGlobalPrefetchAdmissibility_params->dst_y_prefetch = mode_lib->ms.dst_y_prefetch; - if (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps < 10 * 1024) - CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = 10 * 1024; - - CheckGlobalPrefetchAdmissibility_params->estimated_dcfclk_mhz = (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps / (double) mode_lib->soc.return_bus_width_bytes) / - ((double)mode_lib->soc.qos_parameters.derate_table.system_active_urgent.dcfclk_derate_percent / 100.0); - - // if recalc_prefetch_schedule is set, recalculate the prefetch schedule with the new impacted_Tpre, prefetch should be possible - CheckGlobalPrefetchAdmissibility_params->recalc_prefetch_schedule = &s->recalc_prefetch_schedule; - CheckGlobalPrefetchAdmissibility_params->impacted_dst_y_pre = s->impacted_dst_y_pre; - mode_lib->ms.support.PrefetchSupported = CheckGlobalPrefetchAdmissibility(&mode_lib->scratch, CheckGlobalPrefetchAdmissibility_params); - s->recalc_prefetch_done = 1; - s->recalc_prefetch_schedule = 1; - } -#endif - } // prefetch schedule ok, do urg bw and flip schedule - } while (s->recalc_prefetch_schedule); - - // Flip Schedule - // Both prefetch schedule and BW okay - if (mode_lib->ms.support.PrefetchSupported == true) { - mode_lib->ms.BandwidthAvailableForImmediateFlip = - get_bandwidth_available_for_immediate_flip( - dml2_core_internal_soc_state_sys_active, - mode_lib->ms.support.urg_bandwidth_required_qual, // no flip - mode_lib->ms.support.urg_bandwidth_available); - - mode_lib->ms.TotImmediateFlipBytes = 0; - for (k = 0; k < mode_lib->ms.num_active_planes; k++) { - if (display_cfg->plane_descriptors[k].immediate_flip) { - s->per_pipe_flip_bytes[k] = get_pipe_flip_bytes( - s->HostVMInefficiencyFactor, - mode_lib->ms.vm_bytes[k], - mode_lib->ms.DPTEBytesPerRow[k], - mode_lib->ms.meta_row_bytes[k]); - } else { - s->per_pipe_flip_bytes[k] = 0; - } - mode_lib->ms.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->ms.NoOfDPP[k]; - - } - - for (k = 0; k < mode_lib->ms.num_active_planes; k++) { - CalculateFlipSchedule( - &mode_lib->scratch, - display_cfg->plane_descriptors[k].immediate_flip, - 1, // use_lb_flip_bw - s->HostVMInefficiencyFactor, - s->Tvm_trips_flip[k], - s->Tr0_trips_flip[k], - s->Tvm_trips_flip_rounded[k], - s->Tr0_trips_flip_rounded[k], - display_cfg->gpuvm_enable, - mode_lib->ms.vm_bytes[k], - mode_lib->ms.DPTEBytesPerRow[k], - mode_lib->ms.BandwidthAvailableForImmediateFlip, - mode_lib->ms.TotImmediateFlipBytes, - display_cfg->plane_descriptors[k].pixel_format, - (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)), - display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio, - display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio, - mode_lib->ms.Tno_bw_flip[k], - mode_lib->ms.dpte_row_height[k], - mode_lib->ms.dpte_row_height_chroma[k], - mode_lib->ms.use_one_row_for_frame_flip[k], - mode_lib->ip.max_flip_time_us, - mode_lib->ip.max_flip_time_lines, - s->per_pipe_flip_bytes[k], - mode_lib->ms.meta_row_bytes[k], - s->meta_row_height_luma[k], - s->meta_row_height_chroma[k], - mode_lib->ip.dcn_mrq_present && display_cfg->plane_descriptors[k].surface.dcc.enable, - - /* Output */ - 
&mode_lib->ms.dst_y_per_vm_flip[k], - &mode_lib->ms.dst_y_per_row_flip[k], - &mode_lib->ms.final_flip_bw[k], - &mode_lib->ms.ImmediateFlipSupportedForPipe[k]); - } - - calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = s->dummy_bw; - calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required_flip; - calculate_peak_bandwidth_params->urg_bandwidth_required_qual = s->dummy_bw; - calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required_flip; - calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = s->surface_dummy_bw; - calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw; - - calculate_peak_bandwidth_params->display_cfg = display_cfg; - calculate_peak_bandwidth_params->inc_flip_bw = 1; - calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes; - calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP; - calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0; - calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1; - calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0; - calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1; - calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor; - calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor; - - calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l; - calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c; - calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma; - calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma; - calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO; - calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l; - calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c; - calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw; - calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw; - calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw; - calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw; - calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw; - calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw; - calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma; - calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma; - calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor; - calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre; - calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre; - calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre; - - 
calculate_peak_bandwidth_required( - &mode_lib->scratch, - calculate_peak_bandwidth_params); - - calculate_immediate_flip_bandwidth_support( - &s->dummy_single[0], // double* frac_urg_bandwidth_flip - &mode_lib->ms.support.ImmediateFlipSupport, - - dml2_core_internal_soc_state_sys_active, - mode_lib->ms.support.urg_bandwidth_required_flip, - mode_lib->ms.support.non_urg_bandwidth_required_flip, - mode_lib->ms.support.urg_bandwidth_available); - - for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) { - if (display_cfg->plane_descriptors[k].immediate_flip == true && mode_lib->ms.ImmediateFlipSupportedForPipe[k] == false) - mode_lib->ms.support.ImmediateFlipSupport = false; - } - - } else { // if prefetch not support, assume iflip is not supported too - mode_lib->ms.support.ImmediateFlipSupport = false; - } - - s->mSOCParameters.UrgentLatency = mode_lib->ms.UrgLatency; - s->mSOCParameters.ExtraLatency = mode_lib->ms.ExtraLatency; - s->mSOCParameters.ExtraLatency_sr = mode_lib->ms.ExtraLatency_sr; - s->mSOCParameters.WritebackLatency = mode_lib->soc.qos_parameters.writeback.base_latency_us; - s->mSOCParameters.DRAMClockChangeLatency = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us; - s->mSOCParameters.FCLKChangeLatency = mode_lib->soc.power_management_parameters.fclk_change_blackout_us; - s->mSOCParameters.SRExitTime = mode_lib->soc.power_management_parameters.stutter_exit_latency_us; - s->mSOCParameters.SREnterPlusExitTime = mode_lib->soc.power_management_parameters.stutter_enter_plus_exit_latency_us; - s->mSOCParameters.SRExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_exit_latency_us; - s->mSOCParameters.SREnterPlusExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_enter_plus_exit_latency_us; - s->mSOCParameters.USRRetrainingLatency = 0; - s->mSOCParameters.SMNLatency = 0; - s->mSOCParameters.g6_temp_read_blackout_us = get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), in_out_params->min_clk_index); - s->mSOCParameters.max_urgent_latency_us = get_max_urgent_latency_us(&mode_lib->soc.qos_parameters.qos_params.dcn4x, mode_lib->ms.uclk_freq_mhz, mode_lib->ms.FabricClock, in_out_params->min_clk_index); - s->mSOCParameters.df_response_time_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles / mode_lib->ms.FabricClock; - s->mSOCParameters.qos_type = mode_lib->soc.qos_parameters.qos_type; - - CalculateWatermarks_params->display_cfg = display_cfg; - CalculateWatermarks_params->USRRetrainingRequired = false; - CalculateWatermarks_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes; - CalculateWatermarks_params->MaxLineBufferLines = mode_lib->ip.max_line_buffer_lines; - CalculateWatermarks_params->LineBufferSize = mode_lib->ip.line_buffer_size_bits; - CalculateWatermarks_params->WritebackInterfaceBufferSize = mode_lib->ip.writeback_interface_buffer_size_kbytes; - CalculateWatermarks_params->DCFCLK = mode_lib->ms.DCFCLK; - CalculateWatermarks_params->SynchronizeTimings = display_cfg->overrides.synchronize_timings; - CalculateWatermarks_params->SynchronizeDRRDisplaysForUCLKPStateChange = display_cfg->overrides.synchronize_ddr_displays_for_uclk_pstate_change; - CalculateWatermarks_params->dpte_group_bytes = mode_lib->ms.dpte_group_bytes; - CalculateWatermarks_params->mmSOCParameters = s->mSOCParameters; - CalculateWatermarks_params->WritebackChunkSize = mode_lib->ip.writeback_chunk_size_kbytes; - CalculateWatermarks_params->SOCCLK = mode_lib->ms.SOCCLK; - 
CalculateWatermarks_params->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep; - CalculateWatermarks_params->DETBufferSizeY = mode_lib->ms.DETBufferSizeY; - CalculateWatermarks_params->DETBufferSizeC = mode_lib->ms.DETBufferSizeC; - CalculateWatermarks_params->SwathHeightY = mode_lib->ms.SwathHeightY; - CalculateWatermarks_params->SwathHeightC = mode_lib->ms.SwathHeightC; - CalculateWatermarks_params->SwathWidthY = mode_lib->ms.SwathWidthY; - CalculateWatermarks_params->SwathWidthC = mode_lib->ms.SwathWidthC; - CalculateWatermarks_params->DPPPerSurface = mode_lib->ms.NoOfDPP; - CalculateWatermarks_params->BytePerPixelDETY = mode_lib->ms.BytePerPixelInDETY; - CalculateWatermarks_params->BytePerPixelDETC = mode_lib->ms.BytePerPixelInDETC; - CalculateWatermarks_params->DSTXAfterScaler = s->DSTXAfterScaler; - CalculateWatermarks_params->DSTYAfterScaler = s->DSTYAfterScaler; - CalculateWatermarks_params->UnboundedRequestEnabled = mode_lib->ms.UnboundedRequestEnabled; - CalculateWatermarks_params->CompressedBufferSizeInkByte = mode_lib->ms.CompressedBufferSizeInkByte; - CalculateWatermarks_params->meta_row_height_l = s->meta_row_height_luma; - CalculateWatermarks_params->meta_row_height_c = s->meta_row_height_chroma; - - // Output - CalculateWatermarks_params->Watermark = &mode_lib->ms.support.watermarks; // Watermarks *Watermark - CalculateWatermarks_params->DRAMClockChangeSupport = mode_lib->ms.support.DRAMClockChangeSupport; - CalculateWatermarks_params->global_dram_clock_change_supported = &mode_lib->ms.support.global_dram_clock_change_supported; - CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = &s->dummy_single_array[0]; // double *MaxActiveDRAMClockChangeLatencySupported[] - CalculateWatermarks_params->SubViewportLinesNeededInMALL = mode_lib->ms.SubViewportLinesNeededInMALL; // unsigned int SubViewportLinesNeededInMALL[] - CalculateWatermarks_params->FCLKChangeSupport = mode_lib->ms.support.FCLKChangeSupport; - CalculateWatermarks_params->global_fclk_change_supported = &mode_lib->ms.support.global_fclk_change_supported; - CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &s->dummy_single[0]; // double *MaxActiveFCLKChangeLatencySupported - CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->ms.support.USRRetrainingSupport; - CalculateWatermarks_params->g6_temp_read_support = &mode_lib->ms.support.g6_temp_read_support; - CalculateWatermarks_params->VActiveLatencyHidingMargin = mode_lib->ms.VActiveLatencyHidingMargin; - CalculateWatermarks_params->VActiveLatencyHidingUs = mode_lib->ms.VActiveLatencyHidingUs; - - CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(&mode_lib->scratch, CalculateWatermarks_params); - - calculate_pstate_keepout_dst_lines(display_cfg, &mode_lib->ms.support.watermarks, s->dummy_integer_array[0]); - } - dml2_printf("DML::%s: Done prefetch calculation\n", __func__); - // End of Prefetch Check + dml_core_ms_prefetch_check(mode_lib, display_cfg); mode_lib->ms.support.max_urgent_latency_us = s->mSOCParameters.max_urgent_latency_us; @@ -9546,8 +9485,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.dram_change_vactive_det_fill_delay_us); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: max_urgent_latency_us = %f\n", __func__, s->mSOCParameters.max_urgent_latency_us); - dml2_printf("DML::%s: ROBSupport = %u\n", __func__, mode_lib->ms.support.ROBSupport); + DML_LOG_VERBOSE("DML::%s: max_urgent_latency_us = %f\n", __func__, s->mSOCParameters.max_urgent_latency_us); + 
DML_LOG_VERBOSE("DML::%s: ROBSupport = %u\n", __func__, mode_lib->ms.support.ROBSupport); #endif /*Mode Support, Voltage State and SOC Configuration*/ @@ -9597,17 +9536,17 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out && !mode_lib->ms.support.ExceededMALLSize && mode_lib->ms.support.g6_temp_read_support && ((!display_cfg->hostvm_enable && !s->ImmediateFlipRequired) || mode_lib->ms.support.ImmediateFlipSupport)) { - dml2_printf("DML::%s: mode is supported\n", __func__); + DML_LOG_VERBOSE("DML::%s: mode is supported\n", __func__); mode_lib->ms.support.ModeSupport = true; } else { - dml2_printf("DML::%s: mode is NOT supported\n", __func__); + DML_LOG_VERBOSE("DML::%s: mode is NOT supported\n", __func__); mode_lib->ms.support.ModeSupport = false; } } // Since now the mode_support work on 1 particular power state, so there is only 1 state idx (index 0). - dml2_printf("DML::%s: ModeSupport = %u\n", __func__, mode_lib->ms.support.ModeSupport); - dml2_printf("DML::%s: ImmediateFlipSupport = %u\n", __func__, mode_lib->ms.support.ImmediateFlipSupport); + DML_LOG_VERBOSE("DML::%s: ModeSupport = %u\n", __func__, mode_lib->ms.support.ModeSupport); + DML_LOG_VERBOSE("DML::%s: ImmediateFlipSupport = %u\n", __func__, mode_lib->ms.support.ImmediateFlipSupport); for (k = 0; k < mode_lib->ms.num_active_planes; k++) { mode_lib->ms.support.MPCCombineEnable[k] = mode_lib->ms.MPCCombine[k]; @@ -9623,8 +9562,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.support.OutputRate[k] = mode_lib->ms.OutputRate[k]; #if defined(__DML_VBA_DEBUG__) - dml2_printf("DML::%s: k=%d, ODMMode = %u\n", __func__, k, mode_lib->ms.support.ODMMode[k]); - dml2_printf("DML::%s: k=%d, DSCEnabled = %u\n", __func__, k, mode_lib->ms.support.DSCEnabled[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, ODMMode = %u\n", __func__, k, mode_lib->ms.support.ODMMode[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, DSCEnabled = %u\n", __func__, k, mode_lib->ms.support.DSCEnabled[k]); #endif } @@ -9632,7 +9571,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out if (!mode_lib->ms.support.ModeSupport) dml2_print_mode_support_info(&mode_lib->ms.support, true); - dml2_printf("DML::%s: --- DONE --- \n", __func__); + DML_LOG_VERBOSE("DML::%s: --- DONE --- \n", __func__); #endif return mode_lib->ms.support.ModeSupport; @@ -9642,18 +9581,18 @@ unsigned int dml2_core_calcs_mode_support_ex(struct dml2_core_calcs_mode_support { unsigned int result; - dml2_printf("DML::%s: ------------- START ----------\n", __func__); + DML_LOG_VERBOSE("DML::%s: ------------- START ----------\n", __func__); result = dml_core_mode_support(in_out_params); if (result) *in_out_params->out_evaluation_info = in_out_params->mode_lib->ms.support; - dml2_printf("DML::%s: is_mode_support = %u (min_clk_index=%d)\n", __func__, result, in_out_params->min_clk_index); + DML_LOG_VERBOSE("DML::%s: is_mode_support = %u (min_clk_index=%d)\n", __func__, result, in_out_params->min_clk_index); for (unsigned int k = 0; k < in_out_params->in_display_cfg->num_planes; k++) - dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, in_out_params->in_display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns); + DML_LOG_VERBOSE("DML::%s: plane_%d: reserved_vblank_time_ns = %lu\n", __func__, k, in_out_params->in_display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns); - dml2_printf("DML::%s: ------------- DONE ----------\n", __func__); + DML_LOG_VERBOSE("DML::%s: 
------------- DONE ----------\n", __func__); return result; } @@ -9687,19 +9626,19 @@ static void CalculatePixelDeliveryTimes( double pixel_clock_mhz = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u : HRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio); - dml2_printf("DML::%s: k=%u : VRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio); - dml2_printf("DML::%s: k=%u : HRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio); - dml2_printf("DML::%s: k=%u : VRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio); - dml2_printf("DML::%s: k=%u : VRatioPrefetchY = %f\n", __func__, k, VRatioPrefetchY[k]); - dml2_printf("DML::%s: k=%u : VRatioPrefetchC = %f\n", __func__, k, VRatioPrefetchC[k]); - dml2_printf("DML::%s: k=%u : swath_width_luma_ub = %u\n", __func__, k, swath_width_luma_ub[k]); - dml2_printf("DML::%s: k=%u : swath_width_chroma_ub = %u\n", __func__, k, swath_width_chroma_ub[k]); - dml2_printf("DML::%s: k=%u : PSCL_THROUGHPUT = %f\n", __func__, k, PSCL_THROUGHPUT[k]); - dml2_printf("DML::%s: k=%u : PSCL_THROUGHPUT_CHROMA = %f\n", __func__, k, PSCL_THROUGHPUT_CHROMA[k]); - dml2_printf("DML::%s: k=%u : DPPPerSurface = %u\n", __func__, k, cfg_support_info->plane_support_info[k].dpps_used); - dml2_printf("DML::%s: k=%u : pixel_clock_mhz = %f\n", __func__, k, pixel_clock_mhz); - dml2_printf("DML::%s: k=%u : Dppclk = %f\n", __func__, k, Dppclk[k]); + DML_LOG_VERBOSE("DML::%s: k=%u : HRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio); + DML_LOG_VERBOSE("DML::%s: k=%u : VRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio); + DML_LOG_VERBOSE("DML::%s: k=%u : HRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio); + DML_LOG_VERBOSE("DML::%s: k=%u : VRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio); + DML_LOG_VERBOSE("DML::%s: k=%u : VRatioPrefetchY = %f\n", __func__, k, VRatioPrefetchY[k]); + DML_LOG_VERBOSE("DML::%s: k=%u : VRatioPrefetchC = %f\n", __func__, k, VRatioPrefetchC[k]); + DML_LOG_VERBOSE("DML::%s: k=%u : swath_width_luma_ub = %u\n", __func__, k, swath_width_luma_ub[k]); + DML_LOG_VERBOSE("DML::%s: k=%u : swath_width_chroma_ub = %u\n", __func__, k, swath_width_chroma_ub[k]); + DML_LOG_VERBOSE("DML::%s: k=%u : PSCL_THROUGHPUT = %f\n", __func__, k, PSCL_THROUGHPUT[k]); + DML_LOG_VERBOSE("DML::%s: k=%u : PSCL_THROUGHPUT_CHROMA = %f\n", __func__, k, PSCL_THROUGHPUT_CHROMA[k]); + DML_LOG_VERBOSE("DML::%s: k=%u : DPPPerSurface = %u\n", __func__, k, cfg_support_info->plane_support_info[k].dpps_used); + DML_LOG_VERBOSE("DML::%s: k=%u : pixel_clock_mhz = %f\n", __func__, k, pixel_clock_mhz); + DML_LOG_VERBOSE("DML::%s: k=%u : Dppclk = %f\n", __func__, k, Dppclk[k]); #endif if (display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio <= 1) { DisplayPipeLineDeliveryTimeLuma[k] = swath_width_luma_ub[k] * cfg_support_info->plane_support_info[k].dpps_used / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio / pixel_clock_mhz; @@ -9733,10 +9672,10 @@ static void CalculatePixelDeliveryTimes( } } #ifdef __DML_VBA_DEBUG__ - 
dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLuma[k]); - dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLumaPrefetch[k]); - dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChroma[k]); - dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChromaPrefetch[k]); + DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLuma[k]); + DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLumaPrefetch[k]); + DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChroma[k]); + DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChromaPrefetch[k]); #endif } @@ -9752,12 +9691,12 @@ static void CalculatePixelDeliveryTimes( DisplayPipeRequestDeliveryTimeChromaPrefetch[k] = DisplayPipeLineDeliveryTimeChromaPrefetch[k] / req_per_swath_ub_c[k]; } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLuma[k]); - dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLumaPrefetch[k]); - dml2_printf("DML::%s: k=%u : req_per_swath_ub_l = %d\n", __func__, k, req_per_swath_ub_l[k]); - dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChroma[k]); - dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChromaPrefetch[k]); - dml2_printf("DML::%s: k=%u : req_per_swath_ub_c = %d\n", __func__, k, req_per_swath_ub_c[k]); + DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLuma[k]); + DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLumaPrefetch[k]); + DML_LOG_VERBOSE("DML::%s: k=%u : req_per_swath_ub_l = %d\n", __func__, k, req_per_swath_ub_l[k]); + DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChroma[k]); + DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChromaPrefetch[k]); + DML_LOG_VERBOSE("DML::%s: k=%u : req_per_swath_ub_c = %d\n", __func__, k, req_per_swath_ub_c[k]); #endif } } @@ -9853,14 +9792,14 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_L[k]); - dml2_printf("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_C[k]); - dml2_printf("DML::%s: k=%d, TimePerMetaChunkNominal = %f\n", __func__, k, p->TimePerMetaChunkNominal[k]); - dml2_printf("DML::%s: k=%d, TimePerMetaChunkVBlank = %f\n", __func__, k, p->TimePerMetaChunkVBlank[k]); - dml2_printf("DML::%s: k=%d, TimePerMetaChunkFlip = %f\n", __func__, k, p->TimePerMetaChunkFlip[k]); - dml2_printf("DML::%s: k=%d, TimePerChromaMetaChunkNominal = %f\n", 
__func__, k, p->TimePerChromaMetaChunkNominal[k]); - dml2_printf("DML::%s: k=%d, TimePerChromaMetaChunkVBlank = %f\n", __func__, k, p->TimePerChromaMetaChunkVBlank[k]); - dml2_printf("DML::%s: k=%d, TimePerChromaMetaChunkFlip = %f\n", __func__, k, p->TimePerChromaMetaChunkFlip[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_L[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_C[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, TimePerMetaChunkNominal = %f\n", __func__, k, p->TimePerMetaChunkNominal[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, TimePerMetaChunkVBlank = %f\n", __func__, k, p->TimePerMetaChunkVBlank[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, TimePerMetaChunkFlip = %f\n", __func__, k, p->TimePerMetaChunkFlip[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, TimePerChromaMetaChunkNominal = %f\n", __func__, k, p->TimePerChromaMetaChunkNominal[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, TimePerChromaMetaChunkVBlank = %f\n", __func__, k, p->TimePerChromaMetaChunkVBlank[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, TimePerChromaMetaChunkFlip = %f\n", __func__, k, p->TimePerChromaMetaChunkFlip[k]); #endif } @@ -9881,7 +9820,7 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE else p->time_per_tdlut_group[k] = 0; - dml2_printf("DML::%s: k=%u, time_per_tdlut_group = %f\n", __func__, k, p->time_per_tdlut_group[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, time_per_tdlut_group = %f\n", __func__, k, p->time_per_tdlut_group[k]); if (p->display_cfg->gpuvm_enable == true) { if (!dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) { @@ -9897,14 +9836,14 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE if (dpte_groups_per_row_luma_ub <= 2) { dpte_groups_per_row_luma_ub = dpte_groups_per_row_luma_ub + 1; } - dml2_printf("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]); - dml2_printf("DML::%s: k=%u, dpte_group_bytes = %u\n", __func__, k, p->dpte_group_bytes[k]); - dml2_printf("DML::%s: k=%u, PTERequestSizeY = %u\n", __func__, k, p->PTERequestSizeY[k]); - dml2_printf("DML::%s: k=%u, PixelPTEReqWidthY = %u\n", __func__, k, p->PixelPTEReqWidthY[k]); - dml2_printf("DML::%s: k=%u, PixelPTEReqHeightY = %u\n", __func__, k, p->PixelPTEReqHeightY[k]); - dml2_printf("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]); - dml2_printf("DML::%s: k=%u, dpte_group_width_luma = %u\n", __func__, k, dpte_group_width_luma); - dml2_printf("DML::%s: k=%u, dpte_groups_per_row_luma_ub = %u\n", __func__, k, dpte_groups_per_row_luma_ub); + DML_LOG_VERBOSE("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, dpte_group_bytes = %u\n", __func__, k, p->dpte_group_bytes[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, PTERequestSizeY = %u\n", __func__, k, p->PTERequestSizeY[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEReqWidthY = %u\n", __func__, k, p->PixelPTEReqWidthY[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEReqHeightY = %u\n", __func__, k, p->PixelPTEReqHeightY[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, dpte_group_width_luma = %u\n", __func__, k, dpte_group_width_luma); + DML_LOG_VERBOSE("DML::%s: k=%u, dpte_groups_per_row_luma_ub = %u\n", __func__, k, dpte_groups_per_row_luma_ub); 
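/*
 * Editorial aside -- not part of this patch. The dml2_printf ->
 * DML_LOG_VERBOSE conversion applied throughout these hunks follows the
 * usual kernel pattern for verbose logging that can compile away. A
 * minimal sketch, assuming a hypothetical DML_LOG_ENABLE_VERBOSE build
 * switch (the series' real macro definition may differ):
 *
 *   #ifdef DML_LOG_ENABLE_VERBOSE
 *   #define DML_LOG_VERBOSE(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
 *   #else
 *   #define DML_LOG_VERBOSE(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
 *   #endif
 *
 * no_printk() generates no code but preserves compile-time checking of
 * format strings against argument types, which is consistent with the
 * specifier fixes folded into this diff (e.g. %u -> %lu for
 * reserved_vblank_time_ns, %d -> %ld for min_fclk_khz, and %f -> %d for
 * the integer num_group/num_req counts).
 */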
p->time_per_pte_group_nom_luma[k] = p->DST_Y_PER_PTE_ROW_NOM_L[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_luma_ub; p->time_per_pte_group_vblank_luma[k] = p->dst_y_per_row_vblank[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_luma_ub; @@ -9928,9 +9867,9 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE if (dpte_groups_per_row_chroma_ub <= 2) { dpte_groups_per_row_chroma_ub = dpte_groups_per_row_chroma_ub + 1; } - dml2_printf("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]); - dml2_printf("DML::%s: k=%u, dpte_group_width_chroma = %u\n", __func__, k, dpte_group_width_chroma); - dml2_printf("DML::%s: k=%u, dpte_groups_per_row_chroma_ub = %u\n", __func__, k, dpte_groups_per_row_chroma_ub); + DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, dpte_group_width_chroma = %u\n", __func__, k, dpte_group_width_chroma); + DML_LOG_VERBOSE("DML::%s: k=%u, dpte_groups_per_row_chroma_ub = %u\n", __func__, k, dpte_groups_per_row_chroma_ub); p->time_per_pte_group_nom_chroma[k] = p->DST_Y_PER_PTE_ROW_NOM_C[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_chroma_ub; p->time_per_pte_group_vblank_chroma[k] = p->dst_y_per_row_vblank[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_chroma_ub; @@ -9945,17 +9884,17 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE p->time_per_pte_group_flip_chroma[k] = 0; } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, dst_y_per_row_vblank = %f\n", __func__, k, p->dst_y_per_row_vblank[k]); - dml2_printf("DML::%s: k=%u, dst_y_per_row_flip = %f\n", __func__, k, p->dst_y_per_row_flip[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_per_row_vblank = %f\n", __func__, k, p->dst_y_per_row_vblank[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_per_row_flip = %f\n", __func__, k, p->dst_y_per_row_flip[k]); - dml2_printf("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_L[k]); - dml2_printf("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_C[k]); - dml2_printf("DML::%s: k=%u, time_per_pte_group_nom_luma = %f\n", __func__, k, p->time_per_pte_group_nom_luma[k]); - dml2_printf("DML::%s: k=%u, time_per_pte_group_vblank_luma = %f\n", __func__, k, p->time_per_pte_group_vblank_luma[k]); - dml2_printf("DML::%s: k=%u, time_per_pte_group_flip_luma = %f\n", __func__, k, p->time_per_pte_group_flip_luma[k]); - dml2_printf("DML::%s: k=%u, time_per_pte_group_nom_chroma = %f\n", __func__, k, p->time_per_pte_group_nom_chroma[k]); - dml2_printf("DML::%s: k=%u, time_per_pte_group_vblank_chroma = %f\n", __func__, k, p->time_per_pte_group_vblank_chroma[k]); - dml2_printf("DML::%s: k=%u, time_per_pte_group_flip_chroma = %f\n", __func__, k, p->time_per_pte_group_flip_chroma[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_L[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_C[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, 
time_per_pte_group_nom_luma = %f\n", __func__, k, p->time_per_pte_group_nom_luma[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_vblank_luma = %f\n", __func__, k, p->time_per_pte_group_vblank_luma[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_flip_luma = %f\n", __func__, k, p->time_per_pte_group_flip_luma[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_nom_chroma = %f\n", __func__, k, p->time_per_pte_group_nom_chroma[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_vblank_chroma = %f\n", __func__, k, p->time_per_pte_group_vblank_chroma[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_flip_chroma = %f\n", __func__, k, p->time_per_pte_group_flip_chroma[k]); #endif } } // CalculateMetaAndPTETimes @@ -9991,18 +9930,18 @@ static void CalculateVMGroupAndRequestTimes( double line_time; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces); + DML_LOG_VERBOSE("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces); #endif for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) { double pixel_clock_mhz = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000); bool dcc_mrq_enable = display_cfg->plane_descriptors[k].surface.dcc.enable && mrq_present; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, dcc_mrq_enable = %u\n", __func__, k, dcc_mrq_enable); - dml2_printf("DML::%s: k=%u, vm_group_bytes = %u\n", __func__, k, vm_group_bytes[k]); - dml2_printf("DML::%s: k=%u, dpde0_bytes_per_frame_ub_l = %u\n", __func__, k, dpde0_bytes_per_frame_ub_l[k]); - dml2_printf("DML::%s: k=%u, dpde0_bytes_per_frame_ub_c = %u\n", __func__, k, dpde0_bytes_per_frame_ub_c[k]); - dml2_printf("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_l = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_l[k]); - dml2_printf("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_c = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_c[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, dcc_mrq_enable = %u\n", __func__, k, dcc_mrq_enable); + DML_LOG_VERBOSE("DML::%s: k=%u, vm_group_bytes = %u\n", __func__, k, vm_group_bytes[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, dpde0_bytes_per_frame_ub_l = %u\n", __func__, k, dpde0_bytes_per_frame_ub_l[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, dpde0_bytes_per_frame_ub_c = %u\n", __func__, k, dpde0_bytes_per_frame_ub_c[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_l = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_l[k]); + DML_LOG_VERBOSE("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_c = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_c[k]); #endif if (display_cfg->gpuvm_enable) { @@ -10071,13 +10010,13 @@ static void CalculateVMGroupAndRequestTimes( else TimePerVMRequestFlip[k] = 0.0; - dml2_printf("DML::%s: k=%u, dst_y_per_vm_vblank = %f\n", __func__, k, dst_y_per_vm_vblank[k]); - dml2_printf("DML::%s: k=%u, dst_y_per_vm_flip = %f\n", __func__, k, dst_y_per_vm_flip[k]); - dml2_printf("DML::%s: k=%u, line_time = %f\n", __func__, k, line_time); - dml2_printf("DML::%s: k=%u, num_group_per_lower_vm_stage_pref = %f\n", __func__, k, num_group_per_lower_vm_stage_pref); - dml2_printf("DML::%s: k=%u, num_group_per_lower_vm_stage_flip = %f\n", __func__, k, num_group_per_lower_vm_stage_flip); - dml2_printf("DML::%s: k=%u, num_req_per_lower_vm_stage_pref = %f\n", __func__, k, num_req_per_lower_vm_stage_pref); - dml2_printf("DML::%s: k=%u, num_req_per_lower_vm_stage_flip = %f\n", __func__, k, num_req_per_lower_vm_stage_flip); + 
DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_per_vm_vblank = %f\n", __func__, k, dst_y_per_vm_vblank[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_per_vm_flip = %f\n", __func__, k, dst_y_per_vm_flip[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, line_time = %f\n", __func__, k, line_time); + DML_LOG_VERBOSE("DML::%s: k=%u, num_group_per_lower_vm_stage_pref = %d\n", __func__, k, num_group_per_lower_vm_stage_pref); + DML_LOG_VERBOSE("DML::%s: k=%u, num_group_per_lower_vm_stage_flip = %d\n", __func__, k, num_group_per_lower_vm_stage_flip); + DML_LOG_VERBOSE("DML::%s: k=%u, num_req_per_lower_vm_stage_pref = %d\n", __func__, k, num_req_per_lower_vm_stage_pref); + DML_LOG_VERBOSE("DML::%s: k=%u, num_req_per_lower_vm_stage_flip = %d\n", __func__, k, num_req_per_lower_vm_stage_flip); if (display_cfg->gpuvm_max_page_table_levels > 2) { TimePerVMGroupVBlank[k] = TimePerVMGroupVBlank[k] / 2; @@ -10094,10 +10033,10 @@ static void CalculateVMGroupAndRequestTimes( } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, TimePerVMGroupVBlank = %f\n", __func__, k, TimePerVMGroupVBlank[k]); - dml2_printf("DML::%s: k=%u, TimePerVMGroupFlip = %f\n", __func__, k, TimePerVMGroupFlip[k]); - dml2_printf("DML::%s: k=%u, TimePerVMRequestVBlank = %f\n", __func__, k, TimePerVMRequestVBlank[k]); - dml2_printf("DML::%s: k=%u, TimePerVMRequestFlip = %f\n", __func__, k, TimePerVMRequestFlip[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, TimePerVMGroupVBlank = %f\n", __func__, k, TimePerVMGroupVBlank[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, TimePerVMGroupFlip = %f\n", __func__, k, TimePerVMGroupFlip[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, TimePerVMRequestVBlank = %f\n", __func__, k, TimePerVMRequestVBlank[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, TimePerVMRequestFlip = %f\n", __func__, k, TimePerVMRequestFlip[k]); #endif } } @@ -10113,7 +10052,6 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc unsigned int SingleVTotal = 0; bool SameTiming = true; bool FoundCriticalSurface = false; - double LastZ8StutterPeriod = 0; memset(l, 0, sizeof(struct dml2_core_calcs_CalculateStutterEfficiency_locals)); @@ -10127,9 +10065,9 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc } l->TotalCompressedReadBandwidth = l->TotalCompressedReadBandwidth + p->ReadBandwidthSurfaceLuma[k] / math_min2(p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane0, l->MaximumEffectiveCompressionLuma); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]); - dml2_printf("DML::%s: k=%u, NetDCCRateLuma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane0); - dml2_printf("DML::%s: k=%u, MaximumEffectiveCompressionLuma = %f\n", __func__, k, l->MaximumEffectiveCompressionLuma); + DML_LOG_VERBOSE("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, NetDCCRateLuma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane0); + DML_LOG_VERBOSE("DML::%s: k=%u, MaximumEffectiveCompressionLuma = %f\n", __func__, k, l->MaximumEffectiveCompressionLuma); #endif l->TotalZeroSizeRequestReadBandwidth = l->TotalZeroSizeRequestReadBandwidth + p->ReadBandwidthSurfaceLuma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane0; l->TotalZeroSizeCompressedReadBandwidth = l->TotalZeroSizeCompressedReadBandwidth + 
p->ReadBandwidthSurfaceLuma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane0 / l->MaximumEffectiveCompressionLuma; @@ -10142,9 +10080,9 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc } l->TotalCompressedReadBandwidth = l->TotalCompressedReadBandwidth + p->ReadBandwidthSurfaceChroma[k] / math_min2(p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane1, l->MaximumEffectiveCompressionChroma); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceChroma = %f\n", __func__, k, p->ReadBandwidthSurfaceChroma[k]); - dml2_printf("DML::%s: k=%u, NetDCCRateChroma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane1); - dml2_printf("DML::%s: k=%u, MaximumEffectiveCompressionChroma = %f\n", __func__, k, l->MaximumEffectiveCompressionChroma); + DML_LOG_VERBOSE("DML::%s: k=%u, ReadBandwidthSurfaceChroma = %f\n", __func__, k, p->ReadBandwidthSurfaceChroma[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, NetDCCRateChroma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane1); + DML_LOG_VERBOSE("DML::%s: k=%u, MaximumEffectiveCompressionChroma = %f\n", __func__, k, l->MaximumEffectiveCompressionChroma); #endif l->TotalZeroSizeRequestReadBandwidth = l->TotalZeroSizeRequestReadBandwidth + p->ReadBandwidthSurfaceChroma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane1; l->TotalZeroSizeCompressedReadBandwidth = l->TotalZeroSizeCompressedReadBandwidth + p->ReadBandwidthSurfaceChroma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane1 / l->MaximumEffectiveCompressionChroma; @@ -10160,19 +10098,19 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc l->AverageDCCZeroSizeFraction = l->TotalZeroSizeRequestReadBandwidth / p->TotalDataReadBandwidth; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: UnboundedRequestEnabled = %u\n", __func__, p->UnboundedRequestEnabled); - dml2_printf("DML::%s: TotalCompressedReadBandwidth = %f\n", __func__, l->TotalCompressedReadBandwidth); - dml2_printf("DML::%s: TotalZeroSizeRequestReadBandwidth = %f\n", __func__, l->TotalZeroSizeRequestReadBandwidth); - dml2_printf("DML::%s: TotalZeroSizeCompressedReadBandwidth = %f\n", __func__, l->TotalZeroSizeCompressedReadBandwidth); - dml2_printf("DML::%s: MaximumEffectiveCompressionLuma = %f\n", __func__, l->MaximumEffectiveCompressionLuma); - dml2_printf("DML::%s: MaximumEffectiveCompressionChroma = %f\n", __func__, l->MaximumEffectiveCompressionChroma); - dml2_printf("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate); - dml2_printf("DML::%s: AverageDCCZeroSizeFraction = %f\n", __func__, l->AverageDCCZeroSizeFraction); + DML_LOG_VERBOSE("DML::%s: UnboundedRequestEnabled = %u\n", __func__, p->UnboundedRequestEnabled); + DML_LOG_VERBOSE("DML::%s: TotalCompressedReadBandwidth = %f\n", __func__, l->TotalCompressedReadBandwidth); + DML_LOG_VERBOSE("DML::%s: TotalZeroSizeRequestReadBandwidth = %f\n", __func__, l->TotalZeroSizeRequestReadBandwidth); + DML_LOG_VERBOSE("DML::%s: TotalZeroSizeCompressedReadBandwidth = %f\n", __func__, l->TotalZeroSizeCompressedReadBandwidth); + DML_LOG_VERBOSE("DML::%s: MaximumEffectiveCompressionLuma = %f\n", __func__, l->MaximumEffectiveCompressionLuma); + DML_LOG_VERBOSE("DML::%s: MaximumEffectiveCompressionChroma = %f\n", __func__, 
l->MaximumEffectiveCompressionChroma); + DML_LOG_VERBOSE("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate); + DML_LOG_VERBOSE("DML::%s: AverageDCCZeroSizeFraction = %f\n", __func__, l->AverageDCCZeroSizeFraction); - dml2_printf("DML::%s: CompbufReservedSpace64B = %u (%f kbytes)\n", __func__, p->CompbufReservedSpace64B, p->CompbufReservedSpace64B * 64 / 1024.0); - dml2_printf("DML::%s: CompbufReservedSpaceZs = %u\n", __func__, p->CompbufReservedSpaceZs); - dml2_printf("DML::%s: CompressedBufferSizeInkByte = %u kbytes\n", __func__, p->CompressedBufferSizeInkByte); - dml2_printf("DML::%s: ROBBufferSizeInKByte = %u kbytes\n", __func__, p->ROBBufferSizeInKByte); + DML_LOG_VERBOSE("DML::%s: CompbufReservedSpace64B = %u (%f kbytes)\n", __func__, p->CompbufReservedSpace64B, p->CompbufReservedSpace64B * 64 / 1024.0); + DML_LOG_VERBOSE("DML::%s: CompbufReservedSpaceZs = %u\n", __func__, p->CompbufReservedSpaceZs); + DML_LOG_VERBOSE("DML::%s: CompressedBufferSizeInkByte = %u kbytes\n", __func__, p->CompressedBufferSizeInkByte); + DML_LOG_VERBOSE("DML::%s: ROBBufferSizeInKByte = %u kbytes\n", __func__, p->ROBBufferSizeInKByte); #endif if (l->AverageDCCZeroSizeFraction == 1) { l->AverageZeroSizeCompressionRate = l->TotalZeroSizeRequestReadBandwidth / l->TotalZeroSizeCompressedReadBandwidth; @@ -10189,10 +10127,10 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate); - dml2_printf("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate + 1 / l->AverageDCCCompressionRate)); - dml2_printf("DML::%s: min 3 = %d\n", __func__, (p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64)); - dml2_printf("DML::%s: min 4 = %f\n", __func__, (p->ZeroSizeBufferEntries - p->CompbufReservedSpaceZs) * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate)); + DML_LOG_VERBOSE("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate); + DML_LOG_VERBOSE("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate + 1 / l->AverageDCCCompressionRate)); + DML_LOG_VERBOSE("DML::%s: min 3 = %d\n", __func__, (p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64)); + DML_LOG_VERBOSE("DML::%s: min 4 = %f\n", __func__, (p->ZeroSizeBufferEntries - p->CompbufReservedSpaceZs) * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate)); #endif } else { l->EffectiveCompressedBufferSize = math_min2((double)p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate, @@ -10200,16 +10138,16 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc ((double)p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64) * (p->rob_alloc_compressed ? 
l->AverageDCCCompressionRate : 1.0); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate); - dml2_printf("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 * l->AverageDCCCompressionRate); + DML_LOG_VERBOSE("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate); + DML_LOG_VERBOSE("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 * l->AverageDCCCompressionRate); #endif } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: MetaFIFOSizeInKEntries = %u\n", __func__, p->MetaFIFOSizeInKEntries); - dml2_printf("DML::%s: ZeroSizeBufferEntries = %u\n", __func__, p->ZeroSizeBufferEntries); - dml2_printf("DML::%s: AverageZeroSizeCompressionRate = %f\n", __func__, l->AverageZeroSizeCompressionRate); - dml2_printf("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0); + DML_LOG_VERBOSE("DML::%s: MetaFIFOSizeInKEntries = %u\n", __func__, p->MetaFIFOSizeInKEntries); + DML_LOG_VERBOSE("DML::%s: ZeroSizeBufferEntries = %u\n", __func__, p->ZeroSizeBufferEntries); + DML_LOG_VERBOSE("DML::%s: AverageZeroSizeCompressionRate = %f\n", __func__, l->AverageZeroSizeCompressionRate); + DML_LOG_VERBOSE("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0); #endif *p->StutterPeriod = 0; @@ -10220,15 +10158,15 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc l->LinesInDETYRoundedDownToSwath = math_floor2(l->LinesInDETY, p->SwathHeightY[k]); l->DETBufferingTimeY = l->LinesInDETYRoundedDownToSwath * ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) / p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, DETBufferSizeY = %u (%u kbytes)\n", __func__, k, p->DETBufferSizeY[k], p->DETBufferSizeY[k] / 1024); - dml2_printf("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]); - dml2_printf("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]); - dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]); - dml2_printf("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, p->TotalDataReadBandwidth); - dml2_printf("DML::%s: k=%u, LinesInDETY = %f\n", __func__, k, l->LinesInDETY); - dml2_printf("DML::%s: k=%u, LinesInDETYRoundedDownToSwath = %f\n", __func__, k, l->LinesInDETYRoundedDownToSwath); - dml2_printf("DML::%s: k=%u, VRatio = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio); - dml2_printf("DML::%s: k=%u, DETBufferingTimeY = %f\n", __func__, k, l->DETBufferingTimeY); + DML_LOG_VERBOSE("DML::%s: k=%u, DETBufferSizeY = %u (%u kbytes)\n", __func__, k, p->DETBufferSizeY[k], p->DETBufferSizeY[k] / 1024); + DML_LOG_VERBOSE("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]); + DML_LOG_VERBOSE("DML::%s: 
k=%u, TotalDataReadBandwidth = %f\n", __func__, k, p->TotalDataReadBandwidth); + DML_LOG_VERBOSE("DML::%s: k=%u, LinesInDETY = %f\n", __func__, k, l->LinesInDETY); + DML_LOG_VERBOSE("DML::%s: k=%u, LinesInDETYRoundedDownToSwath = %f\n", __func__, k, l->LinesInDETYRoundedDownToSwath); + DML_LOG_VERBOSE("DML::%s: k=%u, VRatio = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio); + DML_LOG_VERBOSE("DML::%s: k=%u, DETBufferingTimeY = %f\n", __func__, k, l->DETBufferingTimeY); #endif if (!FoundCriticalSurface || l->DETBufferingTimeY < *p->StutterPeriod) { @@ -10248,17 +10186,17 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc l->SinglePipeCriticalSurface = (p->DPPPerSurface[k] == 1); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, FoundCriticalSurface = %u\n", __func__, k, FoundCriticalSurface); - dml2_printf("DML::%s: k=%u, StutterPeriod = %f\n", __func__, k, *p->StutterPeriod); - dml2_printf("DML::%s: k=%u, MinTTUVBlankCriticalSurface = %f\n", __func__, k, l->MinTTUVBlankCriticalSurface); - dml2_printf("DML::%s: k=%u, FrameTimeCriticalSurface= %f\n", __func__, k, l->FrameTimeCriticalSurface); - dml2_printf("DML::%s: k=%u, VActiveTimeCriticalSurface = %f\n", __func__, k, l->VActiveTimeCriticalSurface); - dml2_printf("DML::%s: k=%u, BytePerPixelYCriticalSurface = %u\n", __func__, k, l->BytePerPixelYCriticalSurface); - dml2_printf("DML::%s: k=%u, SwathWidthYCriticalSurface = %f\n", __func__, k, l->SwathWidthYCriticalSurface); - dml2_printf("DML::%s: k=%u, SwathHeightYCriticalSurface = %f\n", __func__, k, l->SwathHeightYCriticalSurface); - dml2_printf("DML::%s: k=%u, BlockWidth256BytesYCriticalSurface = %u\n", __func__, k, l->BlockWidth256BytesYCriticalSurface); - dml2_printf("DML::%s: k=%u, SinglePlaneCriticalSurface = %u\n", __func__, k, l->SinglePlaneCriticalSurface); - dml2_printf("DML::%s: k=%u, SinglePipeCriticalSurface = %u\n", __func__, k, l->SinglePipeCriticalSurface); + DML_LOG_VERBOSE("DML::%s: k=%u, FoundCriticalSurface = %u\n", __func__, k, FoundCriticalSurface); + DML_LOG_VERBOSE("DML::%s: k=%u, StutterPeriod = %f\n", __func__, k, *p->StutterPeriod); + DML_LOG_VERBOSE("DML::%s: k=%u, MinTTUVBlankCriticalSurface = %f\n", __func__, k, l->MinTTUVBlankCriticalSurface); + DML_LOG_VERBOSE("DML::%s: k=%u, FrameTimeCriticalSurface= %f\n", __func__, k, l->FrameTimeCriticalSurface); + DML_LOG_VERBOSE("DML::%s: k=%u, VActiveTimeCriticalSurface = %f\n", __func__, k, l->VActiveTimeCriticalSurface); + DML_LOG_VERBOSE("DML::%s: k=%u, BytePerPixelYCriticalSurface = %u\n", __func__, k, l->BytePerPixelYCriticalSurface); + DML_LOG_VERBOSE("DML::%s: k=%u, SwathWidthYCriticalSurface = %f\n", __func__, k, l->SwathWidthYCriticalSurface); + DML_LOG_VERBOSE("DML::%s: k=%u, SwathHeightYCriticalSurface = %f\n", __func__, k, l->SwathHeightYCriticalSurface); + DML_LOG_VERBOSE("DML::%s: k=%u, BlockWidth256BytesYCriticalSurface = %u\n", __func__, k, l->BlockWidth256BytesYCriticalSurface); + DML_LOG_VERBOSE("DML::%s: k=%u, SinglePlaneCriticalSurface = %u\n", __func__, k, l->SinglePlaneCriticalSurface); + DML_LOG_VERBOSE("DML::%s: k=%u, SinglePipeCriticalSurface = %u\n", __func__, k, l->SinglePipeCriticalSurface); #endif } } @@ -10276,14 +10214,14 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = math_min2(*p->StutterPeriod * p->TotalDataReadBandwidth, l->EffectiveCompressedBufferSize); #ifdef __DML_VBA_DEBUG__ - 
dml2_printf("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate); - dml2_printf("DML::%s: StutterPeriod*TotalDataReadBandwidth = %f (%f kbytes)\n", __func__, *p->StutterPeriod * p->TotalDataReadBandwidth, (*p->StutterPeriod * p->TotalDataReadBandwidth) / 1024.0); - dml2_printf("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0); - dml2_printf("DML::%s: PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = %f (%f kbytes)\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / 1024); - dml2_printf("DML::%s: ReturnBW = %f\n", __func__, p->ReturnBW); - dml2_printf("DML::%s: TotalDataReadBandwidth = %f\n", __func__, p->TotalDataReadBandwidth); - dml2_printf("DML::%s: TotalRowReadBandwidth = %f\n", __func__, l->TotalRowReadBandwidth); - dml2_printf("DML::%s: DCFCLK = %f\n", __func__, p->DCFCLK); + DML_LOG_VERBOSE("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate); + DML_LOG_VERBOSE("DML::%s: StutterPeriod*TotalDataReadBandwidth = %f (%f kbytes)\n", __func__, *p->StutterPeriod * p->TotalDataReadBandwidth, (*p->StutterPeriod * p->TotalDataReadBandwidth) / 1024.0); + DML_LOG_VERBOSE("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0); + DML_LOG_VERBOSE("DML::%s: PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = %f (%f kbytes)\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / 1024); + DML_LOG_VERBOSE("DML::%s: ReturnBW = %f\n", __func__, p->ReturnBW); + DML_LOG_VERBOSE("DML::%s: TotalDataReadBandwidth = %f\n", __func__, p->TotalDataReadBandwidth); + DML_LOG_VERBOSE("DML::%s: TotalRowReadBandwidth = %f\n", __func__, l->TotalRowReadBandwidth); + DML_LOG_VERBOSE("DML::%s: DCFCLK = %f\n", __func__, p->DCFCLK); #endif l->StutterBurstTime = l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer @@ -10292,10 +10230,10 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc / math_min2(p->DCFCLK * 64, p->ReturnBW * (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate)) + *p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: Part 1 = %f\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / p->ReturnBW / (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate)); - dml2_printf("DML::%s: Part 2 = %f\n", __func__, (*p->StutterPeriod * p->TotalDataReadBandwidth - l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer) / (p->DCFCLK * 64)); - dml2_printf("DML::%s: Part 3 = %f\n", __func__, *p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW); - dml2_printf("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime); + DML_LOG_VERBOSE("DML::%s: Part 1 = %f\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / p->ReturnBW / (p->hw_debug5 ? 
1 : l->AverageDCCCompressionRate)); + DML_LOG_VERBOSE("DML::%s: Part 2 = %f\n", __func__, (*p->StutterPeriod * p->TotalDataReadBandwidth - l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer) / (p->DCFCLK * 64)); + DML_LOG_VERBOSE("DML::%s: Part 3 = %f\n", __func__, *p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW); + DML_LOG_VERBOSE("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime); #endif l->TotalActiveWriteback = 0; memset(l->stream_visited, 0, DML2_MAX_PLANES * sizeof(bool)); @@ -10324,9 +10262,9 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc if (l->TotalActiveWriteback == 0) { #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: SRExitTime = %f\n", __func__, p->SRExitTime); - dml2_printf("DML::%s: SRExitZ8Time = %f\n", __func__, p->SRExitZ8Time); - dml2_printf("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod); + DML_LOG_VERBOSE("DML::%s: SRExitTime = %f\n", __func__, p->SRExitTime); + DML_LOG_VERBOSE("DML::%s: SRExitZ8Time = %f\n", __func__, p->SRExitZ8Time); + DML_LOG_VERBOSE("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod); #endif *p->StutterEfficiencyNotIncludingVBlank = math_max2(0., 1 - (p->SRExitTime + l->StutterBurstTime) / *p->StutterPeriod) * 100; *p->Z8StutterEfficiencyNotIncludingVBlank = math_max2(0., 1 - (p->SRExitZ8Time + l->StutterBurstTime) / *p->StutterPeriod) * 100; @@ -10339,11 +10277,11 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc *p->Z8NumberOfStutterBurstsPerFrame = 0; } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: VActiveTimeCriticalSurface = %f\n", __func__, l->VActiveTimeCriticalSurface); - dml2_printf("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank); - dml2_printf("DML::%s: Z8StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->Z8StutterEfficiencyNotIncludingVBlank); - dml2_printf("DML::%s: NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->NumberOfStutterBurstsPerFrame); - dml2_printf("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame); + DML_LOG_VERBOSE("DML::%s: VActiveTimeCriticalSurface = %f\n", __func__, l->VActiveTimeCriticalSurface); + DML_LOG_VERBOSE("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank); + DML_LOG_VERBOSE("DML::%s: Z8StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->Z8StutterEfficiencyNotIncludingVBlank); + DML_LOG_VERBOSE("DML::%s: NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->NumberOfStutterBurstsPerFrame); + DML_LOG_VERBOSE("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame); #endif if (*p->StutterEfficiencyNotIncludingVBlank > 0) { @@ -10358,7 +10296,7 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc } if (*p->Z8StutterEfficiencyNotIncludingVBlank > 0) { - LastZ8StutterPeriod = l->VActiveTimeCriticalSurface - (*p->Z8NumberOfStutterBurstsPerFrame - 1) * *p->StutterPeriod; + //LastZ8StutterPeriod = l->VActiveTimeCriticalSurface - (*p->Z8NumberOfStutterBurstsPerFrame - 1) * *p->StutterPeriod; if (!((p->SynchronizeTimings || TotalNumberOfActiveOTG == 1) && SameTiming)) { *p->Z8StutterEfficiency = *p->Z8StutterEfficiencyNotIncludingVBlank; } else { @@ -10370,25 +10308,25 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: TotalNumberOfActiveOTG = %u\n", 
__func__, TotalNumberOfActiveOTG); - dml2_printf("DML::%s: SameTiming = %u\n", __func__, SameTiming); - dml2_printf("DML::%s: SynchronizeTimings = %u\n", __func__, p->SynchronizeTimings); - dml2_printf("DML::%s: LastZ8StutterPeriod = %f\n", __func__, LastZ8StutterPeriod); - dml2_printf("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Z8StutterEnterPlusExitWatermark); - dml2_printf("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime); - dml2_printf("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod); - dml2_printf("DML::%s: StutterEfficiency = %f\n", __func__, *p->StutterEfficiency); - dml2_printf("DML::%s: Z8StutterEfficiency = %f\n", __func__, *p->Z8StutterEfficiency); - dml2_printf("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank); - dml2_printf("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame); + DML_LOG_VERBOSE("DML::%s: TotalNumberOfActiveOTG = %u\n", __func__, TotalNumberOfActiveOTG); + DML_LOG_VERBOSE("DML::%s: SameTiming = %u\n", __func__, SameTiming); + DML_LOG_VERBOSE("DML::%s: SynchronizeTimings = %u\n", __func__, p->SynchronizeTimings); + DML_LOG_VERBOSE("DML::%s: LastZ8StutterPeriod = %f\n", __func__, *p->Z8StutterEfficiencyNotIncludingVBlank > 0 ? l->VActiveTimeCriticalSurface - (*p->Z8NumberOfStutterBurstsPerFrame - 1) * *p->StutterPeriod : 0); + DML_LOG_VERBOSE("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Z8StutterEnterPlusExitWatermark); + DML_LOG_VERBOSE("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime); + DML_LOG_VERBOSE("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod); + DML_LOG_VERBOSE("DML::%s: StutterEfficiency = %f\n", __func__, *p->StutterEfficiency); + DML_LOG_VERBOSE("DML::%s: Z8StutterEfficiency = %f\n", __func__, *p->Z8StutterEfficiency); + DML_LOG_VERBOSE("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank); + DML_LOG_VERBOSE("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame); #endif *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = !(!p->UnboundedRequestEnabled && (p->NumberOfActiveSurfaces == 1) && l->SinglePlaneCriticalSurface && l->SinglePipeCriticalSurface); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: DETBufferSizeYCriticalSurface = %u\n", __func__, l->DETBufferSizeYCriticalSurface); - dml2_printf("DML::%s: PixelChunkSizeInKByte = %u\n", __func__, p->PixelChunkSizeInKByte); - dml2_printf("DML::%s: DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = %u\n", __func__, *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE); + DML_LOG_VERBOSE("DML::%s: DETBufferSizeYCriticalSurface = %u\n", __func__, l->DETBufferSizeYCriticalSurface); + DML_LOG_VERBOSE("DML::%s: PixelChunkSizeInKByte = %u\n", __func__, p->PixelChunkSizeInKByte); + DML_LOG_VERBOSE("DML::%s: DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = %u\n", __func__, *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE); #endif } @@ -10422,7 +10360,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex double max_uclk_mhz = 0; double min_return_latency_in_DCFCLK_cycles = 0; - dml2_printf("DML::%s: --- START --- \n", __func__); + DML_LOG_VERBOSE("DML::%s: --- START --- \n", __func__); memset(&mode_lib->scratch, 0, sizeof(struct dml2_core_internal_scratch)); memset(&mode_lib->mp, 0, sizeof(struct dml2_core_internal_mode_program)); @@ -10444,13 +10382,13 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex for (k = 0; k < 
s->num_active_planes; ++k) { unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index; - DML2_ASSERT(cfg_support_info->stream_support_info[stream_index].odms_used <= 4); - DML2_ASSERT(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 4 || + DML_ASSERT(cfg_support_info->stream_support_info[stream_index].odms_used <= 4); + DML_ASSERT(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 4 || cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 2 || cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1); if (cfg_support_info->stream_support_info[stream_index].odms_used > 1) - DML2_ASSERT(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1); + DML_ASSERT(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1); switch (cfg_support_info->stream_support_info[stream_index].odms_used) { case (4): @@ -10476,51 +10414,51 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex for (k = 0; k < s->num_active_planes; ++k) { mode_lib->mp.NoOfDPP[k] = cfg_support_info->plane_support_info[k].dpps_used; mode_lib->mp.Dppclk[k] = programming->plane_programming[k].min_clocks.dcn4x.dppclk_khz / 1000.0; - DML2_ASSERT(mode_lib->mp.Dppclk[k] > 0); + DML_ASSERT(mode_lib->mp.Dppclk[k] > 0); } for (k = 0; k < s->num_active_planes; ++k) { unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index; mode_lib->mp.DSCCLK[k] = programming->stream_programming[stream_index].min_clocks.dcn4x.dscclk_khz / 1000.0; - dml2_printf("DML::%s: k=%d stream_index=%d, mode_lib->mp.DSCCLK = %f\n", __func__, k, stream_index, mode_lib->mp.DSCCLK[k]); + DML_LOG_VERBOSE("DML::%s: k=%d stream_index=%d, mode_lib->mp.DSCCLK = %f\n", __func__, k, stream_index, mode_lib->mp.DSCCLK[k]); } mode_lib->mp.Dispclk = programming->min_clocks.dcn4x.dispclk_khz / 1000.0; mode_lib->mp.DCFCLKDeepSleep = programming->min_clocks.dcn4x.deepsleep_dcfclk_khz / 1000.0; - DML2_ASSERT(mode_lib->mp.Dcfclk > 0); - DML2_ASSERT(mode_lib->mp.FabricClock > 0); - DML2_ASSERT(mode_lib->mp.dram_bw_mbps > 0); - DML2_ASSERT(mode_lib->mp.uclk_freq_mhz > 0); - DML2_ASSERT(mode_lib->mp.GlobalDPPCLK > 0); - DML2_ASSERT(mode_lib->mp.Dispclk > 0); - DML2_ASSERT(mode_lib->mp.DCFCLKDeepSleep > 0); - DML2_ASSERT(s->SOCCLK > 0); - -#ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: num_active_planes = %u\n", __func__, s->num_active_planes); - dml2_printf("DML::%s: num_active_pipes = %u\n", __func__, mode_lib->mp.num_active_pipes); - dml2_printf("DML::%s: Dcfclk = %f\n", __func__, mode_lib->mp.Dcfclk); - dml2_printf("DML::%s: FabricClock = %f\n", __func__, mode_lib->mp.FabricClock); - dml2_printf("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->mp.dram_bw_mbps); - dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->mp.uclk_freq_mhz); - dml2_printf("DML::%s: Dispclk = %f\n", __func__, mode_lib->mp.Dispclk); + DML_ASSERT(mode_lib->mp.Dcfclk > 0); + DML_ASSERT(mode_lib->mp.FabricClock > 0); + DML_ASSERT(mode_lib->mp.dram_bw_mbps > 0); + DML_ASSERT(mode_lib->mp.uclk_freq_mhz > 0); + DML_ASSERT(mode_lib->mp.GlobalDPPCLK > 0); + DML_ASSERT(mode_lib->mp.Dispclk > 0); + DML_ASSERT(mode_lib->mp.DCFCLKDeepSleep > 0); + DML_ASSERT(s->SOCCLK > 0); + +#ifdef __DML_VBA_DEBUG__ + DML_LOG_VERBOSE("DML::%s: num_active_planes = %u\n", __func__, s->num_active_planes); + DML_LOG_VERBOSE("DML::%s: num_active_pipes = %u\n", __func__, mode_lib->mp.num_active_pipes); + 
DML_LOG_VERBOSE("DML::%s: Dcfclk = %f\n", __func__, mode_lib->mp.Dcfclk); + DML_LOG_VERBOSE("DML::%s: FabricClock = %f\n", __func__, mode_lib->mp.FabricClock); + DML_LOG_VERBOSE("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->mp.dram_bw_mbps); + DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->mp.uclk_freq_mhz); + DML_LOG_VERBOSE("DML::%s: Dispclk = %f\n", __func__, mode_lib->mp.Dispclk); for (k = 0; k < s->num_active_planes; ++k) { - dml2_printf("DML::%s: Dppclk[%0d] = %f\n", __func__, k, mode_lib->mp.Dppclk[k]); - } - dml2_printf("DML::%s: GlobalDPPCLK = %f\n", __func__, mode_lib->mp.GlobalDPPCLK); - dml2_printf("DML::%s: DCFCLKDeepSleep = %f\n", __func__, mode_lib->mp.DCFCLKDeepSleep); - dml2_printf("DML::%s: SOCCLK = %f\n", __func__, s->SOCCLK); - dml2_printf("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index); - dml2_printf("DML::%s: min_clk_table min_fclk_khz = %d\n", __func__, min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_fclk_khz); - dml2_printf("DML::%s: min_clk_table uclk_mhz = %f\n", __func__, dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config)); + DML_LOG_VERBOSE("DML::%s: Dppclk[%0d] = %f\n", __func__, k, mode_lib->mp.Dppclk[k]); + } + DML_LOG_VERBOSE("DML::%s: GlobalDPPCLK = %f\n", __func__, mode_lib->mp.GlobalDPPCLK); + DML_LOG_VERBOSE("DML::%s: DCFCLKDeepSleep = %f\n", __func__, mode_lib->mp.DCFCLKDeepSleep); + DML_LOG_VERBOSE("DML::%s: SOCCLK = %f\n", __func__, s->SOCCLK); + DML_LOG_VERBOSE("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index); + DML_LOG_VERBOSE("DML::%s: min_clk_table min_fclk_khz = %ld\n", __func__, min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_fclk_khz); + DML_LOG_VERBOSE("DML::%s: min_clk_table uclk_mhz = %f\n", __func__, dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config)); for (k = 0; k < mode_lib->mp.num_active_pipes; ++k) { - dml2_printf("DML::%s: pipe=%d is in plane=%d\n", __func__, k, mode_lib->mp.pipe_plane[k]); - dml2_printf("DML::%s: Per-plane DPPPerSurface[%0d] = %d\n", __func__, k, mode_lib->mp.NoOfDPP[k]); + DML_LOG_VERBOSE("DML::%s: pipe=%d is in plane=%d\n", __func__, k, mode_lib->mp.pipe_plane[k]); + DML_LOG_VERBOSE("DML::%s: Per-plane DPPPerSurface[%0d] = %d\n", __func__, k, mode_lib->mp.NoOfDPP[k]); } for (k = 0; k < s->num_active_planes; k++) - dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns); + DML_LOG_VERBOSE("DML::%s: plane_%d: reserved_vblank_time_ns = %lu\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns); #endif CalculateMaxDETAndMinCompressedBufferSize( @@ -10617,8 +10555,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)); mode_lib->mp.vactive_sw_bw_l[k] = mode_lib->mp.SwathWidthSingleDPPY[k] * mode_lib->mp.BytePerPixelY[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / 
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio; mode_lib->mp.vactive_sw_bw_c[k] = mode_lib->mp.SwathWidthSingleDPPC[k] * mode_lib->mp.BytePerPixelC[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio; - dml2_printf("DML::%s: vactive_sw_bw_l[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]); - dml2_printf("DML::%s: vactive_sw_bw_c[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]); + DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_l[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]); + DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_c[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]); } CalculateSwathAndDETConfiguration_params->display_cfg = display_cfg; @@ -11097,7 +11035,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin); for (k = 0; k < s->num_active_planes; ++k) { - bool cursor_not_enough_urgent_latency_hiding = 0; + bool cursor_not_enough_urgent_latency_hiding = false; s->line_times[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000); @@ -11173,8 +11111,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex mode_lib->mp.WritebackDelay[k]); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]); - dml2_printf("DML::%s: k=%u WritebackDelay = %f\n", __func__, k, mode_lib->mp.WritebackDelay[k]); + DML_LOG_VERBOSE("DML::%s: k=%u MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]); + DML_LOG_VERBOSE("DML::%s: k=%u WritebackDelay = %f\n", __func__, k, mode_lib->mp.WritebackDelay[k]); #endif } @@ -11183,7 +11121,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex s->immediate_flip_required = s->immediate_flip_required || display_cfg->plane_descriptors[k].immediate_flip; } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: immediate_flip_required = %u\n", __func__, s->immediate_flip_required); + DML_LOG_VERBOSE("DML::%s: immediate_flip_required = %u\n", __func__, s->immediate_flip_required); #endif if (s->num_active_planes > 1) { @@ -11219,12 +11157,12 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex s->DestinationLineTimesForPrefetchLessThan2 = false; s->VRatioPrefetchMoreThanMax = false; - dml2_printf("DML::%s: Start one iteration of prefetch schedule evaluation\n", __func__); + DML_LOG_VERBOSE("DML::%s: Start one iteration of prefetch schedule evaluation\n", __func__); for (k = 0; k < s->num_active_planes; ++k) { struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe; - dml2_printf("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]); + DML_LOG_VERBOSE("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]); mode_lib->mp.TWait[k] = CalculateTWait( display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns, mode_lib->mp.UrgentLatency, @@ -11261,7 +11199,7 @@ static bool 
dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex myPipe->ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k); + DML_LOG_VERBOSE("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k); #endif CalculatePrefetchSchedule_params->display_cfg = display_cfg; CalculatePrefetchSchedule_params->HostVMInefficiencyFactor = s->HostVMInefficiencyFactorPrefetch; @@ -11325,7 +11263,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->mp.VRatioPrefetchC[k]; CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k]; CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k]; - CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &s->dummy_single_array[0][k]; + CalculatePrefetchSchedule_params->RequiredPrefetchBWMax = &s->dummy_single_array[0][k]; CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->mp.NotEnoughTimeForDynamicMetadata[k]; CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->mp.Tno_bw[k]; CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->mp.Tno_bw_flip[k]; @@ -11356,7 +11294,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex mode_lib->mp.impacted_prefetch_margin_us[k] = 0; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%0u NoTimeToPrefetch=%0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]); + DML_LOG_VERBOSE("DML::%s: k=%0u NoTimeToPrefetch=%0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]); #endif mode_lib->mp.VStartupMin[k] = s->MaxVStartupLines[k]; } // for k @@ -11366,9 +11304,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex if (mode_lib->mp.NoTimeToPrefetch[k] == true || mode_lib->mp.NotEnoughTimeForDynamicMetadata[k] || mode_lib->mp.DSTYAfterScaler[k] > 8) { - dml2_printf("DML::%s: k=%u, NoTimeToPrefetch = %0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]); - dml2_printf("DML::%s: k=%u, NotEnoughTimeForDynamicMetadata=%u\n", __func__, k, mode_lib->mp.NotEnoughTimeForDynamicMetadata[k]); - dml2_printf("DML::%s: k=%u, DSTYAfterScaler=%u (should be <= 0)\n", __func__, k, mode_lib->mp.DSTYAfterScaler[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, NoTimeToPrefetch = %0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, NotEnoughTimeForDynamicMetadata=%u\n", __func__, k, mode_lib->mp.NotEnoughTimeForDynamicMetadata[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, DSTYAfterScaler=%u (should be <= 0)\n", __func__, k, mode_lib->mp.DSTYAfterScaler[k]); mode_lib->mp.PrefetchModeSupported = false; } if (mode_lib->mp.dst_y_prefetch[k] < 2) @@ -11377,24 +11315,24 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex if (mode_lib->mp.VRatioPrefetchY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ || mode_lib->mp.VRatioPrefetchC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) { s->VRatioPrefetchMoreThanMax = true; - dml2_printf("DML::%s: k=%d, VRatioPrefetchY=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k], __DML2_CALCS_MAX_VRATIO_PRE__); - dml2_printf("DML::%s: k=%d, VRatioPrefetchC=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchC[k], __DML2_CALCS_MAX_VRATIO_PRE__); - dml2_printf("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, 
s->VRatioPrefetchMoreThanMax); + DML_LOG_VERBOSE("DML::%s: k=%d, VRatioPrefetchY=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k], __DML2_CALCS_MAX_VRATIO_PRE__); + DML_LOG_VERBOSE("DML::%s: k=%d, VRatioPrefetchC=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchC[k], __DML2_CALCS_MAX_VRATIO_PRE__); + DML_LOG_VERBOSE("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax); } if (mode_lib->mp.NotEnoughUrgentLatencyHiding[k]) { - dml2_printf("DML::%s: k=%u, NotEnoughUrgentLatencyHiding = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHiding[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, NotEnoughUrgentLatencyHiding = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHiding[k]); mode_lib->mp.PrefetchModeSupported = false; } } if (s->VRatioPrefetchMoreThanMax == true || s->DestinationLineTimesForPrefetchLessThan2 == true) { - dml2_printf("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax); - dml2_printf("DML::%s: DestinationLineTimesForPrefetchLessThan2 = %u\n", __func__, s->DestinationLineTimesForPrefetchLessThan2); + DML_LOG_VERBOSE("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax); + DML_LOG_VERBOSE("DML::%s: DestinationLineTimesForPrefetchLessThan2 = %u\n", __func__, s->DestinationLineTimesForPrefetchLessThan2); mode_lib->mp.PrefetchModeSupported = false; } - dml2_printf("DML::%s: Prefetch schedule is %sOK at vstartup = %u\n", __func__, + DML_LOG_VERBOSE("DML::%s: Prefetch schedule is %sOK at vstartup = %u\n", __func__, mode_lib->mp.PrefetchModeSupported ? "" : "NOT ", CalculatePrefetchSchedule_params->VStartup); // Prefetch schedule OK, now check prefetch bw @@ -11422,24 +11360,24 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex &mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%0u DPPPerSurface=%u\n", __func__, k, mode_lib->mp.NoOfDPP[k]); - dml2_printf("DML::%s: k=%0u UrgentBurstFactorLuma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLuma[k]); - dml2_printf("DML::%s: k=%0u UrgentBurstFactorChroma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChroma[k]); - dml2_printf("DML::%s: k=%0u UrgentBurstFactorLumaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLumaPre[k]); - dml2_printf("DML::%s: k=%0u UrgentBurstFactorChromaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChromaPre[k]); + DML_LOG_VERBOSE("DML::%s: k=%0u DPPPerSurface=%u\n", __func__, k, mode_lib->mp.NoOfDPP[k]); + DML_LOG_VERBOSE("DML::%s: k=%0u UrgentBurstFactorLuma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLuma[k]); + DML_LOG_VERBOSE("DML::%s: k=%0u UrgentBurstFactorChroma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChroma[k]); + DML_LOG_VERBOSE("DML::%s: k=%0u UrgentBurstFactorLumaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLumaPre[k]); + DML_LOG_VERBOSE("DML::%s: k=%0u UrgentBurstFactorChromaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChromaPre[k]); - dml2_printf("DML::%s: k=%0u VRatioPrefetchY=%f\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k]); - dml2_printf("DML::%s: k=%0u VRatioY=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio); + DML_LOG_VERBOSE("DML::%s: k=%0u VRatioPrefetchY=%f\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k]); + DML_LOG_VERBOSE("DML::%s: k=%0u VRatioY=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio); - dml2_printf("DML::%s: k=%0u 
prefetch_vmrow_bw=%f\n", __func__, k, mode_lib->mp.prefetch_vmrow_bw[k]); - dml2_printf("DML::%s: k=%0u vactive_sw_bw_l=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]); - dml2_printf("DML::%s: k=%0u vactive_sw_bw_c=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]); - dml2_printf("DML::%s: k=%0u cursor_bw=%f\n", __func__, k, mode_lib->mp.cursor_bw[k]); - dml2_printf("DML::%s: k=%0u dpte_row_bw=%f\n", __func__, k, mode_lib->mp.dpte_row_bw[k]); - dml2_printf("DML::%s: k=%0u meta_row_bw=%f\n", __func__, k, mode_lib->mp.meta_row_bw[k]); - dml2_printf("DML::%s: k=%0u RequiredPrefetchPixelDataBWLuma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k]); - dml2_printf("DML::%s: k=%0u RequiredPrefetchPixelDataBWChroma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k]); - dml2_printf("DML::%s: k=%0u prefetch_cursor_bw=%f\n", __func__, k, mode_lib->mp.prefetch_cursor_bw[k]); + DML_LOG_VERBOSE("DML::%s: k=%0u prefetch_vmrow_bw=%f\n", __func__, k, mode_lib->mp.prefetch_vmrow_bw[k]); + DML_LOG_VERBOSE("DML::%s: k=%0u vactive_sw_bw_l=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]); + DML_LOG_VERBOSE("DML::%s: k=%0u vactive_sw_bw_c=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]); + DML_LOG_VERBOSE("DML::%s: k=%0u cursor_bw=%f\n", __func__, k, mode_lib->mp.cursor_bw[k]); + DML_LOG_VERBOSE("DML::%s: k=%0u dpte_row_bw=%f\n", __func__, k, mode_lib->mp.dpte_row_bw[k]); + DML_LOG_VERBOSE("DML::%s: k=%0u meta_row_bw=%f\n", __func__, k, mode_lib->mp.meta_row_bw[k]); + DML_LOG_VERBOSE("DML::%s: k=%0u RequiredPrefetchPixelDataBWLuma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k]); + DML_LOG_VERBOSE("DML::%s: k=%0u RequiredPrefetchPixelDataBWChroma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k]); + DML_LOG_VERBOSE("DML::%s: k=%0u prefetch_cursor_bw=%f\n", __func__, k, mode_lib->mp.prefetch_cursor_bw[k]); #endif } @@ -11468,7 +11406,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->mp.vactive_sw_bw_c; calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->mp.RequiredPrefetchPixelDataBWLuma; calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->mp.RequiredPrefetchPixelDataBWChroma; - calculate_peak_bandwidth_params->prefetch_bandwidth_oto = s->dummy_single_array[0]; + calculate_peak_bandwidth_params->prefetch_bandwidth_max = s->dummy_single_array[0]; calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->mp.excess_vactive_fill_bw_l; calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->mp.excess_vactive_fill_bw_c; calculate_peak_bandwidth_params->cursor_bw = mode_lib->mp.cursor_bw; @@ -11503,11 +11441,11 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex mode_lib->mp.urg_bandwidth_available); if (!mode_lib->mp.PrefetchModeSupported) - dml2_printf("DML::%s: Bandwidth not sufficient for prefetch!\n", __func__); + DML_LOG_VERBOSE("DML::%s: Bandwidth not sufficient for prefetch!\n", __func__); for (k = 0; k < s->num_active_planes; ++k) { if (mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]) { - dml2_printf("DML::%s: k=%u, NotEnoughUrgentLatencyHidingPre = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, NotEnoughUrgentLatencyHidingPre = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]); mode_lib->mp.PrefetchModeSupported = false; } } @@ -11533,12 +11471,12 @@ static 
bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex } mode_lib->mp.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->mp.NoOfDPP[k]; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k = %u\n", __func__, k); - dml2_printf("DML::%s: DPPPerSurface = %u\n", __func__, mode_lib->mp.NoOfDPP[k]); - dml2_printf("DML::%s: vm_bytes = %u\n", __func__, mode_lib->mp.vm_bytes[k]); - dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, mode_lib->mp.PixelPTEBytesPerRow[k]); - dml2_printf("DML::%s: meta_row_bytes = %u\n", __func__, mode_lib->mp.meta_row_bytes[k]); - dml2_printf("DML::%s: TotImmediateFlipBytes = %u\n", __func__, mode_lib->mp.TotImmediateFlipBytes); + DML_LOG_VERBOSE("DML::%s: k = %u\n", __func__, k); + DML_LOG_VERBOSE("DML::%s: DPPPerSurface = %u\n", __func__, mode_lib->mp.NoOfDPP[k]); + DML_LOG_VERBOSE("DML::%s: vm_bytes = %u\n", __func__, mode_lib->mp.vm_bytes[k]); + DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, mode_lib->mp.PixelPTEBytesPerRow[k]); + DML_LOG_VERBOSE("DML::%s: meta_row_bytes = %u\n", __func__, mode_lib->mp.meta_row_bytes[k]); + DML_LOG_VERBOSE("DML::%s: TotImmediateFlipBytes = %u\n", __func__, mode_lib->mp.TotImmediateFlipBytes); #endif } for (k = 0; k < s->num_active_planes; ++k) { @@ -11608,7 +11546,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex calculate_peak_bandwidth_params->meta_row_bw = mode_lib->mp.meta_row_bw; calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->mp.prefetch_cursor_bw; calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->mp.prefetch_vmrow_bw; - calculate_peak_bandwidth_params->prefetch_bandwidth_oto = s->dummy_single_array[0]; + calculate_peak_bandwidth_params->prefetch_bandwidth_max = s->dummy_single_array[0]; calculate_peak_bandwidth_params->flip_bw = mode_lib->mp.final_flip_bw; calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->mp.UrgentBurstFactorLuma; calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->mp.UrgentBurstFactorChroma; @@ -11631,13 +11569,13 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex mode_lib->mp.urg_bandwidth_available); if (!mode_lib->mp.ImmediateFlipSupported) - dml2_printf("DML::%s: Bandwidth not sufficient for flip!", __func__); + DML_LOG_VERBOSE("DML::%s: Bandwidth not sufficient for flip!", __func__); for (k = 0; k < s->num_active_planes; ++k) { if (display_cfg->plane_descriptors[k].immediate_flip && mode_lib->mp.ImmediateFlipSupportedForPipe[k] == false) { mode_lib->mp.ImmediateFlipSupported = false; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: Pipe %0d not supporting iflip!\n", __func__, k); + DML_LOG_VERBOSE("DML::%s: Pipe %0d not supporting iflip!\n", __func__, k); #endif } } @@ -11650,28 +11588,28 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex mode_lib->mp.PrefetchAndImmediateFlipSupported = (mode_lib->mp.PrefetchModeSupported == true && (!must_support_iflip || mode_lib->mp.ImmediateFlipSupported)); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: PrefetchModeSupported = %u\n", __func__, mode_lib->mp.PrefetchModeSupported); + DML_LOG_VERBOSE("DML::%s: PrefetchModeSupported = %u\n", __func__, mode_lib->mp.PrefetchModeSupported); for (k = 0; k < s->num_active_planes; ++k) - dml2_printf("DML::%s: immediate_flip_required[%u] = %u\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip); - dml2_printf("DML::%s: HostVMEnable = %u\n", __func__, display_cfg->hostvm_enable); - 
dml2_printf("DML::%s: ImmediateFlipSupported = %u\n", __func__, mode_lib->mp.ImmediateFlipSupported); - dml2_printf("DML::%s: PrefetchAndImmediateFlipSupported = %u\n", __func__, mode_lib->mp.PrefetchAndImmediateFlipSupported); + DML_LOG_VERBOSE("DML::%s: immediate_flip_required[%u] = %u\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip); + DML_LOG_VERBOSE("DML::%s: HostVMEnable = %u\n", __func__, display_cfg->hostvm_enable); + DML_LOG_VERBOSE("DML::%s: ImmediateFlipSupported = %u\n", __func__, mode_lib->mp.ImmediateFlipSupported); + DML_LOG_VERBOSE("DML::%s: PrefetchAndImmediateFlipSupported = %u\n", __func__, mode_lib->mp.PrefetchAndImmediateFlipSupported); #endif - dml2_printf("DML::%s: Done one iteration: k=%d, MaxVStartupLines=%u\n", __func__, k, s->MaxVStartupLines[k]); + DML_LOG_VERBOSE("DML::%s: Done one iteration: k=%d, MaxVStartupLines=%u\n", __func__, k, s->MaxVStartupLines[k]); } for (k = 0; k < s->num_active_planes; ++k) - dml2_printf("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]); + DML_LOG_VERBOSE("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]); if (!mode_lib->mp.PrefetchAndImmediateFlipSupported) { - dml2_printf("DML::%s: Bad, Prefetch and flip scheduling solution NOT found!\n", __func__); + DML_LOG_VERBOSE("DML::%s: Bad, Prefetch and flip scheduling solution NOT found!\n", __func__); } else { - dml2_printf("DML::%s: Good, Prefetch and flip scheduling solution found\n", __func__); + DML_LOG_VERBOSE("DML::%s: Good, Prefetch and flip scheduling solution found\n", __func__); // DCC Configuration for (k = 0; k < s->num_active_planes; ++k) { #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: Calculate DCC configuration for surface k=%u\n", __func__, k); + DML_LOG_VERBOSE("DML::%s: Calculate DCC configuration for surface k=%u\n", __func__, k); #endif CalculateDCCConfiguration( display_cfg->plane_descriptors[k].surface.dcc.enable, @@ -11780,8 +11718,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex calculate_pstate_keepout_dst_lines(display_cfg, &mode_lib->mp.Watermark, mode_lib->mp.pstate_keepout_dst_lines); - dml2_printf("DML::%s: DEBUG stream_index = %0d\n", __func__, display_cfg->plane_descriptors[0].stream_index); - dml2_printf("DML::%s: DEBUG PixelClock = %d kHz\n", __func__, (display_cfg->stream_descriptors[display_cfg->plane_descriptors[0].stream_index].timing.pixel_clock_khz)); + DML_LOG_VERBOSE("DML::%s: DEBUG stream_index = %0d\n", __func__, display_cfg->plane_descriptors[0].stream_index); + DML_LOG_VERBOSE("DML::%s: DEBUG PixelClock = %ld kHz\n", __func__, (display_cfg->stream_descriptors[display_cfg->plane_descriptors[0].stream_index].timing.pixel_clock_khz)); //Display Pipeline Delivery Time in Prefetch, Groups CalculatePixelDeliveryTimes( @@ -11893,15 +11831,15 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex mode_lib->mp.MinTTUVBlank[k] = mode_lib->mp.TCalc + mode_lib->mp.MinTTUVBlank[k]; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, MinTTUVBlank = %f (before vstartup margin)\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, MinTTUVBlank = %f (before vstartup margin)\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]); #endif s->Tvstartup_margin = (s->MaxVStartupLines[k] - mode_lib->mp.VStartupMin[k]) * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / 
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000); mode_lib->mp.MinTTUVBlank[k] = mode_lib->mp.MinTTUVBlank[k] + s->Tvstartup_margin; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, Tvstartup_margin = %f\n", __func__, k, s->Tvstartup_margin); - dml2_printf("DML::%s: k=%u, MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]); - dml2_printf("DML::%s: k=%u, MinTTUVBlank = %f\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, Tvstartup_margin = %f\n", __func__, k, s->Tvstartup_margin); + DML_LOG_VERBOSE("DML::%s: k=%u, MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, MinTTUVBlank = %f\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]); #endif mode_lib->mp.Tdmdl[k] = mode_lib->mp.Tdmdl[k] + s->Tvstartup_margin; @@ -11920,9 +11858,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex s->blank_lines_remaining = (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active) - mode_lib->mp.VStartup[k]; if (s->blank_lines_remaining < 0) { - dml2_printf("ERROR: Vstartup is larger than vblank!?\n"); + DML_LOG_VERBOSE("ERROR: Vstartup is larger than vblank!?\n"); s->blank_lines_remaining = 0; - DML2_ASSERT(0); + DML_ASSERT(0); } mode_lib->mp.MIN_DST_Y_NEXT_START[k] = s->dlg_vblank_start + s->blank_lines_remaining + s->LSetup; @@ -11936,23 +11874,23 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k] = false; } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, VStartup = %u (max)\n", __func__, k, mode_lib->mp.VStartup[k]); - dml2_printf("DML::%s: k=%u, VStartupMin = %u (max)\n", __func__, k, mode_lib->mp.VStartupMin[k]); - dml2_printf("DML::%s: k=%u, VUpdateOffsetPix = %u\n", __func__, k, mode_lib->mp.VUpdateOffsetPix[k]); - dml2_printf("DML::%s: k=%u, VUpdateWidthPix = %u\n", __func__, k, mode_lib->mp.VUpdateWidthPix[k]); - dml2_printf("DML::%s: k=%u, VReadyOffsetPix = %u\n", __func__, k, mode_lib->mp.VReadyOffsetPix[k]); - dml2_printf("DML::%s: k=%u, HTotal = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total); - dml2_printf("DML::%s: k=%u, VTotal = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total); - dml2_printf("DML::%s: k=%u, VActive = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active); - dml2_printf("DML::%s: k=%u, VFrontPorch = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch); - dml2_printf("DML::%s: k=%u, TSetup = %f\n", __func__, k, mode_lib->mp.TSetup[k]); - dml2_printf("DML::%s: k=%u, MIN_DST_Y_NEXT_START = %f\n", __func__, k, mode_lib->mp.MIN_DST_Y_NEXT_START[k]); - dml2_printf("DML::%s: k=%u, VREADY_AT_OR_AFTER_VSYNC = %u\n", __func__, k, mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, VStartup = %u (max)\n", __func__, k, mode_lib->mp.VStartup[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, VStartupMin = %u (max)\n", __func__, k, mode_lib->mp.VStartupMin[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, VUpdateOffsetPix = %u\n", __func__, k, mode_lib->mp.VUpdateOffsetPix[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, 
VUpdateWidthPix = %u\n", __func__, k, mode_lib->mp.VUpdateWidthPix[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, VReadyOffsetPix = %u\n", __func__, k, mode_lib->mp.VReadyOffsetPix[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, HTotal = %lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total); + DML_LOG_VERBOSE("DML::%s: k=%u, VTotal = %lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total); + DML_LOG_VERBOSE("DML::%s: k=%u, VActive = %lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active); + DML_LOG_VERBOSE("DML::%s: k=%u, VFrontPorch = %lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch); + DML_LOG_VERBOSE("DML::%s: k=%u, TSetup = %f\n", __func__, k, mode_lib->mp.TSetup[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, MIN_DST_Y_NEXT_START = %f\n", __func__, k, mode_lib->mp.MIN_DST_Y_NEXT_START[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, VREADY_AT_OR_AFTER_VSYNC = %u\n", __func__, k, mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k]); #endif } //Maximum Bandwidth Used - s->TotalWRBandwidth = 0; + mode_lib->mp.TotalWRBandwidth = 0; for (k = 0; k < display_cfg->num_streams; ++k) { s->WRBandwidth = 0; if (display_cfg->stream_descriptors[k].writeback.active_writebacks_per_stream > 0) { @@ -11961,7 +11899,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex (display_cfg->stream_descriptors[k].timing.h_total * display_cfg->stream_descriptors[k].writeback.writeback_stream[0].input_height / ((double)display_cfg->stream_descriptors[k].timing.pixel_clock_khz / 1000)) * (display_cfg->stream_descriptors[k].writeback.writeback_stream[0].pixel_format == dml2_444_32 ? 
4.0 : 8.0); - s->TotalWRBandwidth = s->TotalWRBandwidth + s->WRBandwidth; + mode_lib->mp.TotalWRBandwidth = mode_lib->mp.TotalWRBandwidth + s->WRBandwidth; } } @@ -11969,9 +11907,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex for (k = 0; k < s->num_active_planes; ++k) { mode_lib->mp.TotalDataReadBandwidth = mode_lib->mp.TotalDataReadBandwidth + mode_lib->mp.vactive_sw_bw_l[k] + mode_lib->mp.vactive_sw_bw_c[k]; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, mode_lib->mp.TotalDataReadBandwidth); - dml2_printf("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]); - dml2_printf("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, mode_lib->mp.TotalDataReadBandwidth); + DML_LOG_VERBOSE("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]); + DML_LOG_VERBOSE("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]); #endif } @@ -12051,28 +11989,28 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex min_return_latency_in_DCFCLK_cycles = (min_return_uclk_cycles / max_uclk_mhz + min_return_fclk_cycles / max_fclk_mhz) * hard_minimum_dcfclk_mhz; mode_lib->mp.min_return_latency_in_dcfclk = (unsigned int)min_return_latency_in_DCFCLK_cycles; mode_lib->mp.dcfclk_deep_sleep_hysteresis = (unsigned int)math_max2(32, (double)mode_lib->ip.pixel_chunk_size_kbytes * 1024 * 3 / 4 / 64 - min_return_latency_in_DCFCLK_cycles); - DML2_ASSERT(mode_lib->mp.dcfclk_deep_sleep_hysteresis < 256); + DML_ASSERT(mode_lib->mp.dcfclk_deep_sleep_hysteresis < 256); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: max_fclk_mhz = %f\n", __func__, max_fclk_mhz); - dml2_printf("DML::%s: max_uclk_mhz = %f\n", __func__, max_uclk_mhz); - dml2_printf("DML::%s: hard_minimum_dcfclk_mhz = %f\n", __func__, hard_minimum_dcfclk_mhz); - dml2_printf("DML::%s: min_return_uclk_cycles = %d\n", __func__, min_return_uclk_cycles); - dml2_printf("DML::%s: min_return_fclk_cycles = %d\n", __func__, min_return_fclk_cycles); - dml2_printf("DML::%s: min_return_latency_in_DCFCLK_cycles = %f\n", __func__, min_return_latency_in_DCFCLK_cycles); - dml2_printf("DML::%s: dcfclk_deep_sleep_hysteresis = %d \n", __func__, mode_lib->mp.dcfclk_deep_sleep_hysteresis); - dml2_printf("DML::%s: --- END --- \n", __func__); + DML_LOG_VERBOSE("DML::%s: max_fclk_mhz = %f\n", __func__, max_fclk_mhz); + DML_LOG_VERBOSE("DML::%s: max_uclk_mhz = %f\n", __func__, max_uclk_mhz); + DML_LOG_VERBOSE("DML::%s: hard_minimum_dcfclk_mhz = %f\n", __func__, hard_minimum_dcfclk_mhz); + DML_LOG_VERBOSE("DML::%s: min_return_uclk_cycles = %ld\n", __func__, min_return_uclk_cycles); + DML_LOG_VERBOSE("DML::%s: min_return_fclk_cycles = %ld\n", __func__, min_return_fclk_cycles); + DML_LOG_VERBOSE("DML::%s: min_return_latency_in_DCFCLK_cycles = %f\n", __func__, min_return_latency_in_DCFCLK_cycles); + DML_LOG_VERBOSE("DML::%s: dcfclk_deep_sleep_hysteresis = %d \n", __func__, mode_lib->mp.dcfclk_deep_sleep_hysteresis); + DML_LOG_VERBOSE("DML::%s: --- END --- \n", __func__); #endif return (in_out_params->mode_lib->mp.PrefetchAndImmediateFlipSupported); } bool dml2_core_calcs_mode_programming_ex(struct dml2_core_calcs_mode_programming_ex *in_out_params) { - dml2_printf("DML::%s: ------------- START ----------\n", __func__); + DML_LOG_VERBOSE("DML::%s: ------------- START 
----------\n", __func__); bool result = dml_core_mode_programming(in_out_params); - dml2_printf("DML::%s: result = %0d\n", __func__, result); - dml2_printf("DML::%s: ------------- DONE ----------\n", __func__); + DML_LOG_VERBOSE("DML::%s: result = %0d\n", __func__, result); + DML_LOG_VERBOSE("DML::%s: ------------- DONE ----------\n", __func__); return result; } @@ -12130,16 +12068,16 @@ void dml2_core_calcs_get_dpte_row_height( unsigned int MacroTileHeight = is_plane1 ? MacroTileHeightC : MacroTileHeightY; unsigned int PTEBufferSizeInRequests = is_plane1 ? mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma : mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML: %s: is_plane1 = %u\n", __func__, is_plane1); - dml2_printf("DML: %s: BytePerPixel = %u\n", __func__, BytePerPixel); - dml2_printf("DML: %s: BlockHeight256Bytes = %u\n", __func__, BlockHeight256Bytes); - dml2_printf("DML: %s: BlockWidth256Bytes = %u\n", __func__, BlockWidth256Bytes); - dml2_printf("DML: %s: MacroTileWidth = %u\n", __func__, MacroTileWidth); - dml2_printf("DML: %s: MacroTileHeight = %u\n", __func__, MacroTileHeight); - dml2_printf("DML: %s: PTEBufferSizeInRequests = %u\n", __func__, PTEBufferSizeInRequests); - dml2_printf("DML: %s: dpte_buffer_size_in_pte_reqs_luma = %u\n", __func__, mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma); - dml2_printf("DML: %s: dpte_buffer_size_in_pte_reqs_chroma = %u\n", __func__, mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma); - dml2_printf("DML: %s: GPUVMMinPageSizeKBytes = %u\n", __func__, GPUVMMinPageSizeKBytes); + DML_LOG_VERBOSE("DML: %s: is_plane1 = %u\n", __func__, is_plane1); + DML_LOG_VERBOSE("DML: %s: BytePerPixel = %u\n", __func__, BytePerPixel); + DML_LOG_VERBOSE("DML: %s: BlockHeight256Bytes = %u\n", __func__, BlockHeight256Bytes); + DML_LOG_VERBOSE("DML: %s: BlockWidth256Bytes = %u\n", __func__, BlockWidth256Bytes); + DML_LOG_VERBOSE("DML: %s: MacroTileWidth = %u\n", __func__, MacroTileWidth); + DML_LOG_VERBOSE("DML: %s: MacroTileHeight = %u\n", __func__, MacroTileHeight); + DML_LOG_VERBOSE("DML: %s: PTEBufferSizeInRequests = %u\n", __func__, PTEBufferSizeInRequests); + DML_LOG_VERBOSE("DML: %s: dpte_buffer_size_in_pte_reqs_luma = %u\n", __func__, mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma); + DML_LOG_VERBOSE("DML: %s: dpte_buffer_size_in_pte_reqs_chroma = %u\n", __func__, mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma); + DML_LOG_VERBOSE("DML: %s: GPUVMMinPageSizeKBytes = %u\n", __func__, GPUVMMinPageSizeKBytes); #endif unsigned int dummy_integer[21]; @@ -12193,16 +12131,16 @@ void dml2_core_calcs_get_dpte_row_height( CalculateVMAndRowBytes(&mode_lib->scratch.calculate_vm_and_row_bytes_params); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML: %s: dpte_row_height = %u\n", __func__, *dpte_row_height); + DML_LOG_VERBOSE("DML: %s: dpte_row_height = %u\n", __func__, *dpte_row_height); #endif } static bool is_dual_plane(enum dml2_source_format_class source_format) { - bool ret_val = 0; + bool ret_val = false; if ((source_format == dml2_420_12) || (source_format == dml2_420_8) || (source_format == dml2_420_10) || (source_format == dml2_rgbe_alpha)) - ret_val = 1; + ret_val = true; return ret_val; } @@ -12220,6 +12158,8 @@ static void rq_dlg_get_wm_regs(const struct dml2_display_cfg *display_cfg, const wm_regs->fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz); wm_regs->sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz); wm_regs->sr_exit = (int 
unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz); + wm_regs->sr_enter_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark * refclk_freq_in_mhz); + wm_regs->sr_exit_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterExitWatermark * refclk_freq_in_mhz); wm_regs->temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz); wm_regs->uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz); wm_regs->urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz); @@ -12246,11 +12186,11 @@ void dml2_core_calcs_cursor_dlg_reg(struct dml2_cursor_dlg_regs *cursor_dlg_regs cursor_dlg_regs->dst_x_offset = (unsigned int) ((dst_x_offset > 0) ? dst_x_offset : 0); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML_DLG::%s: cursor_x_position=%d\n", __func__, p->cursor_x_position); - dml2_printf("DML_DLG::%s: dlg_refclk_mhz=%f\n", __func__, p->dlg_refclk_mhz); - dml2_printf("DML_DLG::%s: pixel_rate_mhz=%f\n", __func__, p->pixel_rate_mhz); - dml2_printf("DML_DLG::%s: dst_x_offset=%d\n", __func__, dst_x_offset); - dml2_printf("DML_DLG::%s: dst_x_offset=%d (reg)\n", __func__, cursor_dlg_regs->dst_x_offset); + DML_LOG_VERBOSE("DML_DLG::%s: cursor_x_position=%d\n", __func__, p->cursor_x_position); + DML_LOG_VERBOSE("DML_DLG::%s: dlg_refclk_mhz=%f\n", __func__, p->dlg_refclk_mhz); + DML_LOG_VERBOSE("DML_DLG::%s: pixel_rate_mhz=%f\n", __func__, p->pixel_rate_mhz); + DML_LOG_VERBOSE("DML_DLG::%s: dst_x_offset=%d\n", __func__, dst_x_offset); + DML_LOG_VERBOSE("DML_DLG::%s: dst_x_offset=%d (reg)\n", __func__, cursor_dlg_regs->dst_x_offset); #endif cursor_dlg_regs->chunk_hdl_adjust = 3; @@ -12286,7 +12226,7 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs, double stored_swath_c_bytes; bool is_phantom_pipe; - dml2_printf("DML_DLG::%s: Calculation for pipe[%d] start\n", __func__, pipe_idx); + DML_LOG_VERBOSE("DML_DLG::%s: Calculation for pipe[%d] start\n", __func__, pipe_idx); pixel_chunk_bytes = (unsigned int)(mode_lib->ip.pixel_chunk_size_kbytes * 1024); min_pixel_chunk_bytes = (unsigned int)(mode_lib->ip.min_pixel_chunk_size_bytes); @@ -12329,19 +12269,19 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs, if (sw_mode == dml2_sw_linear && display_cfg->gpuvm_enable) { unsigned int p0_pte_row_height_linear = (unsigned int)(dml_get_dpte_row_height_linear_l(mode_lib, pipe_idx)); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML_DLG: %s: p0_pte_row_height_linear = %u\n", __func__, p0_pte_row_height_linear); + DML_LOG_VERBOSE("DML_DLG: %s: p0_pte_row_height_linear = %u\n", __func__, p0_pte_row_height_linear); #endif - DML2_ASSERT(p0_pte_row_height_linear >= 8); + DML_ASSERT(p0_pte_row_height_linear >= 8); rq_regs->rq_regs_l.pte_row_height_linear = math_log2_approx(p0_pte_row_height_linear) - 3; if (dual_plane) { unsigned int p1_pte_row_height_linear = (unsigned int)(dml_get_dpte_row_height_linear_c(mode_lib, pipe_idx)); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML_DLG: %s: p1_pte_row_height_linear = %u\n", __func__, p1_pte_row_height_linear); + DML_LOG_VERBOSE("DML_DLG: %s: p1_pte_row_height_linear = %u\n", __func__, p1_pte_row_height_linear); #endif if (sw_mode == dml2_sw_linear) { - DML2_ASSERT(p1_pte_row_height_linear >= 8); + DML_ASSERT(p1_pte_row_height_linear >= 8); } rq_regs->rq_regs_c.pte_row_height_linear = math_log2_approx(p1_pte_row_height_linear) - 3; } @@ -12375,12 +12315,12 @@ static void rq_dlg_get_rq_reg(struct 
dml2_display_rq_regs *rq_regs, if (stored_swath_l_bytes / stored_swath_c_bytes <= 1.5) { detile_buf_plane1_addr = (unsigned int)(detile_buf_size_in_bytes / 2.0 / 1024.0); // half to chroma #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML_DLG: %s: detile_buf_plane1_addr = %d (1/2 to chroma)\n", __func__, detile_buf_plane1_addr); + DML_LOG_VERBOSE("DML_DLG: %s: detile_buf_plane1_addr = %d (1/2 to chroma)\n", __func__, detile_buf_plane1_addr); #endif } else { detile_buf_plane1_addr = (unsigned int)(dml_round_to_multiple((unsigned int)((2.0 * detile_buf_size_in_bytes) / 3.0), 1024, 0) / 1024.0); // 2/3 to luma #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML_DLG: %s: detile_buf_plane1_addr = %d (1/3 chroma)\n", __func__, detile_buf_plane1_addr); + DML_LOG_VERBOSE("DML_DLG: %s: detile_buf_plane1_addr = %d (1/3 chroma)\n", __func__, detile_buf_plane1_addr); #endif } } @@ -12388,15 +12328,15 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs, rq_regs->plane1_base_address = detile_buf_plane1_addr; #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML_DLG: %s: is_phantom_pipe = %d\n", __func__, is_phantom_pipe); - dml2_printf("DML_DLG: %s: stored_swath_l_bytes = %f\n", __func__, stored_swath_l_bytes); - dml2_printf("DML_DLG: %s: stored_swath_c_bytes = %f\n", __func__, stored_swath_c_bytes); - dml2_printf("DML_DLG: %s: detile_buf_size_in_bytes = %d\n", __func__, detile_buf_size_in_bytes); - dml2_printf("DML_DLG: %s: detile_buf_plane1_addr = %d\n", __func__, detile_buf_plane1_addr); - dml2_printf("DML_DLG: %s: plane1_base_address = %d\n", __func__, rq_regs->plane1_base_address); + DML_LOG_VERBOSE("DML_DLG: %s: is_phantom_pipe = %d\n", __func__, is_phantom_pipe); + DML_LOG_VERBOSE("DML_DLG: %s: stored_swath_l_bytes = %f\n", __func__, stored_swath_l_bytes); + DML_LOG_VERBOSE("DML_DLG: %s: stored_swath_c_bytes = %f\n", __func__, stored_swath_c_bytes); + DML_LOG_VERBOSE("DML_DLG: %s: detile_buf_size_in_bytes = %d\n", __func__, detile_buf_size_in_bytes); + DML_LOG_VERBOSE("DML_DLG: %s: detile_buf_plane1_addr = %d\n", __func__, detile_buf_plane1_addr); + DML_LOG_VERBOSE("DML_DLG: %s: plane1_base_address = %d\n", __func__, rq_regs->plane1_base_address); #endif - //dml2_printf_rq_regs_st(rq_regs); - dml2_printf("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx); + //DML_LOG_VERBOSE_rq_regs_st(rq_regs); + DML_LOG_VERBOSE("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx); } static void rq_dlg_get_dlg_reg( @@ -12411,10 +12351,10 @@ static void rq_dlg_get_dlg_reg( memset(l, 0, sizeof(struct dml2_core_shared_rq_dlg_get_dlg_reg_locals)); - dml2_printf("DML_DLG::%s: Calculation for pipe_idx=%d\n", __func__, pipe_idx); + DML_LOG_VERBOSE("DML_DLG::%s: Calculation for pipe_idx=%d\n", __func__, pipe_idx); l->plane_idx = dml_get_plane_idx(mode_lib, pipe_idx); - DML2_ASSERT(l->plane_idx < DML2_MAX_PLANES); + DML_ASSERT(l->plane_idx < DML2_MAX_PLANES); l->source_format = dml2_444_8; l->odm_mode = dml2_odm_mode_bypass; @@ -12444,18 +12384,18 @@ static void rq_dlg_get_dlg_reg( l->pclk_freq_in_mhz = (double)l->timing->pixel_clock_khz / 1000; l->ref_freq_to_pix_freq = l->refclk_freq_in_mhz / l->pclk_freq_in_mhz; - dml2_printf("DML_DLG::%s: plane_idx = %d\n", __func__, l->plane_idx); - dml2_printf("DML_DLG: %s: htotal = %d\n", __func__, l->htotal); - dml2_printf("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, l->refclk_freq_in_mhz); - dml2_printf("DML_DLG: %s: dlg_ref_clk_mhz = %3.2f\n", __func__, display_cfg->overrides.hw.dlg_ref_clk_mhz); - dml2_printf("DML_DLG: %s: soc.refclk_mhz = 
%3.2f\n", __func__, mode_lib->soc.dchub_refclk_mhz); - dml2_printf("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, l->pclk_freq_in_mhz); - dml2_printf("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq); - dml2_printf("DML_DLG: %s: interlaced = %d\n", __func__, l->interlaced); + DML_LOG_VERBOSE("DML_DLG::%s: plane_idx = %d\n", __func__, l->plane_idx); + DML_LOG_VERBOSE("DML_DLG: %s: htotal = %d\n", __func__, l->htotal); + DML_LOG_VERBOSE("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, l->refclk_freq_in_mhz); + DML_LOG_VERBOSE("DML_DLG: %s: dlg_ref_clk_mhz = %3.2f\n", __func__, display_cfg->overrides.hw.dlg_ref_clk_mhz); + DML_LOG_VERBOSE("DML_DLG: %s: soc.refclk_mhz = %d\n", __func__, mode_lib->soc.dchub_refclk_mhz); + DML_LOG_VERBOSE("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, l->pclk_freq_in_mhz); + DML_LOG_VERBOSE("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq); + DML_LOG_VERBOSE("DML_DLG: %s: interlaced = %d\n", __func__, l->interlaced); - DML2_ASSERT(l->refclk_freq_in_mhz != 0); - DML2_ASSERT(l->pclk_freq_in_mhz != 0); - DML2_ASSERT(l->ref_freq_to_pix_freq < 4.0); + DML_ASSERT(l->refclk_freq_in_mhz != 0); + DML_ASSERT(l->pclk_freq_in_mhz != 0); + DML_ASSERT(l->ref_freq_to_pix_freq < 4.0); // Need to figure out which side of odm combine we're in // Assume the pipe instance under the same plane is in order @@ -12484,14 +12424,14 @@ static void rq_dlg_get_dlg_reg( l->pipe_idx_in_combine = pipe_idx - l->first_pipe_idx_in_plane; // DML assumes the pipes in the same plane will have continuous indexing (i.e. plane 0 use pipe 0, 1, and plane 1 uses pipe 2, 3, etc.) disp_dlg_regs->refcyc_h_blank_end = (unsigned int)(((double)l->hblank_end + (double)l->pipe_idx_in_combine * (double)l->hactive / (double)l->odm_combine_factor) * l->ref_freq_to_pix_freq); - dml2_printf("DML_DLG: %s: pipe_idx = %d\n", __func__, pipe_idx); - dml2_printf("DML_DLG: %s: first_pipe_idx_in_plane = %d\n", __func__, l->first_pipe_idx_in_plane); - dml2_printf("DML_DLG: %s: pipe_idx_in_combine = %d\n", __func__, l->pipe_idx_in_combine); - dml2_printf("DML_DLG: %s: odm_combine_factor = %d\n", __func__, l->odm_combine_factor); + DML_LOG_VERBOSE("DML_DLG: %s: pipe_idx = %d\n", __func__, pipe_idx); + DML_LOG_VERBOSE("DML_DLG: %s: first_pipe_idx_in_plane = %d\n", __func__, l->first_pipe_idx_in_plane); + DML_LOG_VERBOSE("DML_DLG: %s: pipe_idx_in_combine = %d\n", __func__, l->pipe_idx_in_combine); + DML_LOG_VERBOSE("DML_DLG: %s: odm_combine_factor = %d\n", __func__, l->odm_combine_factor); } - dml2_printf("DML_DLG: %s: refcyc_h_blank_end = %d\n", __func__, disp_dlg_regs->refcyc_h_blank_end); + DML_LOG_VERBOSE("DML_DLG: %s: refcyc_h_blank_end = %d\n", __func__, disp_dlg_regs->refcyc_h_blank_end); - DML2_ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int)math_pow(2, 13)); + DML_ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int)math_pow(2, 13)); disp_dlg_regs->ref_freq_to_pix_freq = (unsigned int)(l->ref_freq_to_pix_freq * math_pow(2, 19)); disp_dlg_regs->refcyc_per_htotal = (unsigned int)(l->ref_freq_to_pix_freq * (double)l->htotal * math_pow(2, 8)); @@ -12500,20 +12440,20 @@ static void rq_dlg_get_dlg_reg( l->min_ttu_vblank = mode_lib->mp.MinTTUVBlank[mode_lib->mp.pipe_plane[pipe_idx]]; l->min_dst_y_next_start = (unsigned int)(mode_lib->mp.MIN_DST_Y_NEXT_START[mode_lib->mp.pipe_plane[pipe_idx]]); - dml2_printf("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, l->min_ttu_vblank); - dml2_printf("DML_DLG: %s: min_dst_y_next_start = 
%d\n", __func__, l->min_dst_y_next_start); - dml2_printf("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq); + DML_LOG_VERBOSE("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, l->min_ttu_vblank); + DML_LOG_VERBOSE("DML_DLG: %s: min_dst_y_next_start = %d\n", __func__, l->min_dst_y_next_start); + DML_LOG_VERBOSE("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq); l->vready_after_vcount0 = (unsigned int)(mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[mode_lib->mp.pipe_plane[pipe_idx]]); disp_dlg_regs->vready_after_vcount0 = l->vready_after_vcount0; - dml2_printf("DML_DLG: %s: vready_after_vcount0 = %d\n", __func__, disp_dlg_regs->vready_after_vcount0); + DML_LOG_VERBOSE("DML_DLG: %s: vready_after_vcount0 = %d\n", __func__, disp_dlg_regs->vready_after_vcount0); l->dst_x_after_scaler = (unsigned int)(mode_lib->mp.DSTXAfterScaler[mode_lib->mp.pipe_plane[pipe_idx]]); l->dst_y_after_scaler = (unsigned int)(mode_lib->mp.DSTYAfterScaler[mode_lib->mp.pipe_plane[pipe_idx]]); - dml2_printf("DML_DLG: %s: dst_x_after_scaler = %d\n", __func__, l->dst_x_after_scaler); - dml2_printf("DML_DLG: %s: dst_y_after_scaler = %d\n", __func__, l->dst_y_after_scaler); + DML_LOG_VERBOSE("DML_DLG: %s: dst_x_after_scaler = %d\n", __func__, l->dst_x_after_scaler); + DML_LOG_VERBOSE("DML_DLG: %s: dst_y_after_scaler = %d\n", __func__, l->dst_y_after_scaler); l->dst_y_prefetch = mode_lib->mp.dst_y_prefetch[mode_lib->mp.pipe_plane[pipe_idx]]; l->dst_y_per_vm_vblank = mode_lib->mp.dst_y_per_vm_vblank[mode_lib->mp.pipe_plane[pipe_idx]]; @@ -12521,28 +12461,28 @@ static void rq_dlg_get_dlg_reg( l->dst_y_per_vm_flip = mode_lib->mp.dst_y_per_vm_flip[mode_lib->mp.pipe_plane[pipe_idx]]; l->dst_y_per_row_flip = mode_lib->mp.dst_y_per_row_flip[mode_lib->mp.pipe_plane[pipe_idx]]; - dml2_printf("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, l->dst_y_prefetch); - dml2_printf("DML_DLG: %s: dst_y_per_vm_flip = %3.2f\n", __func__, l->dst_y_per_vm_flip); - dml2_printf("DML_DLG: %s: dst_y_per_row_flip = %3.2f\n", __func__, l->dst_y_per_row_flip); - dml2_printf("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, l->dst_y_per_vm_vblank); - dml2_printf("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, l->dst_y_per_row_vblank); + DML_LOG_VERBOSE("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, l->dst_y_prefetch); + DML_LOG_VERBOSE("DML_DLG: %s: dst_y_per_vm_flip = %3.2f\n", __func__, l->dst_y_per_vm_flip); + DML_LOG_VERBOSE("DML_DLG: %s: dst_y_per_row_flip = %3.2f\n", __func__, l->dst_y_per_row_flip); + DML_LOG_VERBOSE("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, l->dst_y_per_vm_vblank); + DML_LOG_VERBOSE("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, l->dst_y_per_row_vblank); if (l->dst_y_prefetch > 0 && l->dst_y_per_vm_vblank > 0 && l->dst_y_per_row_vblank > 0) { - DML2_ASSERT(l->dst_y_prefetch > (l->dst_y_per_vm_vblank + l->dst_y_per_row_vblank)); + DML_ASSERT(l->dst_y_prefetch > (l->dst_y_per_vm_vblank + l->dst_y_per_row_vblank)); } l->vratio_pre_l = mode_lib->mp.VRatioPrefetchY[mode_lib->mp.pipe_plane[pipe_idx]]; l->vratio_pre_c = mode_lib->mp.VRatioPrefetchC[mode_lib->mp.pipe_plane[pipe_idx]]; - dml2_printf("DML_DLG: %s: vratio_pre_l = %3.2f\n", __func__, l->vratio_pre_l); - dml2_printf("DML_DLG: %s: vratio_pre_c = %3.2f\n", __func__, l->vratio_pre_c); + DML_LOG_VERBOSE("DML_DLG: %s: vratio_pre_l = %3.2f\n", __func__, l->vratio_pre_l); + DML_LOG_VERBOSE("DML_DLG: %s: vratio_pre_c = %3.2f\n", __func__, l->vratio_pre_c); 
// Active l->refcyc_per_line_delivery_pre_l = mode_lib->mp.DisplayPipeLineDeliveryTimeLumaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz; l->refcyc_per_line_delivery_l = mode_lib->mp.DisplayPipeLineDeliveryTimeLuma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz; - dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_l); - dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_l); + DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_l); + DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_l); l->refcyc_per_line_delivery_pre_c = 0.0; l->refcyc_per_line_delivery_c = 0.0; @@ -12551,8 +12491,8 @@ static void rq_dlg_get_dlg_reg( l->refcyc_per_line_delivery_pre_c = mode_lib->mp.DisplayPipeLineDeliveryTimeChromaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz; l->refcyc_per_line_delivery_c = mode_lib->mp.DisplayPipeLineDeliveryTimeChroma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz; - dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_c); - dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_c); + DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_c); + DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_c); } disp_dlg_regs->refcyc_per_vm_dmdata = (unsigned int)(mode_lib->mp.Tdmdl_vm[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz); @@ -12561,8 +12501,8 @@ static void rq_dlg_get_dlg_reg( l->refcyc_per_req_delivery_pre_l = mode_lib->mp.DisplayPipeRequestDeliveryTimeLumaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz; l->refcyc_per_req_delivery_l = mode_lib->mp.DisplayPipeRequestDeliveryTimeLuma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz; - dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_l); - dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_l); + DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_l); + DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_l); l->refcyc_per_req_delivery_pre_c = 0.0; l->refcyc_per_req_delivery_c = 0.0; @@ -12570,16 +12510,16 @@ static void rq_dlg_get_dlg_reg( l->refcyc_per_req_delivery_pre_c = mode_lib->mp.DisplayPipeRequestDeliveryTimeChromaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz; l->refcyc_per_req_delivery_c = mode_lib->mp.DisplayPipeRequestDeliveryTimeChroma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz; - dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_c); - dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_c); + DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_c); + DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_c); } // TTU - Cursor - DML2_ASSERT(display_cfg->plane_descriptors[l->plane_idx].cursor.num_cursors <= 
1); + DML_ASSERT(display_cfg->plane_descriptors[l->plane_idx].cursor.num_cursors <= 1); // Assign to register structures disp_dlg_regs->min_dst_y_next_start = (unsigned int)((double)l->min_dst_y_next_start * math_pow(2, 2)); - DML2_ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)math_pow(2, 18)); + DML_ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)math_pow(2, 18)); disp_dlg_regs->dst_y_after_scaler = l->dst_y_after_scaler; // in terms of line disp_dlg_regs->refcyc_x_after_scaler = (unsigned int)((double)l->dst_x_after_scaler * l->ref_freq_to_pix_freq); // in terms of refclk @@ -12592,10 +12532,10 @@ static void rq_dlg_get_dlg_reg( disp_dlg_regs->vratio_prefetch = (unsigned int)(l->vratio_pre_l * math_pow(2, 19)); disp_dlg_regs->vratio_prefetch_c = (unsigned int)(l->vratio_pre_c * math_pow(2, 19)); - dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_vblank); - dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_vblank); - dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip); - dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip); + DML_LOG_VERBOSE("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_vblank); + DML_LOG_VERBOSE("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_vblank); + DML_LOG_VERBOSE("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip); + DML_LOG_VERBOSE("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip); disp_dlg_regs->refcyc_per_vm_group_vblank = (unsigned int)(mode_lib->mp.TimePerVMGroupVBlank[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz); disp_dlg_regs->refcyc_per_vm_group_flip = (unsigned int)(mode_lib->mp.TimePerVMGroupFlip[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz); @@ -12662,11 +12602,11 @@ static void rq_dlg_get_dlg_reg( disp_ttu_regs->qos_ramp_disable_c = 0; disp_ttu_regs->min_ttu_vblank = (unsigned int)(l->min_ttu_vblank * l->refclk_freq_in_mhz); - // CHECK for HW registers' range, DML2_ASSERT or clamp - DML2_ASSERT(l->refcyc_per_req_delivery_pre_l < math_pow(2, 13)); - DML2_ASSERT(l->refcyc_per_req_delivery_l < math_pow(2, 13)); - DML2_ASSERT(l->refcyc_per_req_delivery_pre_c < math_pow(2, 13)); - DML2_ASSERT(l->refcyc_per_req_delivery_c < math_pow(2, 13)); + // CHECK for HW registers' range, DML_ASSERT or clamp + DML_ASSERT(l->refcyc_per_req_delivery_pre_l < math_pow(2, 13)); + DML_ASSERT(l->refcyc_per_req_delivery_l < math_pow(2, 13)); + DML_ASSERT(l->refcyc_per_req_delivery_pre_c < math_pow(2, 13)); + DML_ASSERT(l->refcyc_per_req_delivery_c < math_pow(2, 13)); if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)math_pow(2, 23)) disp_dlg_regs->refcyc_per_vm_group_vblank = (unsigned int)(math_pow(2, 23) - 1); @@ -12680,16 +12620,16 @@ static void rq_dlg_get_dlg_reg( disp_dlg_regs->refcyc_per_vm_req_flip = (unsigned int)(math_pow(2, 23) - 1); - DML2_ASSERT(disp_dlg_regs->dst_y_after_scaler < (unsigned int)8); - DML2_ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int)math_pow(2, 13)); + DML_ASSERT(disp_dlg_regs->dst_y_after_scaler < (unsigned int)8); + DML_ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int)math_pow(2, 13)); if 
(disp_dlg_regs->dst_y_per_pte_row_nom_l >= (unsigned int)math_pow(2, 17)) { - dml2_printf("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_L %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_l, (unsigned int)math_pow(2, 17) - 1); + DML_LOG_VERBOSE("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_L %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_l, (unsigned int)math_pow(2, 17) - 1); l->dst_y_per_pte_row_nom_l = (unsigned int)math_pow(2, 17) - 1; } if (l->dual_plane) { if (disp_dlg_regs->dst_y_per_pte_row_nom_c >= (unsigned int)math_pow(2, 17)) { - dml2_printf("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_C %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_c, (unsigned int)math_pow(2, 17) - 1); + DML_LOG_VERBOSE("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_C %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_c, (unsigned int)math_pow(2, 17) - 1); l->dst_y_per_pte_row_nom_c = (unsigned int)math_pow(2, 17) - 1; } } @@ -12700,20 +12640,20 @@ static void rq_dlg_get_dlg_reg( if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int)math_pow(2, 23)) disp_dlg_regs->refcyc_per_pte_group_nom_c = (unsigned int)(math_pow(2, 23) - 1); } - DML2_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)math_pow(2, 13)); + DML_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)math_pow(2, 13)); if (l->dual_plane) { - DML2_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c < (unsigned int)math_pow(2, 13)); + DML_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c < (unsigned int)math_pow(2, 13)); } - DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int)math_pow(2, 13)); - DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int)math_pow(2, 13)); - DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int)math_pow(2, 13)); - DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int)math_pow(2, 13)); - DML2_ASSERT(disp_ttu_regs->qos_level_low_wm < (unsigned int)math_pow(2, 14)); - DML2_ASSERT(disp_ttu_regs->qos_level_high_wm < (unsigned int)math_pow(2, 14)); - DML2_ASSERT(disp_ttu_regs->min_ttu_vblank < (unsigned int)math_pow(2, 24)); + DML_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int)math_pow(2, 13)); + DML_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int)math_pow(2, 13)); + DML_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int)math_pow(2, 13)); + DML_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int)math_pow(2, 13)); + DML_ASSERT(disp_ttu_regs->qos_level_low_wm < (unsigned int)math_pow(2, 14)); + DML_ASSERT(disp_ttu_regs->qos_level_high_wm < (unsigned int)math_pow(2, 14)); + DML_ASSERT(disp_ttu_regs->min_ttu_vblank < (unsigned int)math_pow(2, 24)); - dml2_printf("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx); + DML_LOG_VERBOSE("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx); } } @@ -12736,11 +12676,11 @@ static void rq_dlg_get_arb_params(const struct dml2_display_cfg *display_cfg, co arb_param->pstate_stall_threshold = (unsigned int)(mode_lib->ip_caps.fams2.max_allow_delay_us * refclk_freq_in_mhz); #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: max_req_outstanding = %d\n", __func__, arb_param->max_req_outstanding); - dml2_printf("DML::%s: sdpif_request_rate_limit = %d\n", __func__, arb_param->sdpif_request_rate_limit); - dml2_printf("DML::%s: 
compbuf_reserved_space_kbytes = %d\n", __func__, arb_param->compbuf_reserved_space_kbytes); - dml2_printf("DML::%s: allow_sdpif_rate_limit_when_cstate_req = %d\n", __func__, arb_param->allow_sdpif_rate_limit_when_cstate_req); - dml2_printf("DML::%s: dcfclk_deep_sleep_hysteresis = %d\n", __func__, arb_param->dcfclk_deep_sleep_hysteresis); + DML_LOG_VERBOSE("DML::%s: max_req_outstanding = %d\n", __func__, arb_param->max_req_outstanding); + DML_LOG_VERBOSE("DML::%s: sdpif_request_rate_limit = %d\n", __func__, arb_param->sdpif_request_rate_limit); + DML_LOG_VERBOSE("DML::%s: compbuf_reserved_space_kbytes = %d\n", __func__, arb_param->compbuf_reserved_space_kbytes); + DML_LOG_VERBOSE("DML::%s: allow_sdpif_rate_limit_when_cstate_req = %d\n", __func__, arb_param->allow_sdpif_rate_limit_when_cstate_req); + DML_LOG_VERBOSE("DML::%s: dcfclk_deep_sleep_hysteresis = %d\n", __func__, arb_param->dcfclk_deep_sleep_hysteresis); #endif } @@ -13013,10 +12953,10 @@ void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *disp out->vblank_reserved_time_us = display_cfg->plane_descriptors[plane_index].overrides.reserved_vblank_time_ns / 1000; #if defined(__DML_VBA_DEBUG__) - dml2_printf("DML::%s: subvp_fw_processing_delay_us = %d\n", __func__, mode_lib->ip.subvp_fw_processing_delay_us); - dml2_printf("DML::%s: subvp_pstate_allow_width_us = %d\n", __func__, mode_lib->ip.subvp_pstate_allow_width_us); - dml2_printf("DML::%s: subvp_swath_height_margin_lines = %d\n", __func__, mode_lib->ip.subvp_swath_height_margin_lines); - dml2_printf("DML::%s: vblank_reserved_time_us = %f\n", __func__, out->vblank_reserved_time_us); + DML_LOG_VERBOSE("DML::%s: subvp_fw_processing_delay_us = %d\n", __func__, mode_lib->ip.subvp_fw_processing_delay_us); + DML_LOG_VERBOSE("DML::%s: subvp_pstate_allow_width_us = %d\n", __func__, mode_lib->ip.subvp_pstate_allow_width_us); + DML_LOG_VERBOSE("DML::%s: subvp_swath_height_margin_lines = %d\n", __func__, mode_lib->ip.subvp_swath_height_margin_lines); + DML_LOG_VERBOSE("DML::%s: vblank_reserved_time_us = %u\n", __func__, out->vblank_reserved_time_us); #endif } @@ -13129,6 +13069,10 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_10x4; else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_12x4) out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_12x4; + else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_16x4) + out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_16x4; + else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_20x4) + out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_20x4; out->informative.mode_support_info.AlignedYPitch[k] = mode_lib->ms.support.AlignedYPitch[k]; out->informative.mode_support_info.AlignedCPitch[k] = mode_lib->ms.support.AlignedCPitch[k]; @@ -13313,7 +13257,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod out->informative.misc.DisplayPipeLineDeliveryTimeLumaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeLumaPrefetch[k]; out->informative.misc.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeChromaPrefetch[k]; - out->informative.misc.WritebackRequiredBandwidth = mode_lib->scratch.dml_core_mode_programming_locals.TotalWRBandwidth / 1000.0; + 
out->informative.misc.WritebackRequiredBandwidth = mode_lib->mp.TotalWRBandwidth / 1000.0; out->informative.misc.WritebackAllowDRAMClockChangeEndPosition[k] = mode_lib->mp.WritebackAllowDRAMClockChangeEndPosition[k]; out->informative.misc.WritebackAllowFCLKChangeEndPosition[k] = mode_lib->mp.WritebackAllowFCLKChangeEndPosition[k]; out->informative.misc.DSCCLK_calculated[k] = mode_lib->mp.DSCCLK[k]; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h index 4e502f0a6d20..28687565ac22 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h @@ -102,6 +102,7 @@ struct dml2_core_internal_DmlPipe { double DCFClkDeepSleep; unsigned int DPPPerSurface; bool ScalerEnabled; + bool UPSPEnabled; enum dml2_rotation_angle RotationAngle; bool mirrored; unsigned int ViewportHeight; @@ -186,7 +187,9 @@ enum dml2_core_internal_output_type_rate { dml2_core_internal_output_rate_hdmi_rate_6x4 = 9, dml2_core_internal_output_rate_hdmi_rate_8x4 = 10, dml2_core_internal_output_rate_hdmi_rate_10x4 = 11, - dml2_core_internal_output_rate_hdmi_rate_12x4 = 12 + dml2_core_internal_output_rate_hdmi_rate_12x4 = 12, + dml2_core_internal_output_rate_hdmi_rate_16x4 = 13, + dml2_core_internal_output_rate_hdmi_rate_20x4 = 14 }; struct dml2_core_internal_watermarks { @@ -260,12 +263,14 @@ struct dml2_core_internal_mode_support_info { bool AvgBandwidthSupport; bool UrgVactiveBandwidthSupport; bool EnoughUrgentLatencyHidingSupport; + bool PrefetchScheduleSupported; bool PrefetchSupported; bool PrefetchBandwidthSupported; bool DynamicMetadataSupported; bool VRatioInPrefetchSupported; bool DISPCLK_DPPCLK_Support; bool TotalAvailablePipesSupport; + bool ODMSupport; bool ModeSupport; bool ViewportSizeSupport; @@ -314,9 +319,7 @@ struct dml2_core_internal_mode_support_info { double non_urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]; // same as urg_bandwidth, except not scaled by urg burst factor double non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]; - bool avg_bandwidth_support_ok[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]; - double max_urgent_latency_us; double max_non_urgent_latency_us; double avg_non_urgent_latency_us; @@ -329,6 +332,8 @@ struct dml2_core_internal_mode_support_info { bool temp_read_or_ppt_support; struct dml2_core_internal_watermarks watermarks; + bool dcfclk_support; + bool qos_bandwidth_support; }; struct dml2_core_internal_mode_support { @@ -350,9 +355,11 @@ struct dml2_core_internal_mode_support { double SOCCLK; /// <brief Basically just the clock freq at the min (or given) state double DCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting double GlobalDPPCLK; /// <brief the Max DPPCLK freq out of all pipes + double GlobalDTBCLK; /// <brief the Max DTBCLK freq out of all pipes double uclk_freq_mhz; double dram_bw_mbps; double max_dram_bw_mbps; + double min_available_urgent_bandwidth_MBps; /// <brief Minimum guaranteed available urgent return bandwidth in MBps double MaxFabricClock; /// <brief Basically just the clock freq at the min (or given) state double MaxDCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting @@ -394,9 +401,13 @@ struct dml2_core_internal_mode_support { 
double TWait[DML2_MAX_PLANES]; bool UnboundedRequestEnabled; + unsigned int compbuf_reserved_space_64b; + bool hw_debug5; unsigned int CompressedBufferSizeInkByte; double VRatioPreY[DML2_MAX_PLANES]; double VRatioPreC[DML2_MAX_PLANES]; + unsigned int req_per_swath_ub_l[DML2_MAX_PLANES]; + unsigned int req_per_swath_ub_c[DML2_MAX_PLANES]; unsigned int swath_width_luma_ub[DML2_MAX_PLANES]; unsigned int swath_width_chroma_ub[DML2_MAX_PLANES]; unsigned int RequiredSlots[DML2_MAX_PLANES]; @@ -417,8 +428,8 @@ struct dml2_core_internal_mode_support { double dst_y_prefetch[DML2_MAX_PLANES]; double LinesForVM[DML2_MAX_PLANES]; double LinesForDPTERow[DML2_MAX_PLANES]; - double SwathWidthYSingleDPP[DML2_MAX_PLANES]; - double SwathWidthCSingleDPP[DML2_MAX_PLANES]; + unsigned int SwathWidthYSingleDPP[DML2_MAX_PLANES]; + unsigned int SwathWidthCSingleDPP[DML2_MAX_PLANES]; unsigned int BytePerPixelY[DML2_MAX_PLANES]; unsigned int BytePerPixelC[DML2_MAX_PLANES]; double BytePerPixelInDETY[DML2_MAX_PLANES]; @@ -469,13 +480,58 @@ struct dml2_core_internal_mode_support { double mall_prefetch_sdp_overhead_factor[DML2_MAX_PLANES]; // overhead to the imall or phantom pipe double mall_prefetch_dram_overhead_factor[DML2_MAX_PLANES]; + bool is_using_mall_for_ss[DML2_MAX_PLANES]; + unsigned int meta_row_width_chroma[DML2_MAX_PLANES]; + unsigned int PixelPTEReqHeightC[DML2_MAX_PLANES]; + bool PTE_BUFFER_MODE[DML2_MAX_PLANES]; + unsigned int meta_req_height_chroma[DML2_MAX_PLANES]; + unsigned int meta_pte_bytes_per_frame_ub_c[DML2_MAX_PLANES]; + unsigned int dpde0_bytes_per_frame_ub_c[DML2_MAX_PLANES]; + unsigned int dpte_row_width_luma_ub[DML2_MAX_PLANES]; + unsigned int meta_req_width[DML2_MAX_PLANES]; + unsigned int meta_row_width[DML2_MAX_PLANES]; + unsigned int PixelPTEReqWidthY[DML2_MAX_PLANES]; + unsigned int dpte_row_height_linear[DML2_MAX_PLANES]; + unsigned int PTERequestSizeY[DML2_MAX_PLANES]; + unsigned int dpte_row_width_chroma_ub[DML2_MAX_PLANES]; + unsigned int PixelPTEReqWidthC[DML2_MAX_PLANES]; + unsigned int meta_pte_bytes_per_frame_ub_l[DML2_MAX_PLANES]; + unsigned int dpte_row_height_linear_chroma[DML2_MAX_PLANES]; + unsigned int PTERequestSizeC[DML2_MAX_PLANES]; + unsigned int meta_req_height[DML2_MAX_PLANES]; + unsigned int dpde0_bytes_per_frame_ub_l[DML2_MAX_PLANES]; + unsigned int meta_req_width_chroma[DML2_MAX_PLANES]; + unsigned int PixelPTEReqHeightY[DML2_MAX_PLANES]; + unsigned int BIGK_FRAGMENT_SIZE[DML2_MAX_PLANES]; + unsigned int vm_group_bytes[DML2_MAX_PLANES]; + unsigned int VReadyOffsetPix[DML2_MAX_PLANES]; + unsigned int VUpdateOffsetPix[DML2_MAX_PLANES]; + unsigned int VUpdateWidthPix[DML2_MAX_PLANES]; + double TSetup[DML2_MAX_PLANES]; + double Tdmdl_vm_raw[DML2_MAX_PLANES]; + double Tdmdl_raw[DML2_MAX_PLANES]; + unsigned int VStartupMin[DML2_MAX_PLANES]; /// <brief Minimum vstartup to meet the prefetch schedule (i.e. the prefetch solution can be found at this vstartup time); not the actual global sync vstartup pos. 
+ double MaxActiveDRAMClockChangeLatencySupported[DML2_MAX_PLANES]; + double MaxActiveFCLKChangeLatencySupported; + // Backend bool RequiresDSC[DML2_MAX_PLANES]; bool RequiresFEC[DML2_MAX_PLANES]; double OutputBpp[DML2_MAX_PLANES]; + double DesiredOutputBpp[DML2_MAX_PLANES]; + double PixelClockBackEnd[DML2_MAX_PLANES]; unsigned int DSCDelay[DML2_MAX_PLANES]; enum dml2_core_internal_output_type OutputType[DML2_MAX_PLANES]; enum dml2_core_internal_output_type_rate OutputRate[DML2_MAX_PLANES]; + bool TotalAvailablePipesSupportNoDSC; + bool TotalAvailablePipesSupportDSC; + unsigned int NumberOfDPPNoDSC; + unsigned int NumberOfDPPDSC; + enum dml2_odm_mode ODMModeNoDSC; + enum dml2_odm_mode ODMModeDSC; + double RequiredDISPCLKPerSurfaceNoDSC; + double RequiredDISPCLKPerSurfaceDSC; + unsigned int EstimatedNumberOfDSCSlices[DML2_MAX_PLANES]; // Bandwidth Related Info double BandwidthAvailableForImmediateFlip; @@ -484,8 +540,14 @@ struct dml2_core_internal_mode_support { double WriteBandwidth[DML2_MAX_PLANES][DML2_MAX_WRITEBACK]; double RequiredPrefetchPixelDataBWLuma[DML2_MAX_PLANES]; double RequiredPrefetchPixelDataBWChroma[DML2_MAX_PLANES]; - /* oto bw should also be considered when calculating peak urgent bw to avoid situations oto/equ mismatches between ms and mp */ - double RequiredPrefetchBWOTO[DML2_MAX_PLANES]; + /* Max bandwidth calculated from prefetch schedule should be considered in addition to the pixel data bw to avoid ms/mp mismatches. + * 1. oto bw should also be considered when calculating peak urgent bw to avoid oto/equ mismatches between ms and mp + * + * 2. equ bandwidth needs to be considered for calculating peak urgent bw when equ schedule is used in mode support. + * Slight differences in variables may cause the pixel data bandwidth to be higher + * even though overall equ prefetch bandwidths can be lower going from ms to mp + */ + double RequiredPrefetchBWMax[DML2_MAX_PLANES]; double cursor_bw[DML2_MAX_PLANES]; double prefetch_cursor_bw[DML2_MAX_PLANES]; double prefetch_vmrow_bw[DML2_MAX_PLANES]; @@ -538,7 +600,44 @@ struct dml2_core_internal_mode_support { bool mall_comb_mcache_c[DML2_MAX_PLANES]; bool lc_comb_mcache[DML2_MAX_PLANES]; + unsigned int vmpg_width_y[DML2_MAX_PLANES]; + unsigned int vmpg_height_y[DML2_MAX_PLANES]; + unsigned int vmpg_width_c[DML2_MAX_PLANES]; + unsigned int vmpg_height_c[DML2_MAX_PLANES]; + + unsigned int meta_row_height_luma[DML2_MAX_PLANES]; + unsigned int meta_row_height_chroma[DML2_MAX_PLANES]; + unsigned int meta_row_bytes_per_row_ub_l[DML2_MAX_PLANES]; + unsigned int meta_row_bytes_per_row_ub_c[DML2_MAX_PLANES]; + unsigned int dpte_row_bytes_per_row_l[DML2_MAX_PLANES]; + unsigned int dpte_row_bytes_per_row_c[DML2_MAX_PLANES]; + + unsigned int pstate_bytes_required_l[DML2_MAX_PLANES]; + unsigned int pstate_bytes_required_c[DML2_MAX_PLANES]; + unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES]; + unsigned int cursor_bytes_per_line[DML2_MAX_PLANES]; + + unsigned int MaximumVStartup[DML2_MAX_PLANES]; + + double HostVMInefficiencyFactor; + double HostVMInefficiencyFactorPrefetch; + + unsigned int tdlut_pte_bytes_per_frame[DML2_MAX_PLANES]; + unsigned int tdlut_bytes_per_frame[DML2_MAX_PLANES]; + unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES]; + double tdlut_opt_time[DML2_MAX_PLANES]; + double tdlut_drain_time[DML2_MAX_PLANES]; + unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES]; + + double Tvm_trips_flip[DML2_MAX_PLANES]; + double Tr0_trips_flip[DML2_MAX_PLANES]; + double Tvm_trips_flip_rounded[DML2_MAX_PLANES]; +
double Tr0_trips_flip_rounded[DML2_MAX_PLANES]; + + unsigned int DSTYAfterScaler[DML2_MAX_PLANES]; + unsigned int DSTXAfterScaler[DML2_MAX_PLANES]; + enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES]; }; /// @brief A mega structure that houses various info for model programming step. @@ -548,6 +647,7 @@ struct dml2_core_internal_mode_program { double FabricClock; /// <brief Basically just the clock freq at the min (or given) state //double DCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting double dram_bw_mbps; + double min_available_urgent_bandwidth_MBps; /// <brief Minimum guaranteed available urgent return bandwidth in MBps double uclk_freq_mhz; unsigned int NoOfDPP[DML2_MAX_PLANES]; enum dml2_odm_mode ODMMode[DML2_MAX_PLANES]; @@ -599,6 +699,8 @@ struct dml2_core_internal_mode_program { unsigned int MacroTileHeightC[DML2_MAX_PLANES]; unsigned int MacroTileWidthY[DML2_MAX_PLANES]; unsigned int MacroTileWidthC[DML2_MAX_PLANES]; + double MaximumSwathWidthLuma[DML2_MAX_PLANES]; + double MaximumSwathWidthChroma[DML2_MAX_PLANES]; bool surf_linear128_l[DML2_MAX_PLANES]; bool surf_linear128_c[DML2_MAX_PLANES]; @@ -631,6 +733,14 @@ struct dml2_core_internal_mode_program { double UrgentBurstFactorChroma[DML2_MAX_PLANES]; double UrgentBurstFactorChromaPre[DML2_MAX_PLANES]; + double MaximumSwathWidthInLineBufferLuma; + double MaximumSwathWidthInLineBufferChroma; + + unsigned int vmpg_width_y[DML2_MAX_PLANES]; + unsigned int vmpg_height_y[DML2_MAX_PLANES]; + unsigned int vmpg_width_c[DML2_MAX_PLANES]; + unsigned int vmpg_height_c[DML2_MAX_PLANES]; + double meta_row_bw[DML2_MAX_PLANES]; unsigned int meta_row_bytes[DML2_MAX_PLANES]; unsigned int meta_req_width[DML2_MAX_PLANES]; @@ -652,7 +762,9 @@ struct dml2_core_internal_mode_program { unsigned int PTERequestSizeC[DML2_MAX_PLANES]; double TWait[DML2_MAX_PLANES]; + double Tdmdl_vm_raw[DML2_MAX_PLANES]; double Tdmdl_vm[DML2_MAX_PLANES]; + double Tdmdl_raw[DML2_MAX_PLANES]; double Tdmdl[DML2_MAX_PLANES]; double TSetup[DML2_MAX_PLANES]; unsigned int dpde0_bytes_per_frame_ub_l[DML2_MAX_PLANES]; @@ -684,6 +796,38 @@ struct dml2_core_internal_mode_program { double TCalc; unsigned int TotImmediateFlipBytes; + unsigned int MaxTotalDETInKByte; + unsigned int NomDETInKByte; + unsigned int MinCompressedBufferSizeInKByte; + double PixelClockBackEnd[DML2_MAX_PLANES]; + double OutputBpp[DML2_MAX_PLANES]; + bool dsc_enable[DML2_MAX_PLANES]; + unsigned int num_dsc_slices[DML2_MAX_PLANES]; + unsigned int meta_row_bytes_per_row_ub_l[DML2_MAX_PLANES]; + unsigned int meta_row_bytes_per_row_ub_c[DML2_MAX_PLANES]; + unsigned int dpte_row_bytes_per_row_l[DML2_MAX_PLANES]; + unsigned int dpte_row_bytes_per_row_c[DML2_MAX_PLANES]; + unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES]; + unsigned int cursor_bytes_per_line[DML2_MAX_PLANES]; + unsigned int MaxVStartupLines[DML2_MAX_PLANES]; /// <brief more like vblank for the plane's OTG + double HostVMInefficiencyFactor; + double HostVMInefficiencyFactorPrefetch; + unsigned int tdlut_pte_bytes_per_frame[DML2_MAX_PLANES]; + unsigned int tdlut_bytes_per_frame[DML2_MAX_PLANES]; + unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES]; + double tdlut_opt_time[DML2_MAX_PLANES]; + double tdlut_drain_time[DML2_MAX_PLANES]; + unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES]; + double Tvm_trips_flip[DML2_MAX_PLANES]; + double Tr0_trips_flip[DML2_MAX_PLANES]; + double Tvm_trips_flip_rounded[DML2_MAX_PLANES]; + double Tr0_trips_flip_rounded[DML2_MAX_PLANES]; + bool 
immediate_flip_required; // any pipes need immediate flip + double SOCCLK; /// <brief Basically just the clock freq at the min (or given) state + double TotalWRBandwidth; + double max_urgent_latency_us; + double df_response_time_us; + // ------------------- // Output // ------------------- @@ -694,9 +838,12 @@ struct dml2_core_internal_mode_program { // Support bool UrgVactiveBandwidthSupport; + bool PrefetchScheduleSupported; + bool UrgentBandwidthSupport; bool PrefetchModeSupported; // <brief Is the prefetch mode (bandwidth and latency) supported bool ImmediateFlipSupported; bool ImmediateFlipSupportedForPipe[DML2_MAX_PLANES]; + bool dcfclk_support; // Clock double Dcfclk; @@ -788,7 +935,7 @@ struct dml2_core_internal_mode_program { // RQ registers bool PTE_BUFFER_MODE[DML2_MAX_PLANES]; unsigned int BIGK_FRAGMENT_SIZE[DML2_MAX_PLANES]; - + double VActiveLatencyHidingUs[DML2_MAX_PLANES]; unsigned int SubViewportLinesNeededInMALL[DML2_MAX_PLANES]; bool is_using_mall_for_ss[DML2_MAX_PLANES]; @@ -1001,10 +1148,10 @@ struct dml2_core_calcs_mode_programming_locals { double dummy_bw[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]; double surface_dummy_bw[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max][DML2_MAX_PLANES]; double surface_dummy_bw0[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max][DML2_MAX_PLANES]; - unsigned int dummy_integer_array[2][DML2_MAX_PLANES]; + unsigned int dummy_integer_array[4][DML2_MAX_PLANES]; enum dml2_output_encoder_class dummy_output_encoder_array[DML2_MAX_PLANES]; double dummy_single_array[2][DML2_MAX_PLANES]; - unsigned int dummy_long_array[4][DML2_MAX_PLANES]; + unsigned int dummy_long_array[8][DML2_MAX_PLANES]; bool dummy_boolean_array[2][DML2_MAX_PLANES]; bool dummy_boolean[2]; double dummy_single[2]; @@ -1028,7 +1175,6 @@ struct dml2_core_calcs_mode_programming_locals { double dlg_vblank_start; double LSetup; double blank_lines_remaining; - double TotalWRBandwidth; double WRBandwidth; struct dml2_core_internal_DmlPipe myPipe; double PixelClockBackEndFactor; @@ -1078,6 +1224,8 @@ struct dml2_core_calcs_mode_programming_locals { enum dml2_source_format_class pixel_format[DML2_MAX_PLANES]; unsigned int lb_source_lines_l[DML2_MAX_PLANES]; unsigned int lb_source_lines_c[DML2_MAX_PLANES]; + unsigned int num_dsc_slices[DML2_MAX_PLANES]; + bool dsc_enable[DML2_MAX_PLANES]; }; struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_locals { @@ -1151,6 +1299,7 @@ struct dml2_core_calcs_CalculateVMRowAndSwath_params { unsigned int HostVMMinPageSize; unsigned int DCCMetaBufferSizeBytes; bool mrq_present; + enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES]; // Output bool *PTEBufferSizeNotExceeded; @@ -1387,7 +1536,7 @@ struct dml2_core_shared_get_urgent_bandwidth_required_locals { double vm_row_bw; double flip_and_active_bw; double flip_and_prefetch_bw; - double flip_and_prefetch_bw_oto; + double flip_and_prefetch_bw_max; double active_and_excess_bw; }; @@ -1416,6 +1565,7 @@ struct dml2_core_shared_CalculateFlipSchedule_locals { struct dml2_core_shared_rq_dlg_get_dlg_reg_locals { unsigned int plane_idx; + unsigned int stream_idx; enum dml2_source_format_class source_format; const struct dml2_timing_cfg *timing; bool dual_plane; @@ -1623,6 +1773,9 @@ struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params { double *BytePerPixDETC; unsigned int *DPPPerSurface; bool mrq_present; + unsigned int dummy[2][DML2_MAX_PLANES]; + unsigned int swath_width_luma_ub_single_dpp[DML2_MAX_PLANES]; + unsigned int 
swath_width_chroma_ub_single_dpp[DML2_MAX_PLANES]; // output unsigned int *req_per_swath_ub_l; @@ -1640,6 +1793,8 @@ struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params { unsigned int *DETBufferSizeC; unsigned int *full_swath_bytes_l; unsigned int *full_swath_bytes_c; + unsigned int *full_swath_bytes_single_dpp_l; + unsigned int *full_swath_bytes_single_dpp_c; bool *UnboundedRequestEnabled; unsigned int *compbuf_reserved_space_64b; unsigned int *CompressedBufferSizeInkByte; @@ -1799,7 +1954,7 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params { double *VRatioPrefetchC; double *RequiredPrefetchPixelDataBWLuma; double *RequiredPrefetchPixelDataBWChroma; - double *RequiredPrefetchBWOTO; + double *RequiredPrefetchBWMax; bool *NotEnoughTimeForDynamicMetadata; double *Tno_bw; double *Tno_bw_flip; @@ -2036,7 +2191,7 @@ struct dml2_core_calcs_calculate_peak_bandwidth_required_params { double *surface_read_bandwidth_c; double *prefetch_bandwidth_l; double *prefetch_bandwidth_c; - double *prefetch_bandwidth_oto; + double *prefetch_bandwidth_max; double *excess_vactive_fill_bw_l; double *excess_vactive_fill_bw_c; double *cursor_bw; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c index 2504d9c2ec34..5f301befed16 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c @@ -82,7 +82,7 @@ bool dml2_core_utils_is_420(enum dml2_source_format_class source_format) val = 0; break; default: - DML2_ASSERT(0); + DML_ASSERT(0); break; } return val; @@ -145,7 +145,7 @@ bool dml2_core_utils_is_422_planar(enum dml2_source_format_class source_format) val = 0; break; default: - DML2_ASSERT(0); + DML_ASSERT(0); break; } return val; @@ -208,7 +208,7 @@ bool dml2_core_utils_is_422_packed(enum dml2_source_format_class source_format) val = 1; break; default: - DML2_ASSERT(0); + DML_ASSERT(0); break; } return val; @@ -216,104 +216,104 @@ bool dml2_core_utils_is_422_packed(enum dml2_source_format_class source_format) void dml2_core_utils_print_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only) { - dml2_printf("DML: ===================================== \n"); - dml2_printf("DML: DML_MODE_SUPPORT_INFO_ST\n"); + DML_LOG_VERBOSE("DML: ===================================== \n"); + DML_LOG_VERBOSE("DML: DML_MODE_SUPPORT_INFO_ST\n"); if (!fail_only || support->ScaleRatioAndTapsSupport == 0) - dml2_printf("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport); + DML_LOG_VERBOSE("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport); if (!fail_only || support->SourceFormatPixelAndScanSupport == 0) - dml2_printf("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport); + DML_LOG_VERBOSE("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport); if (!fail_only || support->ViewportSizeSupport == 0) - dml2_printf("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport); + DML_LOG_VERBOSE("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport); if (!fail_only || support->LinkRateDoesNotMatchDPVersion == 1) - dml2_printf("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion); + DML_LOG_VERBOSE("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", 
support->LinkRateDoesNotMatchDPVersion); if (!fail_only || support->LinkRateForMultistreamNotIndicated == 1) - dml2_printf("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated); + DML_LOG_VERBOSE("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated); if (!fail_only || support->BPPForMultistreamNotIndicated == 1) - dml2_printf("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated); + DML_LOG_VERBOSE("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated); if (!fail_only || support->MultistreamWithHDMIOreDP == 1) - dml2_printf("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP); + DML_LOG_VERBOSE("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP); if (!fail_only || support->ExceededMultistreamSlots == 1) - dml2_printf("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots); + DML_LOG_VERBOSE("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots); if (!fail_only || support->MSOOrODMSplitWithNonDPLink == 1) - dml2_printf("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink); + DML_LOG_VERBOSE("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink); if (!fail_only || support->NotEnoughLanesForMSO == 1) - dml2_printf("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO); + DML_LOG_VERBOSE("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO); if (!fail_only || support->P2IWith420 == 1) - dml2_printf("DML: support: P2IWith420 = %d\n", support->P2IWith420); + DML_LOG_VERBOSE("DML: support: P2IWith420 = %d\n", support->P2IWith420); if (!fail_only || support->DSC422NativeNotSupported == 1) - dml2_printf("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported); + DML_LOG_VERBOSE("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported); if (!fail_only || support->DSCSlicesODMModeSupported == 0) - dml2_printf("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported); + DML_LOG_VERBOSE("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported); if (!fail_only || support->NotEnoughDSCUnits == 1) - dml2_printf("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits); + DML_LOG_VERBOSE("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits); if (!fail_only || support->NotEnoughDSCSlices == 1) - dml2_printf("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices); + DML_LOG_VERBOSE("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices); if (!fail_only || support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == 1) - dml2_printf("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe); + DML_LOG_VERBOSE("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe); if (!fail_only || support->InvalidCombinationOfMALLUseForPStateAndStaticScreen == 1) - dml2_printf("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen); + DML_LOG_VERBOSE("DML: support: 
InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen); if (!fail_only || support->DSCCLKRequiredMoreThanSupported == 1) - dml2_printf("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported); + DML_LOG_VERBOSE("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported); if (!fail_only || support->PixelsPerLinePerDSCUnitSupport == 0) - dml2_printf("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport); + DML_LOG_VERBOSE("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport); if (!fail_only || support->DTBCLKRequiredMoreThanSupported == 1) - dml2_printf("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported); + DML_LOG_VERBOSE("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported); if (!fail_only || support->InvalidCombinationOfMALLUseForPState == 1) - dml2_printf("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState); + DML_LOG_VERBOSE("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState); if (!fail_only || support->ROBSupport == 0) - dml2_printf("DML: support: ROBSupport = %d\n", support->ROBSupport); + DML_LOG_VERBOSE("DML: support: ROBSupport = %d\n", support->ROBSupport); if (!fail_only || support->OutstandingRequestsSupport == 0) - dml2_printf("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport); + DML_LOG_VERBOSE("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport); if (!fail_only || support->OutstandingRequestsUrgencyAvoidance == 0) - dml2_printf("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance); + DML_LOG_VERBOSE("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance); if (!fail_only || support->DISPCLK_DPPCLK_Support == 0) - dml2_printf("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support); + DML_LOG_VERBOSE("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support); if (!fail_only || support->TotalAvailablePipesSupport == 0) - dml2_printf("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport); + DML_LOG_VERBOSE("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport); if (!fail_only || support->NumberOfOTGSupport == 0) - dml2_printf("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport); + DML_LOG_VERBOSE("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport); if (!fail_only || support->NumberOfHDMIFRLSupport == 0) - dml2_printf("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport); + DML_LOG_VERBOSE("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport); if (!fail_only || support->NumberOfDP2p0Support == 0) - dml2_printf("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support); + DML_LOG_VERBOSE("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support); if (!fail_only || support->EnoughWritebackUnits == 0) - dml2_printf("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits); + DML_LOG_VERBOSE("DML: support: EnoughWritebackUnits = %d\n", 
support->EnoughWritebackUnits); if (!fail_only || support->WritebackScaleRatioAndTapsSupport == 0) - dml2_printf("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport); + DML_LOG_VERBOSE("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport); if (!fail_only || support->WritebackLatencySupport == 0) - dml2_printf("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport); + DML_LOG_VERBOSE("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport); if (!fail_only || support->CursorSupport == 0) - dml2_printf("DML: support: CursorSupport = %d\n", support->CursorSupport); + DML_LOG_VERBOSE("DML: support: CursorSupport = %d\n", support->CursorSupport); if (!fail_only || support->PitchSupport == 0) - dml2_printf("DML: support: PitchSupport = %d\n", support->PitchSupport); + DML_LOG_VERBOSE("DML: support: PitchSupport = %d\n", support->PitchSupport); if (!fail_only || support->ViewportExceedsSurface == 1) - dml2_printf("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface); + DML_LOG_VERBOSE("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface); if (!fail_only || support->PrefetchSupported == 0) - dml2_printf("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported); + DML_LOG_VERBOSE("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported); if (!fail_only || support->EnoughUrgentLatencyHidingSupport == 0) - dml2_printf("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport); + DML_LOG_VERBOSE("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport); if (!fail_only || support->AvgBandwidthSupport == 0) - dml2_printf("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport); + DML_LOG_VERBOSE("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport); if (!fail_only || support->DynamicMetadataSupported == 0) - dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported); + DML_LOG_VERBOSE("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported); if (!fail_only || support->VRatioInPrefetchSupported == 0) - dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported); + DML_LOG_VERBOSE("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported); if (!fail_only || support->PTEBufferSizeNotExceeded == 0) - dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded); + DML_LOG_VERBOSE("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded); if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 0) - dml2_printf("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded); + DML_LOG_VERBOSE("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded); if (!fail_only || support->ExceededMALLSize == 1) - dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize); + DML_LOG_VERBOSE("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize); if (!fail_only || support->g6_temp_read_support == 0) - dml2_printf("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support); + DML_LOG_VERBOSE("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support); if (!fail_only || 
support->ImmediateFlipSupport == 0) - dml2_printf("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport); + DML_LOG_VERBOSE("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport); if (!fail_only || support->LinkCapacitySupport == 0) - dml2_printf("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport); + DML_LOG_VERBOSE("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport); if (!fail_only || support->ModeSupport == 0) - dml2_printf("DML: support: ModeSupport = %d\n", support->ModeSupport); - dml2_printf("DML: ===================================== \n"); + DML_LOG_VERBOSE("DML: support: ModeSupport = %d\n", support->ModeSupport); + DML_LOG_VERBOSE("DML: ===================================== \n"); } const char *dml2_core_utils_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type) @@ -358,9 +358,9 @@ void dml2_core_utils_get_stream_output_bpp(double *out_bpp, const struct dml2_di out_bpp[k] = 0; } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: k=%d bpc=%f\n", __func__, k, bpc); - dml2_printf("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable); - dml2_printf("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]); + DML_LOG_VERBOSE("DML::%s: k=%d bpc=%f\n", __func__, k, bpc); + DML_LOG_VERBOSE("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable); + DML_LOG_VERBOSE("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]); #endif } } @@ -391,7 +391,7 @@ unsigned int dml2_core_util_get_num_active_pipes(int unsigned num_planes, const } #ifdef __DML_VBA_DEBUG__ - dml2_printf("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes); + DML_LOG_VERBOSE("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes); #endif return num_active_pipes; } @@ -452,7 +452,7 @@ unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw else if (sw_mode == dml2_gfx11_sw_256kb_r_x) return 262144; else { - DML2_ASSERT(0); + DML_ASSERT(0); return 256; }; } @@ -464,7 +464,7 @@ bool dml2_core_utils_get_segment_horizontal_contiguous(enum dml2_swizzle_mode sw bool dml2_core_utils_is_linear(enum dml2_swizzle_mode sw_mode) { - return (sw_mode == dml2_sw_linear || sw_mode == dml2_sw_linear_256b || sw_mode == dml2_linear_64elements); + return sw_mode == dml2_sw_linear; }; @@ -498,8 +498,8 @@ int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode) sw_mode == dml2_gfx11_sw_256kb_r_x) version = 11; else { - dml2_printf("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode); - DML2_ASSERT(0); + DML_LOG_VERBOSE("ERROR: Invalid sw_mode setting! 
val=%u\n", sw_mode); + DML_ASSERT(0); } return version; @@ -511,7 +511,7 @@ unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, co unsigned int index = 0; for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) { - dml2_printf("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %d\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz); + DML_LOG_VERBOSE("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %ld\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz); if (i == 0) index = 0; @@ -524,8 +524,8 @@ unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, co } } #if defined(__DML_VBA_DEBUG__) - dml2_printf("DML::%s: uclk_freq_khz = %d\n", __func__, uclk_freq_khz); - dml2_printf("DML::%s: index = %d\n", __func__, index); + DML_LOG_VERBOSE("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz); + DML_LOG_VERBOSE("DML::%s: index = %d\n", __func__, index); #endif return index; } @@ -533,32 +533,32 @@ unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, co unsigned int dml2_core_utils_get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table) { unsigned int i; - bool clk_entry_found = 0; + bool clk_entry_found = false; for (i = 0; i < clk_table->uclk.num_clk_values; i++) { - dml2_printf("DML::%s: clk_table.uclk.clk_values_khz[%d] = %d\n", __func__, i, clk_table->uclk.clk_values_khz[i]); + DML_LOG_VERBOSE("DML::%s: clk_table.uclk.clk_values_khz[%d] = %ld\n", __func__, i, clk_table->uclk.clk_values_khz[i]); if (uclk_freq_khz == clk_table->uclk.clk_values_khz[i]) { - clk_entry_found = 1; + clk_entry_found = true; break; } } if (!clk_entry_found) - DML2_ASSERT(clk_entry_found); + DML_ASSERT(clk_entry_found); #if defined(__DML_VBA_DEBUG__) - dml2_printf("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz); - dml2_printf("DML::%s: index = %d\n", __func__, i); + DML_LOG_VERBOSE("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz); + DML_LOG_VERBOSE("DML::%s: index = %d\n", __func__, i); #endif return i; } bool dml2_core_utils_is_dual_plane(enum dml2_source_format_class source_format) { - bool ret_val = 0; + bool ret_val = false; if (dml2_core_utils_is_420(source_format) || dml2_core_utils_is_422_planar(source_format) || (source_format == dml2_rgbe_alpha)) - ret_val = 1; + ret_val = true; return ret_val; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c index 15507926f3a4..22969a533a7b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c @@ -389,9 +389,6 @@ static bool map_min_clocks_to_dpm(const struct dml2_core_mode_support_result *mo if (result) result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.dispclk_khz, &state_table->dispclk); - if (result) - result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.deepsleep_dcfclk_khz, &state_table->dcfclk); - for (i = 0; i < DML2_MAX_DCN_PIPES; i++) { if (result) result = round_up_to_next_dpm(&display_cfg->plane_programming[i].min_clocks.dcn4x.dppclk_khz, &state_table->dppclk); @@ -754,6 +751,8 @@ bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz); dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_enter = (int 
unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz); dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz); + dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_enter_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark * refclk_freq_in_mhz); + dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_exit_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterExitWatermark * refclk_freq_in_mhz); dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz); dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz); dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz); @@ -768,6 +767,8 @@ bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz); dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz); dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz); + dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_enter_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark * refclk_freq_in_mhz); + dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_exit_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterExitWatermark * refclk_freq_in_mhz); dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz); dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz); dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c index f4b1a7d02d42..a265f254152c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c @@ -182,6 +182,10 @@ static bool build_min_clock_table(const struct dml2_soc_bb *soc_bb, struct dml2_ min_table->max_clocks_khz.dtbclk = soc_bb->clk_table.dtbclk.clk_values_khz[soc_bb->clk_table.dtbclk.num_clk_values - 1]; min_table->max_clocks_khz.phyclk = soc_bb->clk_table.phyclk.clk_values_khz[soc_bb->clk_table.phyclk.num_clk_values - 1]; + min_table->max_ss_clocks_khz.dispclk = (unsigned int)((double)min_table->max_clocks_khz.dispclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0)); + min_table->max_ss_clocks_khz.dppclk = (unsigned int)((double)min_table->max_clocks_khz.dppclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0)); + min_table->max_ss_clocks_khz.dtbclk = (unsigned int)((double)min_table->max_clocks_khz.dtbclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0)); + min_table->max_clocks_khz.dcfclk = soc_bb->clk_table.dcfclk.clk_values_khz[soc_bb->clk_table.dcfclk.num_clk_values - 1]; 
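[Illustrative aside, not part of the patch] The max_ss_clocks_khz entries added above derate each maximum by dcn_downspread_percent so a spread-spectrum-modulated clock never overshoots the hard limit. The same arithmetic as a standalone helper; with a 0.5% downspread, a 1200000 kHz dispclk ceiling becomes 1200000 / 1.005, truncating to 1194029 kHz:

static unsigned int ss_derated_khz(unsigned int max_khz, double downspread_percent)
{
	/* Illustration only: derate by the worst-case upward spread, matching
	 * the build_min_clock_table() computation. A clock requested at the
	 * derated value can spread up to the true maximum without exceeding it. */
	return (unsigned int)((double)max_khz / (1.0 + downspread_percent / 100.0));
}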
min_table->max_clocks_khz.fclk = soc_bb->clk_table.fclk.clk_values_khz[soc_bb->clk_table.fclk.num_clk_values - 1]; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c index f50662b83296..d88b3e0082dd 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c @@ -659,7 +659,7 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out) for (i = 1; i <= PMO_DCN4_MAX_DISPLAYS; i++) { switch (i) { case 1: - DML2_ASSERT(base_strategy_list_1_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES); + DML_ASSERT(base_strategy_list_1_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES); /* populate list */ pmo_dcn4_fams2_expand_base_pstate_strategies( @@ -670,7 +670,7 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out) &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]); break; case 2: - DML2_ASSERT(base_strategy_list_2_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES); + DML_ASSERT(base_strategy_list_2_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES); /* populate list */ pmo_dcn4_fams2_expand_base_pstate_strategies( @@ -681,7 +681,7 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out) &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]); break; case 3: - DML2_ASSERT(base_strategy_list_3_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES); + DML_ASSERT(base_strategy_list_3_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES); /* populate list */ pmo_dcn4_fams2_expand_base_pstate_strategies( @@ -692,7 +692,7 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out) &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]); break; case 4: - DML2_ASSERT(base_strategy_list_4_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES); + DML_ASSERT(base_strategy_list_4_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES); /* populate list */ pmo_dcn4_fams2_expand_base_pstate_strategies( diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c index dc2ce5e77f57..4a7c4c62111e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c @@ -761,7 +761,7 @@ bool dml2_top_mcache_calc_mcache_count_and_offsets(struct top_mcache_calc_mcache total_mcaches_required--; } } - dml2_printf("DML_CORE_DCN3::%s: plane_%d, total_mcaches_required=%d\n", __func__, i, total_mcaches_required); + DML_LOG_VERBOSE("DML_CORE_DCN3::%s: plane_%d, total_mcaches_required=%d\n", __func__, i, total_mcaches_required); if (total_mcaches_required > dml->soc_bbox.num_dcc_mcaches) { result = false; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c deleted file mode 100644 index c506667897c4..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c +++ /dev/null @@ -1,31 +0,0 @@ -// SPDX-License-Identifier: MIT -// -// Copyright 2024 Advanced Micro Devices, Inc. - -#include "dml2_debug.h" - -int dml2_log_internal(const char *format, ...) -{ - return 0; -} - -int dml2_printf(const char *format, ...) 
-{ -#ifdef _DEBUG -#ifdef _DEBUG_PRINTS - int result; - va_list args; - va_start(args, format); - - result = vprintf(format, args); - - va_end(args); - - return result; -#else - return 0; -#endif -#else - return 0; -#endif -} diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h index bfe6f236d2e4..611c80f4f1bf 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h @@ -5,55 +5,185 @@ #ifndef __DML2_DEBUG_H__ #define __DML2_DEBUG_H__ -#ifndef DML2_ASSERT -#define DML2_ASSERT(condition) ((void)0) -#endif +#include "os_types.h" +#define DML_ASSERT(condition) ASSERT(condition) +#define DML_LOG_LEVEL_DEFAULT DML_LOG_LEVEL_WARN +#define DML_LOG_INTERNAL(fmt, ...) dm_output_to_console(fmt, ## __VA_ARGS__) -/* - * DML_LOG_FATAL - fatal errors for unrecoverable DML states until a restart. - * DML_LOG_ERROR - unexpected but recoverable failures inside DML - * DML_LOG_WARN - unexpected inputs or events to DML - * DML_LOG_INFO - high level tracing of DML interfaces - * DML_LOG_DEBUG - detailed tracing of DML internal components - * DML_LOG_VERBOSE - detailed tracing of DML calculation procedure - */ -#if !defined(DML_LOG_LEVEL) -#if defined(_DEBUG) && defined(_DEBUG_PRINTS) -/* for backward compatibility with old macros */ -#define DML_LOG_LEVEL 5 -#else -#define DML_LOG_LEVEL 0 -#endif -#endif +/* private helper macros */ +#define _BOOL_FORMAT(field) "%s", field ? "true" : "false" +#define _UINT_FORMAT(field) "%u", field +#define _INT_FORMAT(field) "%d", field +#define _DOUBLE_FORMAT(field) "%lf", field +#define _ELEMENT_FUNC "function" +#define _ELEMENT_COMP_IF "component_interface" +#define _ELEMENT_TOP_IF "top_interface" +#define _LOG_ENTRY(element) do { \ + DML_LOG_INTERNAL("<"element" name=\""); \ + DML_LOG_INTERNAL(__func__); \ + DML_LOG_INTERNAL("\">\n"); \ +} while (0) +#define _LOG_EXIT(element) DML_LOG_INTERNAL("</"element">\n") +#define _LOG_SCALAR(field, format) do { \ + DML_LOG_INTERNAL(#field" = "format(field)); \ + DML_LOG_INTERNAL("\n"); \ +} while (0) +#define _LOG_ARRAY(field, size, format) do { \ + DML_LOG_INTERNAL(#field " = ["); \ + for (int _i = 0; _i < (int) size; _i++) { \ + DML_LOG_INTERNAL(format(field[_i])); \ + if (_i + 1 == (int) size) \ + DML_LOG_INTERNAL("]\n"); \ + else \ + DML_LOG_INTERNAL(", "); \ +}} while (0) +#define _LOG_2D_ARRAY(field, size0, size1, format) do { \ + DML_LOG_INTERNAL(#field" = ["); \ + for (int _i = 0; _i < (int) size0; _i++) { \ + DML_LOG_INTERNAL("\n\t["); \ + for (int _j = 0; _j < (int) size1; _j++) { \ + DML_LOG_INTERNAL(format(field[_i][_j])); \ + if (_j + 1 == (int) size1) \ + DML_LOG_INTERNAL("]"); \ + else \ + DML_LOG_INTERNAL(", "); \ + } \ + if (_i + 1 == (int) size0) \ + DML_LOG_INTERNAL("]\n"); \ + else \ + DML_LOG_INTERNAL(", "); \ + } \ +} while (0) +#define _LOG_3D_ARRAY(field, size0, size1, size2, format) do { \ + DML_LOG_INTERNAL(#field" = ["); \ + for (int _i = 0; _i < (int) size0; _i++) { \ + DML_LOG_INTERNAL("\n\t["); \ + for (int _j = 0; _j < (int) size1; _j++) { \ + DML_LOG_INTERNAL("["); \ + for (int _k = 0; _k < (int) size2; _k++) { \ + DML_LOG_INTERNAL(format(field[_i][_j][_k])); \ + if (_k + 1 == (int) size2) \ + DML_LOG_INTERNAL("]"); \ + else \ + DML_LOG_INTERNAL(", "); \ + } \ + if (_j + 1 == (int) size1) \ + DML_LOG_INTERNAL("]"); \ + else \ + DML_LOG_INTERNAL(", "); \ + } \ + if (_i + 1 == (int) size0) \ + DML_LOG_INTERNAL("]\n"); \ + else \ 
+ DML_LOG_INTERNAL(", "); \ + } \ +} while (0) -#define DML_LOG_FATAL(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__) -#if DML_LOG_LEVEL >= 1 -#define DML_LOG_ERROR(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__) +/* fatal errors for unrecoverable DML states until a full reset */ +#define DML_LOG_LEVEL_FATAL 0 +/* unexpected but recoverable failures inside DML */ +#define DML_LOG_LEVEL_ERROR 1 +/* unexpected inputs or events to DML */ +#define DML_LOG_LEVEL_WARN 2 +/* high level tracing of DML interfaces */ +#define DML_LOG_LEVEL_INFO 3 +/* tracing of DML internal executions */ +#define DML_LOG_LEVEL_DEBUG 4 +/* detailed tracing of DML calculation procedure */ +#define DML_LOG_LEVEL_VERBOSE 5 + +#ifndef DML_LOG_LEVEL +#define DML_LOG_LEVEL DML_LOG_LEVEL_DEFAULT +#endif /* #ifndef DML_LOG_LEVEL */ + +/* public macros for DML_LOG_LEVEL_FATAL and up */ +#define DML_LOG_FATAL(fmt, ...) DML_LOG_INTERNAL("[DML FATAL] " fmt, ## __VA_ARGS__) + +/* public macros for DML_LOG_LEVEL_ERROR and up */ +#if DML_LOG_LEVEL >= DML_LOG_LEVEL_ERROR +#define DML_LOG_ERROR(fmt, ...) DML_LOG_INTERNAL("[DML ERROR] "fmt, ## __VA_ARGS__) +#define DML_ASSERT_MSG(condition, fmt, ...) \ + do { \ + if (!(condition)) { \ + DML_LOG_ERROR("ASSERT hit in %s line %d\n", __func__, __LINE__); \ + DML_LOG_ERROR(fmt, ## __VA_ARGS__); \ + DML_ASSERT(condition); \ + } \ + } while (0) #else #define DML_LOG_ERROR(fmt, ...) ((void)0) +#define DML_ASSERT_MSG(condition, fmt, ...) ((void)0) #endif -#if DML_LOG_LEVEL >= 2 -#define DML_LOG_WARN(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__) + +/* public macros for DML_LOG_LEVEL_WARN and up */ +#if DML_LOG_LEVEL >= DML_LOG_LEVEL_WARN +#define DML_LOG_WARN(fmt, ...) DML_LOG_INTERNAL("[DML WARN] "fmt, ## __VA_ARGS__) #else #define DML_LOG_WARN(fmt, ...) ((void)0) #endif -#if DML_LOG_LEVEL >= 3 -#define DML_LOG_INFO(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__) + +/* public macros for DML_LOG_LEVEL_INFO and up */ +#if DML_LOG_LEVEL >= DML_LOG_LEVEL_INFO +#define DML_LOG_INFO(fmt, ...) DML_LOG_INTERNAL("[DML INFO] "fmt, ## __VA_ARGS__) +#define DML_LOG_TOP_IF_ENTER() _LOG_ENTRY(_ELEMENT_TOP_IF) +#define DML_LOG_TOP_IF_EXIT() _LOG_EXIT(_ELEMENT_TOP_IF) #else #define DML_LOG_INFO(fmt, ...) ((void)0) +#define DML_LOG_TOP_IF_ENTER() ((void)0) +#define DML_LOG_TOP_IF_EXIT() ((void)0) #endif -#if DML_LOG_LEVEL >= 4 -#define DML_LOG_DEBUG(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__) + +/* public macros for DML_LOG_LEVEL_DEBUG and up */ +#if DML_LOG_LEVEL >= DML_LOG_LEVEL_DEBUG +#define DML_LOG_DEBUG(fmt, ...) 
DML_LOG_INTERNAL(fmt, ## __VA_ARGS__) +#define DML_LOG_COMP_IF_ENTER() _LOG_ENTRY(_ELEMENT_COMP_IF) +#define DML_LOG_COMP_IF_EXIT() _LOG_EXIT(_ELEMENT_COMP_IF) +#define DML_LOG_FUNC_ENTER() _LOG_ENTRY(_ELEMENT_FUNC) +#define DML_LOG_FUNC_EXIT() _LOG_EXIT(_ELEMENT_FUNC) +#define DML_LOG_DEBUG_BOOL(field) _LOG_SCALAR(field, _BOOL_FORMAT) +#define DML_LOG_DEBUG_UINT(field) _LOG_SCALAR(field, _UINT_FORMAT) +#define DML_LOG_DEBUG_INT(field) _LOG_SCALAR(field, _INT_FORMAT) +#define DML_LOG_DEBUG_DOUBLE(field) _LOG_SCALAR(field, _DOUBLE_FORMAT) +#define DML_LOG_DEBUG_ARRAY_BOOL(field, size) _LOG_ARRAY(field, size, _BOOL_FORMAT) +#define DML_LOG_DEBUG_ARRAY_UINT(field, size) _LOG_ARRAY(field, size, _UINT_FORMAT) +#define DML_LOG_DEBUG_ARRAY_INT(field, size) _LOG_ARRAY(field, size, _INT_FORMAT) +#define DML_LOG_DEBUG_ARRAY_DOUBLE(field, size) _LOG_ARRAY(field, size, _DOUBLE_FORMAT) +#define DML_LOG_DEBUG_2D_ARRAY_BOOL(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _BOOL_FORMAT) +#define DML_LOG_DEBUG_2D_ARRAY_UINT(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _UINT_FORMAT) +#define DML_LOG_DEBUG_2D_ARRAY_INT(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _INT_FORMAT) +#define DML_LOG_DEBUG_2D_ARRAY_DOUBLE(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _DOUBLE_FORMAT) +#define DML_LOG_DEBUG_3D_ARRAY_BOOL(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _BOOL_FORMAT) +#define DML_LOG_DEBUG_3D_ARRAY_UINT(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _UINT_FORMAT) +#define DML_LOG_DEBUG_3D_ARRAY_INT(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _INT_FORMAT) +#define DML_LOG_DEBUG_3D_ARRAY_DOUBLE(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _DOUBLE_FORMAT) #else #define DML_LOG_DEBUG(fmt, ...) ((void)0) +#define DML_LOG_COMP_IF_ENTER() ((void)0) +#define DML_LOG_COMP_IF_EXIT() ((void)0) +#define DML_LOG_FUNC_ENTER() ((void)0) +#define DML_LOG_FUNC_EXIT() ((void)0) +#define DML_LOG_DEBUG_BOOL(field) ((void)0) +#define DML_LOG_DEBUG_UINT(field) ((void)0) +#define DML_LOG_DEBUG_INT(field) ((void)0) +#define DML_LOG_DEBUG_DOUBLE(field) ((void)0) +#define DML_LOG_DEBUG_ARRAY_BOOL(field, size) ((void)0) +#define DML_LOG_DEBUG_ARRAY_UINT(field, size) ((void)0) +#define DML_LOG_DEBUG_ARRAY_INT(field, size) ((void)0) +#define DML_LOG_DEBUG_ARRAY_DOUBLE(field, size) ((void)0) +#define DML_LOG_DEBUG_2D_ARRAY_BOOL(field, size0, size1) ((void)0) +#define DML_LOG_DEBUG_2D_ARRAY_UINT(field, size0, size1) ((void)0) +#define DML_LOG_DEBUG_2D_ARRAY_INT(field, size0, size1) ((void)0) +#define DML_LOG_DEBUG_2D_ARRAY_DOUBLE(field, size0, size1) ((void)0) +#define DML_LOG_DEBUG_3D_ARRAY_BOOL(field, size0, size1, size2) ((void)0) +#define DML_LOG_DEBUG_3D_ARRAY_UINT(field, size0, size1, size2) ((void)0) +#define DML_LOG_DEBUG_3D_ARRAY_INT(field, size0, size1, size2) ((void)0) +#define DML_LOG_DEBUG_3D_ARRAY_DOUBLE(field, size0, size1, size2) ((void)0) #endif -#if DML_LOG_LEVEL >= 5 -#define DML_LOG_VERBOSE(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__) + +/* public macros for DML_LOG_LEVEL_VERBOSE */ +#if DML_LOG_LEVEL >= DML_LOG_LEVEL_VERBOSE +#define DML_LOG_VERBOSE(fmt, ...) DML_LOG_INTERNAL(fmt, ## __VA_ARGS__) #else #define DML_LOG_VERBOSE(fmt, ...) 
((void)0) -#endif - -int dml2_log_internal(const char *format, ...); -int dml2_printf(const char *format, ...); - -#endif +#endif /* #if DML_LOG_LEVEL >= DML_LOG_LEVEL_VERBOSE */ +#endif /* __DML2_DEBUG_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h index d8d01dceacdd..d52aa82283b3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h @@ -38,6 +38,12 @@ struct dml2_mcg_min_clock_table { } max_clocks_khz; struct { + unsigned int dispclk; + unsigned int dppclk; + unsigned int dtbclk; + } max_ss_clocks_khz; + + struct { unsigned int dprefclk; unsigned int xtalclk; unsigned int pcierefclk; @@ -196,6 +202,8 @@ struct dml2_core_mode_support_result { } active; unsigned int dispclk_khz; + unsigned int dpprefclk_khz; + unsigned int dtbrefclk_khz; unsigned int dcfclk_deepsleep_khz; unsigned int socclk_khz; @@ -440,13 +448,17 @@ struct dml2_core_internal_state_intermediates { }; struct dml2_core_mode_support_locals { - struct dml2_core_calcs_mode_support_ex mode_support_ex_params; + union { + struct dml2_core_calcs_mode_support_ex mode_support_ex_params; + }; struct dml2_display_cfg svp_expanded_display_cfg; struct dml2_calculate_mcache_allocation_in_out calc_mcache_allocation_params; }; struct dml2_core_mode_programming_locals { - struct dml2_core_calcs_mode_programming_ex mode_programming_ex_params; + union { + struct dml2_core_calcs_mode_programming_ex mode_programming_ex_params; + }; struct dml2_display_cfg svp_expanded_display_cfg; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c index a966abd40788..5f1b49a50049 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c @@ -1082,22 +1082,22 @@ bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const s if (stream_disp_cfg_index >= disp_cfg_index_max) continue; - if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_bypass) { - scratch.odm_info.odm_factor = 1; - } else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_2to1) { - scratch.odm_info.odm_factor = 2; - } else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_4to1) { - scratch.odm_info.odm_factor = 4; - } else { - ASSERT(false); - scratch.odm_info.odm_factor = 1; - } - + if (ctx->architecture == dml2_architecture_20) { + if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_bypass) { + scratch.odm_info.odm_factor = 1; + } else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_2to1) { + scratch.odm_info.odm_factor = 2; + } else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_4to1) { + scratch.odm_info.odm_factor = 4; + } else { + ASSERT(false); + scratch.odm_info.odm_factor = 1; + } + } else if (ctx->architecture == dml2_architecture_21) { /* After DML2.1 update, ODM interpretation needs to change and is no longer the same as for DML2.0. * This is not an issue with new resource management logic. This block ensures backcompat * with legacy pipe management with updated DML.
* */ - if (ctx->architecture == dml2_architecture_21) { if (ODMMode[stream_disp_cfg_index] == 1) { scratch.odm_info.odm_factor = 1; } else if (ODMMode[stream_disp_cfg_index] == 2) { diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c index 6b3b8803e0ae..a56e75cdf712 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c @@ -868,7 +868,7 @@ bool dml2_svp_remove_all_phantom_pipes(struct dml2_context *ctx, struct dc_state /* Conditions for setting up phantom pipes for SubVP: * 1. Not force disable SubVP - * 2. Full update (i.e. !fast_validate) + * 2. Full update (i.e. DC_VALIDATE_MODE_AND_PROGRAMMING) * 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?) * 4. Display configuration passes validation * 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index 70c39df62533..3b866e876bf4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -590,11 +590,11 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc, p->out_states->state_array[i].dtbclk_mhz = max_dtbclk_mhz; p->out_states->state_array[i].phyclk_mhz = max_phyclk_mhz; + p->out_states->state_array[i].dscclk_mhz = max_dispclk_mhz / 3.0; p->out_states->state_array[i].phyclk_mhz = max_phyclk_mhz; p->out_states->state_array[i].dtbclk_mhz = max_dtbclk_mhz; /* Dependent states. */ - p->out_states->state_array[i].dscclk_mhz = p->in_states->state_array[i].dscclk_mhz; p->out_states->state_array[i].dram_speed_mts = p->in_states->state_array[i].dram_speed_mts; p->out_states->state_array[i].fabricclk_mhz = p->in_states->state_array[i].fabricclk_mhz; p->out_states->state_array[i].socclk_mhz = p->in_states->state_array[i].socclk_mhz; @@ -896,7 +896,7 @@ static void populate_dummy_dml_surface_cfg(struct dml_surface_cfg_st *out, unsig out->SurfaceWidthC[location] = in->timing.h_addressable; out->SurfaceHeightC[location] = in->timing.v_addressable; out->PitchY[location] = ((out->SurfaceWidthY[location] + 127) / 128) * 128; - out->PitchC[location] = 0; + out->PitchC[location] = 1; out->DCCEnable[location] = false; out->DCCMetaPitchY[location] = 0; out->DCCMetaPitchC[location] = 0; @@ -953,6 +953,7 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p out->SourcePixelFormat[location] = dml_420_10; break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: out->SourcePixelFormat[location] = dml_444_64; @@ -973,7 +974,9 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p } } -static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc_state *context, struct scaler_data *out) +static struct scaler_data *get_scaler_data_for_plane( + const struct dc_plane_state *in, + struct dc_state *context) { int i; struct pipe_ctx *temp_pipe = &context->res_ctx.temp_pipe; @@ -994,7 +997,7 @@ static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc } ASSERT(i < MAX_PIPES); - memcpy(out, &temp_pipe->plane_res.scl_data, sizeof(*out)); + return 
&temp_pipe->plane_res.scl_data; } static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, @@ -1057,11 +1060,7 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out const struct dc_plane_state *in, struct dc_state *context, const struct soc_bounding_box_st *soc) { - struct scaler_data *scaler_data = kzalloc(sizeof(*scaler_data), GFP_KERNEL); - if (!scaler_data) - return; - - get_scaler_data_for_plane(in, context, scaler_data); + struct scaler_data *scaler_data = get_scaler_data_for_plane(in, context); out->CursorBPP[location] = dml_cur_32bit; out->CursorWidth[location] = 256; @@ -1126,8 +1125,6 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out out->DynamicMetadataTransmittedBytes[location] = 0; out->NumberOfCursors[location] = 1; - - kfree(scaler_data); } static unsigned int map_stream_to_dml_display_cfg(const struct dml2_context *dml2, @@ -1192,22 +1189,6 @@ static unsigned int map_plane_to_dml_display_cfg(const struct dml2_context *dml2 return location; } -static void apply_legacy_svp_drr_settings(struct dml2_context *dml2, const struct dc_state *state, struct dml_display_cfg_st *dml_dispcfg) -{ - int i; - - if (state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { - ASSERT(state->stream_count == 1); - dml_dispcfg->timing.DRRDisplay[0] = true; - } else if (state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid) { - - for (i = 0; i < dml_dispcfg->num_timings; i++) { - if (dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i] == state->streams[state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index]->stream_id) - dml_dispcfg->timing.DRRDisplay[i] = true; - } - } -} - static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2, struct dc_state *state) { unsigned int i; @@ -1440,9 +1421,6 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat } } } - - if (!dml2->config.use_native_pstate_optimization) - apply_legacy_svp_drr_settings(dml2, context, dml_dispcfg); } void dml2_update_pipe_ctx_dchub_regs(struct _vcs_dpi_dml_display_rq_regs_st *rq_regs, diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index 939ee0708bd2..0318260370ed 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -93,12 +93,17 @@ static void map_hw_resources(struct dml2_context *dml2, static unsigned int pack_and_call_dml_mode_support_ex(struct dml2_context *dml2, const struct dml_display_cfg_st *display_cfg, - struct dml_mode_support_info_st *evaluation_info) + struct dml_mode_support_info_st *evaluation_info, + enum dc_validate_mode validate_mode) { struct dml2_wrapper_scratch *s = &dml2->v20.scratch; s->mode_support_params.mode_lib = &dml2->v20.dml_core_ctx; s->mode_support_params.in_display_cfg = display_cfg; + if (validate_mode == DC_VALIDATE_MODE_ONLY) + s->mode_support_params.in_start_state_idx = dml2->v20.dml_core_ctx.states.num_states - 1; + else + s->mode_support_params.in_start_state_idx = 0; s->mode_support_params.out_evaluation_info = evaluation_info; memset(evaluation_info, 0, sizeof(struct dml_mode_support_info_st)); @@ -110,10 +115,8 @@ static unsigned int pack_and_call_dml_mode_support_ex(struct dml2_context *dml2, static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrapper_optimize_configuration_params *p) { int unused_dpps = p->ip_params->max_num_dpp; - int i, j; - int odms_needed, refresh_rate_hz, 
dpps_needed, subvp_height, pstate_width_fw_delay_lines, surface_count; - int subvp_timing_to_add, new_timing_index, subvp_surface_to_add, new_surface_index; - float frame_time_sec, max_frame_time_sec; + int i; + int odms_needed; int largest_blend_and_timing = 0; bool optimization_done = false; @@ -128,79 +131,6 @@ static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrappe if (p->new_display_config != p->cur_display_config) *p->new_display_config = *p->cur_display_config; - // Optimize P-State Support - if (dml2->config.use_native_pstate_optimization) { - if (p->cur_mode_support_info->DRAMClockChangeSupport[0] == dml_dram_clock_change_unsupported) { - // Find a display with < 120Hz refresh rate with maximal refresh rate that's not already subvp - subvp_timing_to_add = -1; - subvp_surface_to_add = -1; - max_frame_time_sec = 0; - surface_count = 0; - for (i = 0; i < (int) p->cur_display_config->num_timings; i++) { - refresh_rate_hz = (int)div_u64((unsigned long long) p->cur_display_config->timing.PixelClock[i] * 1000 * 1000, - (p->cur_display_config->timing.HTotal[i] * p->cur_display_config->timing.VTotal[i])); - if (refresh_rate_hz < 120) { - // Check its upstream surfaces to see if this one could be converted to subvp. - dpps_needed = 0; - for (j = 0; j < (int) p->cur_display_config->num_surfaces; j++) { - if (p->cur_display_config->plane.BlendingAndTiming[j] == i && - p->cur_display_config->plane.UseMALLForPStateChange[j] == dml_use_mall_pstate_change_disable) { - dpps_needed += p->cur_mode_support_info->DPPPerSurface[j]; - subvp_surface_to_add = j; - surface_count++; - } - } - - if (surface_count == 1 && dpps_needed > 0 && dpps_needed <= unused_dpps) { - frame_time_sec = (float)1 / refresh_rate_hz; - if (frame_time_sec > max_frame_time_sec) { - max_frame_time_sec = frame_time_sec; - subvp_timing_to_add = i; - } - } - } - } - if (subvp_timing_to_add >= 0) { - new_timing_index = p->new_display_config->num_timings++; - new_surface_index = p->new_display_config->num_surfaces++; - // Add a phantom pipe reflecting the main pipe's timing - dml2_util_copy_dml_timing(&p->new_display_config->timing, new_timing_index, subvp_timing_to_add); - - pstate_width_fw_delay_lines = (int)(((double)(p->config->svp_pstate.subvp_fw_processing_delay_us + - p->config->svp_pstate.subvp_pstate_allow_width_us) / 1000000) * - (p->new_display_config->timing.PixelClock[subvp_timing_to_add] * 1000 * 1000) / - (double)p->new_display_config->timing.HTotal[subvp_timing_to_add]); - - subvp_height = p->cur_mode_support_info->SubViewportLinesNeededInMALL[subvp_timing_to_add] + pstate_width_fw_delay_lines; - - p->new_display_config->timing.VActive[new_timing_index] = subvp_height; - p->new_display_config->timing.VTotal[new_timing_index] = subvp_height + - p->new_display_config->timing.VTotal[subvp_timing_to_add] - p->new_display_config->timing.VActive[subvp_timing_to_add]; - - p->new_display_config->output.OutputDisabled[new_timing_index] = true; - - p->new_display_config->plane.UseMALLForPStateChange[subvp_surface_to_add] = dml_use_mall_pstate_change_sub_viewport; - - dml2_util_copy_dml_plane(&p->new_display_config->plane, new_surface_index, subvp_surface_to_add); - dml2_util_copy_dml_surface(&p->new_display_config->surface, new_surface_index, subvp_surface_to_add); - - p->new_display_config->plane.ViewportHeight[new_surface_index] = subvp_height; - p->new_display_config->plane.ViewportHeightChroma[new_surface_index] = subvp_height; - p->new_display_config->plane.ViewportStationary[new_surface_index] = 
false; - - p->new_display_config->plane.UseMALLForStaticScreen[new_surface_index] = dml_use_mall_static_screen_disable; - p->new_display_config->plane.UseMALLForPStateChange[new_surface_index] = dml_use_mall_pstate_change_phantom_pipe; - - p->new_display_config->plane.NumberOfCursors[new_surface_index] = 0; - - p->new_policy->ImmediateFlipRequirement[new_surface_index] = dml_immediate_flip_not_required; - - p->new_display_config->plane.BlendingAndTiming[new_surface_index] = new_timing_index; - - optimization_done = true; - } - } - } // Optimize Clocks if (!optimization_done) { @@ -224,7 +154,8 @@ static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrappe return optimization_done; } -static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *dml2, struct dc_state *display_state) +static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *dml2, struct dc_state *display_state, + enum dc_validate_mode validate_mode) { struct dml2_calculate_lowest_supported_state_for_temp_read_scratch *s = &dml2->v20.scratch.dml2_calculate_lowest_supported_state_for_temp_read_scratch; struct dml2_wrapper_scratch *s_global = &dml2->v20.scratch; @@ -266,7 +197,8 @@ static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *d dml2->v20.dml_core_ctx.states.state_array[j].dram_clock_change_latency_us = s_global->dummy_pstate_table[i].dummy_pstate_latency_us; } - dml_result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, &s->evaluation_info); + dml_result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, &s->evaluation_info, + validate_mode); if (dml_result && s->evaluation_info.DRAMClockChangeSupport[0] == dml_dram_clock_change_vactive) { map_hw_resources(dml2, &s->cur_display_config, &s->evaluation_info); @@ -331,7 +263,8 @@ static bool does_configuration_meet_sw_policies(struct dml2_context *ctx, const } static bool dml_mode_support_wrapper(struct dml2_context *dml2, - struct dc_state *display_state) + struct dc_state *display_state, + enum dc_validate_mode validate_mode) { struct dml2_wrapper_scratch *s = &dml2->v20.scratch; unsigned int result = 0, i; @@ -367,7 +300,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2, result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, - &s->mode_support_info); + &s->mode_support_info, + validate_mode); if (result) result = does_configuration_meet_sw_policies(dml2, &s->cur_display_config, &s->mode_support_info); @@ -388,7 +322,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2, dml2->v20.dml_core_ctx.policy = s->new_policy; optimized_result = pack_and_call_dml_mode_support_ex(dml2, &s->new_display_config, - &s->mode_support_info); + &s->mode_support_info, + validate_mode); if (optimized_result) optimized_result = does_configuration_meet_sw_policies(dml2, &s->new_display_config, &s->mode_support_info); @@ -407,7 +342,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2, if (!optimized_result) { result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, - &s->mode_support_info); + &s->mode_support_info, + validate_mode); } } @@ -417,118 +353,7 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2, return result; } -static int find_drr_eligible_stream(struct dc_state *display_state) -{ - int i; - - for (i = 0; i < display_state->stream_count; i++) { - if (dc_state_get_stream_subvp_type(display_state, display_state->streams[i]) == SUBVP_NONE - && 
display_state->streams[i]->ignore_msa_timing_param) { - // Use ignore_msa_timing_param flag to identify as DRR - return i; - } - } - - return -1; -} - -static bool optimize_pstate_with_svp_and_drr(struct dml2_context *dml2, struct dc_state *display_state) -{ - struct dml2_wrapper_scratch *s = &dml2->v20.scratch; - bool pstate_optimization_done = false; - bool pstate_optimization_success = false; - bool result = false; - int drr_display_index = 0, non_svp_streams = 0; - bool force_svp = dml2->config.svp_pstate.force_enable_subvp; - - display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false; - display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false; - - result = dml_mode_support_wrapper(dml2, display_state); - - if (!result) { - pstate_optimization_done = true; - } else if (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported && !force_svp) { - pstate_optimization_success = true; - pstate_optimization_done = true; - } - - if (display_state->stream_count == 1 && dml2->config.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch(dml2->config.callbacks.dc, display_state)) { - display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = true; - - result = dml_mode_support_wrapper(dml2, display_state); - } else { - non_svp_streams = display_state->stream_count; - - while (!pstate_optimization_done) { - result = dml_mode_programming(&dml2->v20.dml_core_ctx, s->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true); - - // Always try adding SVP first - if (result) - result = dml2_svp_add_phantom_pipe_to_dc_state(dml2, display_state, &s->mode_support_info); - else - pstate_optimization_done = true; - - - if (result) { - result = dml_mode_support_wrapper(dml2, display_state); - } else { - pstate_optimization_done = true; - } - - if (result) { - non_svp_streams--; - - if (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) { - if (dml2_svp_validate_static_schedulability(dml2, display_state, s->mode_support_info.DRAMClockChangeSupport[0])) { - pstate_optimization_success = true; - pstate_optimization_done = true; - } else { - pstate_optimization_success = false; - pstate_optimization_done = false; - } - } else { - drr_display_index = find_drr_eligible_stream(display_state); - - // If there is only 1 remaining non SubVP pipe that is DRR, check static - // schedulability for SubVP + DRR. 
- if (non_svp_streams == 1 && drr_display_index >= 0) { - if (dml2_svp_drr_schedulable(dml2, display_state, &display_state->streams[drr_display_index]->timing)) { - display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = true; - display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index = drr_display_index; - result = dml_mode_support_wrapper(dml2, display_state); - } - - if (result && s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) { - pstate_optimization_success = true; - pstate_optimization_done = true; - } else { - pstate_optimization_success = false; - pstate_optimization_done = false; - } - } - - if (pstate_optimization_success) { - pstate_optimization_done = true; - } else { - pstate_optimization_done = false; - } - } - } - } - - if (!pstate_optimization_success) { - dml2_svp_remove_all_phantom_pipes(dml2, display_state); - display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false; - display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false; - result = dml_mode_support_wrapper(dml2, display_state); - } - - return result; -} - -static bool call_dml_mode_support_and_programming(struct dc_state *context) +static bool call_dml_mode_support_and_programming(struct dc_state *context, enum dc_validate_mode validate_mode) { unsigned int result = 0; unsigned int min_state = 0; @@ -542,16 +367,13 @@ static bool call_dml_mode_support_and_programming(struct dc_state *context) struct dml2_wrapper_scratch *s = &dml2->v20.scratch; if (!context->streams[0]->sink->link->dc->caps.is_apu) { - min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context); + min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context, + validate_mode); ASSERT(min_state_for_g6_temp_read >= 0); } - if (!dml2->config.use_native_pstate_optimization) { - result = optimize_pstate_with_svp_and_drr(dml2, context); - } else { - result = dml_mode_support_wrapper(dml2, context); - } + result = dml_mode_support_wrapper(dml2, context, validate_mode); /* Upon trying to set certain frequencies in FRL, min_state_for_g6_temp_read is reported as -1. This leads to an invalid value of min_state causing crashes later on. * Use the default logic for min_state only when min_state_for_g6_temp_read is a valid value. In other cases, use the value calculated by the DML directly. @@ -573,7 +395,8 @@ static bool call_dml_mode_support_and_programming(struct dc_state *context) return result; } -static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_state *context) +static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_state *context, + enum dc_validate_mode validate_mode) { struct dml2_context *dml2 = context->bw_ctx.dml2; struct dml2_wrapper_scratch *s = &dml2->v20.scratch; @@ -609,7 +432,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s copy_dummy_pstate_table(s->dummy_pstate_table, in_dc->clk_mgr->bw_params->dummy_pstate_table, 4); - result = call_dml_mode_support_and_programming(context); + result = call_dml_mode_support_and_programming(context, validate_mode); /* Call map dc pipes to map the pipes based on the DML output. For correctly determining if recalculation * is required or not, the resource context needs to correctly reflect the number of active pipes. We would * only know the correct number of active pipes after dml2_map_dc_pipes is called.
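For reference: with the hunks above, the old fast_validate flag is replaced by enum dc_validate_mode throughout the dml2 wrapper, and pack_and_call_dml_mode_support_ex now derives the DML start state from that mode. A minimal C sketch of the selection logic, assuming the DC_VALIDATE_MODE_* values from dc_validate_mode (the helper name is ours, not part of the patch):

/* Sketch only: mirrors the in_start_state_idx selection in the hunk above. */
static unsigned int pick_start_state_idx(const struct dml2_context *dml2,
					 enum dc_validate_mode validate_mode)
{
	/* Mode-support-only validation checks just the top state... */
	if (validate_mode == DC_VALIDATE_MODE_ONLY)
		return dml2->v20.dml_core_ctx.states.num_states - 1;

	/* ...while full validation and programming scans from state 0. */
	return 0;
}

Starting at states.num_states - 1 means a DC_VALIDATE_MODE_ONLY call evaluates only the highest voltage state, which is what keeps the mode-support-only path cheap.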
@@ -626,7 +449,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s need_recalculation = dml2_verify_det_buffer_configuration(dml2, context, &dml2->det_helper_scratch); if (need_recalculation) { /* Engage the DML again if recalculation is required. */ - call_dml_mode_support_and_programming(context); + call_dml_mode_support_and_programming(context, validate_mode); if (!dml2->config.skip_hw_state_mapping) { dml2_map_dc_pipes(dml2, context, &s->cur_display_config, &s->dml_to_dc_pipe_mapping, in_dc->current_state); } @@ -661,7 +484,10 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s dml2_copy_clocks_to_dc_state(&out_clks, context); dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.a, &dml2->v20.dml_core_ctx); dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.b, &dml2->v20.dml_core_ctx); - memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c)); + if (context->streams[0]->sink->link->dc->caps.is_apu) + dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.dml_core_ctx); + else + memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c)); dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.d, &dml2->v20.dml_core_ctx); dml2_extract_writeback_wm(context, &dml2->v20.dml_core_ctx); //copy for deciding zstate use @@ -679,7 +505,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s return result; } -static bool dml2_validate_only(struct dc_state *context) +static bool dml2_validate_only(struct dc_state *context, enum dc_validate_mode validate_mode) { struct dml2_context *dml2; unsigned int result = 0; @@ -703,7 +529,8 @@ static bool dml2_validate_only(struct dc_state *context) result = pack_and_call_dml_mode_support_ex(dml2, &dml2->v20.scratch.cur_display_config, - &dml2->v20.scratch.mode_support_info); + &dml2->v20.scratch.mode_support_info, + validate_mode); if (result) result = does_configuration_meet_sw_policies(dml2, &dml2->v20.scratch.cur_display_config, &dml2->v20.scratch.mode_support_info); @@ -718,7 +545,8 @@ static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *d } } -bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, bool fast_validate) +bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, + enum dc_validate_mode validate_mode) { bool out = false; @@ -728,27 +556,32 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2 /* DML2.1 validation path */ if (dml2->architecture == dml2_architecture_21) { - out = dml21_validate(in_dc, context, dml2, fast_validate); + out = dml21_validate(in_dc, context, dml2, validate_mode); return out; } - /* Use dml_validate_only for fast_validate path */ - if (fast_validate) - out = dml2_validate_only(context); + DC_FP_START(); + + /* Use dml_validate_only for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX path */ + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) + out = dml2_validate_only(context, validate_mode); else - out = dml2_validate_and_build_resource(in_dc, context); + out = dml2_validate_and_build_resource(in_dc, context, validate_mode); + + DC_FP_END(); + return out; } static inline struct dml2_context *dml2_allocate_memory(void) { - return (struct dml2_context *) kzalloc(sizeof(struct 
dml2_context), GFP_KERNEL); + return (struct dml2_context *) vzalloc(sizeof(struct dml2_context)); } static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) { - if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01)) { - dml21_reinit(in_dc, dml2, config); + if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) { + dml21_reinit(in_dc, *dml2, config); return; } @@ -779,19 +612,21 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op break; } + DC_FP_START(); + initialize_dml2_ip_params(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.ip); initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc); initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states); + + DC_FP_END(); } bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) { // TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete. - if ((in_dc->debug.using_dml21) - && (in_dc->ctx->dce_version == DCN_VERSION_4_01 - )) + if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) return dml21_create(in_dc, dml2, config); // Allocate Mode Lib Ctx @@ -812,7 +647,7 @@ void dml2_destroy(struct dml2_context *dml2) if (dml2->architecture == dml2_architecture_21) dml21_destroy(dml2); - kfree(dml2); + vfree(dml2); } void dml2_extract_dram_and_fclk_change_support(struct dml2_context *dml2, @@ -860,8 +695,8 @@ void dml2_reinit(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) { - if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01)) { - dml21_reinit(in_dc, dml2, config); + if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) { + dml21_reinit(in_dc, *dml2, config); return; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h index 785226945699..c384e141cebc 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h @@ -40,6 +40,7 @@ struct dc_sink; struct dc_stream_state; struct resource_context; struct display_stream_compressor; +struct dc_mcache_params; // Configuration of the MALL on the SoC struct dml2_soc_mall_info { @@ -107,6 +108,7 @@ struct dml2_dc_callbacks { unsigned int (*get_max_flickerless_instant_vtotal_increase)( struct dc_stream_state *stream, bool is_gaming); + bool (*allocate_mcache)(struct dc_state *context, const struct dc_mcache_params *mcache_params); }; struct dml2_dc_svp_callbacks { @@ -238,7 +240,7 @@ struct dml2_configuration_options { bool use_clock_dc_limits; bool gpuvm_enable; bool force_tdlut_enable; - struct dml2_soc_bb *bb_from_dmub; + void *bb_from_dmub; }; /* @@ -270,7 +272,7 @@ void dml2_reinit(const struct dc *in_dc, * dml2_validate - Determines if a display configuration is supported or not. * @in_dc: dc. * @context: dc_state to be validated. - * @fast_validate: Fast validate will not populate context.res_ctx. + * @validate_mode: DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX will not populate context.res_ctx. * * DML1.0 compatible interface for validation. 
* @@ -293,7 +295,7 @@ void dml2_reinit(const struct dc *in_dc, bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, - bool fast_validate); + enum dc_validate_mode validate_mode); /* * dml2_extract_dram_and_fclk_change_support - Extracts the FCLK and UCLK change support info. diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c index abf439e743f2..2d70586cef40 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c @@ -790,8 +790,7 @@ static bool dpp3_program_blnd_lut(struct dpp *dpp_base, if (params == NULL) { REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_MODE, 0); - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) - dpp3_power_on_blnd_lut(dpp_base, false); + dpp3_power_on_blnd_lut(dpp_base, false); return false; } @@ -1204,8 +1203,7 @@ static bool dpp3_program_shaper(struct dpp *dpp_base, if (params == NULL) { REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, 0); - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) - dpp3_power_on_shaper(dpp_base, false); + dpp3_power_on_shaper(dpp_base, false); return false; } @@ -1399,8 +1397,7 @@ static bool dpp3_program_3dlut(struct dpp *dpp_base, if (params == NULL) { dpp3_set_3dlut_mode(dpp_base, LUT_BYPASS, false, false); - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) - dpp3_power_on_hdr3dlut(dpp_base, false); + dpp3_power_on_hdr3dlut(dpp_base, false); return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c index 62b7012cda43..f7a373a3d70a 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c @@ -138,7 +138,7 @@ bool dpp35_construct( dpp->base.funcs = &dcn35_dpp_funcs; // w/a for cursor memory stuck in LS by programming DISPCLK_R_GATE_DISABLE, limit w/a to some ASIC revs - if (dpp->base.ctx->asic_id.hw_internal_rev <= 0x10) + if (dpp->base.ctx->asic_id.hw_internal_rev < 0x40) dpp->dispclk_r_gate_disable = true; return ret; } diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c index 97bf26fa3573..36187f890d5d 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c @@ -231,7 +231,7 @@ static struct dpp_funcs dcn401_dpp_funcs = { .dpp_program_regamma_pwl = NULL, .dpp_set_pre_degam = dpp3_set_pre_degam, .dpp_program_input_lut = NULL, - .dpp_full_bypass = dpp401_full_bypass, + .dpp_full_bypass = NULL, .dpp_setup = dpp401_dpp_setup, .dpp_program_degamma_pwl = NULL, .dpp_program_cm_dealpha = dpp3_program_cm_dealpha, diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h index ecaa976e1f52..5a6a861402b3 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h @@ -641,6 +641,7 @@ uint32_t ISHARP_DELTA_DATA; \ uint32_t ISHARP_DELTA_INDEX; \ uint32_t ISHARP_NLDELTA_SOFT_CLIP + struct dcn401_dpp_registers { DPP_REG_VARIABLE_LIST_DCN401; }; @@ -683,8 +684,6 @@ void dpp401_dscl_set_scaler_manual_scale( struct dpp *dpp_base, const struct scaler_data *scl_data); -void dpp401_full_bypass(struct dpp *dpp_base); - void dpp401_dpp_setup( struct dpp *dpp_base, enum surface_pixel_format format, diff --git 
a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c index 1236e0f9a256..7aab77b58869 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c @@ -88,30 +88,6 @@ enum dscl_mode_sel { DSCL_MODE_DSCL_BYPASS = 6 }; -void dpp401_full_bypass(struct dpp *dpp_base) -{ - struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base); - - /* Input pixel format: ARGB8888 */ - REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, - CNVC_SURFACE_PIXEL_FORMAT, 0x8); - - /* Zero expansion */ - REG_SET_3(FORMAT_CONTROL, 0, - CNVC_BYPASS, 0, - FORMAT_CONTROL__ALPHA_EN, 0, - FORMAT_EXPANSION_MODE, 0); - - /* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */ - if (dpp->tf_mask->CM_BYPASS_EN) - REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1); - else - REG_SET(CM_CONTROL, 0, CM_BYPASS, 1); - - /* Setting degamma bypass for now */ - REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0); -} - void dpp401_set_cursor_attributes( struct dpp *dpp_base, struct dc_cursor_attributes *cursor_attributes) @@ -120,10 +96,11 @@ void dpp401_set_cursor_attributes( enum dc_cursor_color_format color_format = cursor_attributes->color_format; int cur_rom_en = 0; - // DCN4 should always do Cursor degamma for Cursor Color modes if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA || color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) { - cur_rom_en = 1; + if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) { + cur_rom_en = 1; + } } REG_UPDATE_3(CURSOR0_CONTROL, diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 11535922b5ff..1f53a9f0c0ac 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -30,6 +30,9 @@ #include "rc_calc.h" #include "fixed31_32.h" +#include "clk_mgr.h" +#include "resource.h" + #define DC_LOGGER \ dsc->ctx->logger @@ -149,6 +152,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing( } /* Forward Declarations */ +static unsigned int get_min_dsc_slice_count_for_odm( + const struct display_stream_compressor *dsc, + const struct dsc_enc_caps *dsc_enc_caps, + const struct dc_crtc_timing *timing); + static bool decide_dsc_bandwidth_range( const uint32_t min_bpp_x16, const uint32_t max_bpp_x16, @@ -183,6 +191,7 @@ static bool setup_dsc_config( const struct dc_crtc_timing *timing, const struct dc_dsc_config_options *options, const enum dc_link_encoding_format link_encoding, + int min_slice_count, struct dc_dsc_config *dsc_cfg); static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size) @@ -442,7 +451,6 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, return true; } - /* If DSC is possible, get DSC bandwidth range based on [min_bpp, max_bpp] target bitrate range and * timing's pixel clock and uncompressed bandwidth. * If DSC is not possible, leave '*range' untouched.
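Aside from removing dpp401_full_bypass, the dcn401_dpp_cm.c hunk above stops unconditionally enabling the cursor degamma ROM for color cursor modes. A condensed sketch of the new gating, using the identifiers from the hunk (the wrapper function is ours, for illustration only):

/* Sketch: the degamma ROM is requested only for color cursors that
 * explicitly set ENABLE_CURSOR_DEGAMMA in their attribute flags.
 */
static int want_cursor_degamma_rom(const struct dc_cursor_attributes *attr)
{
	enum dc_cursor_color_format fmt = attr->color_format;

	if ((fmt == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
	     fmt == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) &&
	    attr->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA)
		return 1;

	return 0;
}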
@@ -458,6 +466,7 @@ bool dc_dsc_compute_bandwidth_range( struct dc_dsc_bw_range *range) { bool is_dsc_possible = false; + unsigned int min_dsc_slice_count; struct dsc_enc_caps dsc_enc_caps; struct dsc_enc_caps dsc_common_caps; struct dc_dsc_config config = {0}; @@ -469,12 +478,14 @@ bool dc_dsc_compute_bandwidth_range( get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz); + min_dsc_slice_count = get_min_dsc_slice_count_for_odm(dsc, &dsc_enc_caps, timing); + is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, &dsc_enc_caps, timing->pixel_encoding, &dsc_common_caps); if (is_dsc_possible) is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing, - &options, link_encoding, &config); + &options, link_encoding, min_dsc_slice_count, &config); if (is_dsc_possible) is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16, @@ -525,20 +536,153 @@ void dc_dsc_dump_decoder_caps(const struct display_stream_compressor *dsc, DC_LOG_DSC("\tis_dp %d", dsc_sink_caps->is_dp); } + +static void build_dsc_enc_combined_slice_caps( + const struct dsc_enc_caps *single_dsc_enc_caps, + struct dsc_enc_caps *dsc_enc_caps, + unsigned int max_odm_combine_factor) +{ + /* 1-16 slice configurations, single DSC */ + dsc_enc_caps->slice_caps.raw |= single_dsc_enc_caps->slice_caps.raw; + + /* 2x DSC's */ + if (max_odm_combine_factor >= 2) { + /* 1 + 1 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_1; + + /* 2 + 2 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_2; + + /* 4 + 4 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4; + + /* 8 + 8 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_8; + } + + /* 3x DSC's */ + if (max_odm_combine_factor >= 3) { + /* 4 + 4 + 4 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4; + } + + /* 4x DSC's */ + if (max_odm_combine_factor >= 4) { + /* 1 + 1 + 1 + 1 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_1; + + /* 2 + 2 + 2 + 2 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_2; + + /* 3 + 3 + 3 + 3 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_3; + + /* 4 + 4 + 4 + 4 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4; + } +} + +static void build_dsc_enc_caps( + const struct display_stream_compressor *dsc, + struct dsc_enc_caps *dsc_enc_caps) +{ + unsigned int max_dscclk_khz; + unsigned int num_dsc; + unsigned int max_odm_combine_factor; + struct dsc_enc_caps single_dsc_enc_caps; + + struct dc *dc; + + if (!dsc || !dsc->ctx || !dsc->ctx->dc || !dsc->funcs->dsc_get_single_enc_caps) + return; + + dc = dsc->ctx->dc; + + if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_max_clock_khz || !dc->res_pool || dc->debug.disable_dsc) + return; + + /* get max DSCCLK from clk_mgr */ + max_dscclk_khz = dc->clk_mgr->funcs->get_max_clock_khz(dc->clk_mgr, CLK_TYPE_DSCCLK); + + dsc->funcs->dsc_get_single_enc_caps(&single_dsc_enc_caps, max_dscclk_khz); + + /* global capabilities */ + dsc_enc_caps->dsc_version = single_dsc_enc_caps.dsc_version; + dsc_enc_caps->lb_bit_depth = single_dsc_enc_caps.lb_bit_depth; + dsc_enc_caps->is_block_pred_supported = single_dsc_enc_caps.is_block_pred_supported; + dsc_enc_caps->max_slice_width = 
single_dsc_enc_caps.max_slice_width; + dsc_enc_caps->bpp_increment_div = single_dsc_enc_caps.bpp_increment_div; + dsc_enc_caps->color_formats.raw = single_dsc_enc_caps.color_formats.raw; + dsc_enc_caps->color_depth.raw = single_dsc_enc_caps.color_depth.raw; + + /* expand per DSC capabilities to global */ + max_odm_combine_factor = dc->caps.max_odm_combine_factor; + num_dsc = dc->res_pool->res_cap->num_dsc; + max_odm_combine_factor = min(max_odm_combine_factor, num_dsc); + dsc_enc_caps->max_total_throughput_mps = + single_dsc_enc_caps.max_total_throughput_mps * + max_odm_combine_factor; + + /* check slice counts possible with ODM combine */ + build_dsc_enc_combined_slice_caps(&single_dsc_enc_caps, dsc_enc_caps, max_odm_combine_factor); +} + +static inline uint32_t dsc_div_by_10_round_up(uint32_t value) +{ + return (value + 9) / 10; +} + +static unsigned int get_min_dsc_slice_count_for_odm( + const struct display_stream_compressor *dsc, + const struct dsc_enc_caps *dsc_enc_caps, + const struct dc_crtc_timing *timing) +{ + unsigned int max_dispclk_khz; + + /* get max pixel rate and combine caps */ + max_dispclk_khz = dsc_enc_caps->max_total_throughput_mps * 1000; + if (dsc && dsc->ctx->dc) { + if (dsc->ctx->dc->clk_mgr && + dsc->ctx->dc->clk_mgr->funcs->get_max_clock_khz) { + /* dispclk is available */ + max_dispclk_khz = dsc->ctx->dc->clk_mgr->funcs->get_max_clock_khz(dsc->ctx->dc->clk_mgr, CLK_TYPE_DISPCLK); + } + } + + /* validate parameters */ + if (max_dispclk_khz == 0 || dsc_enc_caps->max_slice_width == 0) + return 1; + + /* consider minimum odm slices required due to + * 1) display pipe throughput (dispclk) + * 2) max image width per slice + */ + return dc_fixpt_ceil(dc_fixpt_max( + dc_fixpt_div_int(dc_fixpt_from_int(dsc_div_by_10_round_up(timing->pix_clk_100hz)), + max_dispclk_khz), // throughput + dc_fixpt_div_int(dc_fixpt_from_int(timing->h_addressable + timing->h_border_left + timing->h_border_right), + dsc_enc_caps->max_slice_width))); // slice width +} + static void get_dsc_enc_caps( const struct display_stream_compressor *dsc, struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz) { - // This is a static HW query, so we can use any DSC - memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps)); - if (dsc) { - if (!dsc->ctx->dc->debug.disable_dsc) - dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz); - if (dsc->ctx->dc->debug.native422_support) - dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1; + + if (!dsc || !dsc->ctx || !dsc->ctx->dc || dsc->ctx->dc->debug.disable_dsc) + return; + + /* check if the reported cap is global or only for a single DCN DSC enc */ + if (dsc->funcs->dsc_get_enc_caps) { + dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz); + } else { + build_dsc_enc_caps(dsc, dsc_enc_caps); } + + if (dsc->ctx->dc->debug.native422_support) + dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1; } /* Returns 'false' if no intersection was found for at least one capability.
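The fixed-point expression at the end of get_min_dsc_slice_count_for_odm reduces to two ceiling divisions, since ceil(max(a, b)) == max(ceil(a), ceil(b)) for non-negative values. An equivalent plain-integer sketch, assuming the kernel's DIV_ROUND_UP and max() helpers (the function name is ours):

#include <linux/math.h>
#include <linux/minmax.h>

/* pix_clk_khz is the pixel clock already divided by 10 and rounded up
 * (dsc_div_by_10_round_up of pix_clk_100hz); pic_width includes borders.
 */
static unsigned int min_odm_slices(unsigned int pix_clk_khz,
				   unsigned int max_dispclk_khz,
				   unsigned int pic_width,
				   unsigned int max_slice_width)
{
	/* slices needed so one display pipe can keep up with the pixel rate */
	unsigned int by_throughput = DIV_ROUND_UP(pix_clk_khz, max_dispclk_khz);
	/* slices needed so no slice exceeds the maximum slice width */
	unsigned int by_width = DIV_ROUND_UP(pic_width, max_slice_width);

	return max(by_throughput, by_width);
}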
@@ -621,11 +765,6 @@ static bool intersect_dsc_caps( return true; } -static inline uint32_t dsc_div_by_10_round_up(uint32_t value) -{ - return (value + 9) / 10; -} - static uint32_t compute_bpp_x16_from_target_bandwidth( const uint32_t bandwidth_in_kbps, const struct dc_crtc_timing *timing, @@ -910,11 +1049,11 @@ static bool setup_dsc_config( const struct dc_crtc_timing *timing, const struct dc_dsc_config_options *options, const enum dc_link_encoding_format link_encoding, + int min_slices_h, struct dc_dsc_config *dsc_cfg) { struct dsc_enc_caps dsc_common_caps; int max_slices_h = 0; - int min_slices_h = 0; int num_slices_h = 0; int pic_width; int slice_width; @@ -1018,12 +1157,9 @@ static bool setup_dsc_config( if (!is_dsc_possible) goto done; - min_slices_h = pic_width / dsc_common_caps.max_slice_width; - if (pic_width % dsc_common_caps.max_slice_width) - min_slices_h++; - min_slices_h = fit_num_slices_up(dsc_common_caps.slice_caps, min_slices_h); + /* increase minimum slice count to meet sink throughput limitations */ while (min_slices_h <= max_slices_h) { int pix_clk_per_slice_khz = dsc_div_by_10_round_up(timing->pix_clk_100hz) / min_slices_h; if (pix_clk_per_slice_khz <= sink_per_slice_throughput_mps * 1000) @@ -1032,14 +1168,12 @@ static bool setup_dsc_config( min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h); } - is_dsc_possible = (min_slices_h <= max_slices_h); - - if (pic_width % min_slices_h != 0) - min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first? - - if (min_slices_h == 0 && max_slices_h == 0) - is_dsc_possible = false; + /* increase minimum slice count to meet divisibility requirements */ + while (pic_width % min_slices_h != 0 && min_slices_h <= max_slices_h) { + min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h); + } + is_dsc_possible = (min_slices_h <= max_slices_h) && max_slices_h != 0; if (!is_dsc_possible) goto done; @@ -1162,12 +1296,19 @@ bool dc_dsc_compute_config( { bool is_dsc_possible = false; struct dsc_enc_caps dsc_enc_caps; - + unsigned int min_dsc_slice_count; get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz); + + min_dsc_slice_count = get_min_dsc_slice_count_for_odm(dsc, &dsc_enc_caps, timing); + is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, target_bandwidth_kbps, - timing, options, link_encoding, dsc_cfg); + timing, + options, + link_encoding, + min_dsc_slice_count, + dsc_cfg); return is_dsc_possible; } diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c index 75128fd34306..bd1b9aef6d5c 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c @@ -57,13 +57,6 @@ static const struct dsc_funcs dcn20_dsc_funcs = { #define DC_LOGGER \ dsc->ctx->logger -enum dsc_bits_per_comp { - DSC_BPC_8 = 8, - DSC_BPC_10 = 10, - DSC_BPC_12 = 12, - DSC_BPC_UNKNOWN -}; - /* API functions (external or via structure->function_pointer) */ void dsc2_construct(struct dcn20_dsc *dsc, diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h index 1fb90b52b814..a9c04fc95bd1 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h @@ -457,6 +457,12 @@ type DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING; \ type DSCRM_DSC_FORWARD_EN_STATUS +enum dsc_bits_per_comp { + DSC_BPC_8 = 8, + DSC_BPC_10 = 10, + DSC_BPC_12 = 12, + 
DSC_BPC_UNKNOWN +}; struct dcn20_dsc_registers { uint32_t DSC_TOP_CONTROL; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c index 4893b793fec0..7bd92ae8b13e 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c @@ -9,19 +9,14 @@ #include "dsc/dscc_types.h" #include "dsc/rc_calc.h" -#define MAX_THROUGHPUT_PER_DSC_100HZ 20000000 -#define MAX_DSC_UNIT_COMBINE 4 - static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals); /* Object I/F functions */ //static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz); //static bool dsc401_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps); -static void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc); -static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz); +static void dsc401_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz); static const struct dsc_funcs dcn401_dsc_funcs = { - .dsc_get_enc_caps = dsc401_get_enc_caps, .dsc_read_state = dsc401_read_state, .dsc_validate_stream = dsc401_validate_stream, .dsc_set_config = dsc401_set_config, @@ -30,6 +25,7 @@ static const struct dsc_funcs dcn401_dsc_funcs = { .dsc_disable = dsc401_disable, .dsc_disconnect = dsc401_disconnect, .dsc_wait_disconnect_pending_clear = dsc401_wait_disconnect_pending_clear, + .dsc_get_single_enc_caps = dsc401_get_single_enc_caps, }; /* Macro definitions for REG_SET macros */ @@ -45,12 +41,6 @@ static const struct dsc_funcs dcn401_dsc_funcs = { #define DC_LOGGER \ dsc->ctx->logger -enum dsc_bits_per_comp { - DSC_BPC_8 = 8, - DSC_BPC_10 = 10, - DSC_BPC_12 = 12, - DSC_BPC_UNKNOWN -}; /* API functions (external or via structure->function_pointer) */ @@ -72,22 +62,14 @@ void dsc401_construct(struct dcn401_dsc *dsc, dsc->max_image_width = 5184; } -static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz) +static void dsc401_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz) { - int min_dsc_unit_required = (pixel_clock_100Hz + MAX_THROUGHPUT_PER_DSC_100HZ - 1) / MAX_THROUGHPUT_PER_DSC_100HZ; - dsc_enc_caps->dsc_version = 0x21; /* v1.2 - DP spec defined it in reverse order and we kept it */ - /* 1 slice is only supported with 1 DSC unit */ - dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = min_dsc_unit_required == 1 ? 1 : 0; - /* 2 slice is only supported with 1 or 2 DSC units */ - dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = (min_dsc_unit_required == 1 || min_dsc_unit_required == 2) ? 1 : 0; - /* 3 slice is only supported with 1 DSC unit */ - dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = min_dsc_unit_required == 1 ?
1 : 0; + dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 1; + dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = 1; + dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = 1; dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 = 1; - dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 = 1; - dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 = 1; - dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 = 1; dsc_enc_caps->lb_bit_depth = 13; dsc_enc_caps->is_block_pred_supported = true; @@ -101,7 +83,7 @@ static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clo dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1; dsc_enc_caps->color_depth.bits.COLOR_DEPTH_10_BPC = 1; dsc_enc_caps->color_depth.bits.COLOR_DEPTH_12_BPC = 1; - dsc_enc_caps->max_total_throughput_mps = MAX_THROUGHPUT_PER_DSC_100HZ * MAX_DSC_UNIT_COMBINE; + dsc_enc_caps->max_total_throughput_mps = max_dscclk_khz * 3 / 1000; dsc_enc_caps->max_slice_width = 5184; /* (including 64 overlap pixels for eDP MSO mode) */ dsc_enc_caps->bpp_increment_div = 16; /* 1/16th of a bit */ @@ -197,7 +179,7 @@ void dsc401_disable(struct display_stream_compressor *dsc) DSC_CLOCK_EN, 0); } -static void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc) +void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc) { struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc); diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h index e3ca70058e64..7acd57eb4f42 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h @@ -341,5 +341,6 @@ void dsc401_set_config(struct display_stream_compressor *dsc, const struct dsc_c void dsc401_enable(struct display_stream_compressor *dsc, int opp_pipe); void dsc401_disable(struct display_stream_compressor *dsc); void dsc401_disconnect(struct display_stream_compressor *dsc); +void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h index 1ebce5426a58..b0bd1f9425b5 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h @@ -108,6 +108,7 @@ struct dsc_funcs { void (*dsc_disable)(struct display_stream_compressor *dsc); void (*dsc_disconnect)(struct display_stream_compressor *dsc); void (*dsc_wait_disconnect_pending_clear)(struct display_stream_compressor *dsc); + void (*dsc_get_single_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c index b099989d9364..942d9f0b6df2 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c @@ -411,6 +411,20 @@ enum dc_irq_source dal_irq_get_rx_source( } } +enum dc_irq_source dal_irq_get_read_request( + const struct gpio *irq) +{ + enum gpio_id id = dal_gpio_get_id(irq); + + switch (id) { + case GPIO_ID_HPD: + return (enum dc_irq_source)(DC_IRQ_SOURCE_DCI2C_RR_DDC1 + + dal_gpio_get_enum(irq)); + default: + return DC_IRQ_SOURCE_INVALID; + } +} + enum gpio_result dal_irq_setup_hpd_filter( struct gpio *irq, struct gpio_hpd_config *config) diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c index 3f13a744d07d..01ec451004f7 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c +++ 
b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c @@ -62,7 +62,7 @@ static void dal_hw_hpd_destroy( *ptr = NULL; } -static enum gpio_result get_value( +static enum gpio_result dal_hw_hpd_get_value( const struct hw_gpio_pin *ptr, uint32_t *value) { @@ -85,7 +85,7 @@ static enum gpio_result get_value( return dal_hw_gpio_get_value(ptr, value); } -static enum gpio_result set_config( +static enum gpio_result dal_hw_hpd_set_config( struct hw_gpio_pin *ptr, const struct gpio_config_data *config_data) { @@ -104,9 +104,9 @@ static enum gpio_result set_config( static const struct hw_gpio_pin_funcs funcs = { .destroy = dal_hw_hpd_destroy, .open = dal_hw_gpio_open, - .get_value = get_value, + .get_value = dal_hw_hpd_get_value, .set_value = dal_hw_gpio_set_value, - .set_config = set_config, + .set_config = dal_hw_hpd_set_config, .change_mode = dal_hw_gpio_change_mode, .close = dal_hw_gpio_close, }; diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c index 2546224b326a..e4496ad203b2 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c @@ -132,9 +132,9 @@ int hubbub21_init_dchub(struct hubbub *hubbub, // Init VMID 0 based on PA config dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config); } - - dcn21_dchvm_init(hubbub); - + if (!hubbub1->base.ctx->dc->config.skip_riommu_prefetch_wa) { + dcn21_dchvm_init(hubbub); + } return hubbub1->num_vmid; } diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h index c7765e6f09e6..f8f991785d4f 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h @@ -666,10 +666,29 @@ struct dcn_mi_mask { DCN_HUBP_REG_FIELD_LIST(uint32_t); }; +struct dcn_fl_regs_st { + uint32_t lut_enable; + uint32_t lut_done; + uint32_t lut_addr_mode; + uint32_t lut_width; + uint32_t lut_tmz; + uint32_t lut_crossbar_sel_r; + uint32_t lut_crossbar_sel_g; + uint32_t lut_crossbar_sel_b; + uint32_t lut_addr_hi; + uint32_t lut_addr_lo; + uint32_t refcyc_3dlut_group; + uint32_t lut_fl_bias; + uint32_t lut_fl_scale; + uint32_t lut_fl_mode; + uint32_t lut_fl_format; +}; + struct dcn_hubp_state { struct _vcs_dpi_display_dlg_regs_st dlg_attr; struct _vcs_dpi_display_ttu_regs_st ttu_attr; struct _vcs_dpi_display_rq_regs_st rq_regs; + struct dcn_fl_regs_st fl_regs; uint32_t pixel_format; uint32_t inuse_addr_hi; uint32_t inuse_addr_lo; diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c index 5ed195377a6c..705b98b1b6cc 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c @@ -86,11 +86,11 @@ void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_WIDTH, width); } -void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled) +void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, uint8_t protection_bits) { struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); - REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_enabled ? 
1 : 0); + REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_bits); } void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp, @@ -1032,7 +1032,7 @@ static struct hubp_funcs dcn401_hubp_funcs = { .hubp_program_3dlut_fl_tmz_protected = hubp401_program_3dlut_fl_tmz_protected, .hubp_program_3dlut_fl_crossbar = hubp401_program_3dlut_fl_crossbar, .hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done, - .hubp_clear_tiling = hubp2_clear_tiling, + .hubp_clear_tiling = hubp401_clear_tiling, }; bool hubp401_construct( diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h index 6e1d4c90ddd4..608e6153fa68 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h @@ -333,7 +333,7 @@ void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp, enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b, enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r); -void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled); +void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, uint8_t protection_bits); void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width width); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/Makefile index 40ecebea1ba0..bee617ca0838 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/Makefile +++ b/drivers/gpu/drm/amd/display/dc/hwss/Makefile @@ -27,6 +27,24 @@ # DCE ############################################################################### +ifdef CONFIG_DRM_AMD_DC_SI +HWSS_DCE60 = dce60_hwseq.o + +AMD_DAL_HWSS_DCE60 = $(addprefix $(AMDDALPATH)/dc/hwss/dce60/,$(HWSS_DCE60)) + +AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCE60) +endif + +############################################################################### + +HWSS_DCE80 = dce80_hwseq.o + +AMD_DAL_HWSS_DCE80 = $(addprefix $(AMDDALPATH)/dc/hwss/dce80/,$(HWSS_DCE80)) + +AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCE80) + +############################################################################### + HWSS_DCE = dce_hwseq.o AMD_DAL_HWSS_DCE = $(addprefix $(AMDDALPATH)/dc/hwss/dce/,$(HWSS_DCE)) @@ -65,14 +83,6 @@ AMD_DAL_HWSS_DCE120 = $(addprefix $(AMDDALPATH)/dc/hwss/dce120/,$(HWSS_DCE120)) AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCE120) -############################################################################### - -HWSS_DCE80 = dce80_hwseq.o - -AMD_DAL_HWSS_DCE80 = $(addprefix $(AMDDALPATH)/dc/hwss/dce80/,$(HWSS_DCE80)) - -AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCE80) - ifdef CONFIG_DRM_AMD_DC_FP ############################################################################### # DCN diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 5656d10368ad..4ea13d0bf815 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -952,8 +952,8 @@ void dce110_edp_backlight_control( struct dc_context *ctx = link->ctx; struct bp_transmitter_control cntl = { 0 }; uint8_t pwrseq_instance = 0; - unsigned int pre_T11_delay = OLED_PRE_T11_DELAY; - unsigned int post_T7_delay = OLED_POST_T7_DELAY; + unsigned int pre_T11_delay = (link->dpcd_sink_ext_caps.bits.oled ? OLED_PRE_T11_DELAY : 0); + unsigned int post_T7_delay = (link->dpcd_sink_ext_caps.bits.oled ? 
OLED_POST_T7_DELAY : 0); if (dal_graphics_object_id_get_connector_id(link->link_enc->connector) != CONNECTOR_ID_EDP) { @@ -1069,7 +1069,8 @@ void dce110_edp_backlight_control( if (!enable) { /*follow oem panel config's requirement*/ pre_T11_delay += link->panel_config.pps.extra_pre_t11_ms; - msleep(pre_T11_delay); + if (pre_T11_delay) + msleep(pre_T11_delay); } } @@ -1185,8 +1186,10 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) if (dccg) { dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst); dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst); - if (dccg && dccg->funcs->set_dtbclk_dto) - dccg->funcs->set_dtbclk_dto(dccg, &dto_params); + if (!(dc->ctx->dce_version >= DCN_VERSION_3_5)) { + if (dccg && dccg->funcs->set_dtbclk_dto) + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); + } } } else if (dccg && dccg->funcs->disable_symclk_se) { dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst, @@ -1220,8 +1223,11 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx) struct dc_link *link = stream->link; struct dce_hwseq *hws = link->dc->hwseq; + if (hws && hws->wa_state.skip_blank_stream) + return; + if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { - if (!link->skip_implict_edp_power_control) + if (!link->skip_implict_edp_power_control && hws) hws->funcs.edp_backlight_control(link, false); link->dc->hwss.set_abm_immediate_disable(pipe_ctx); } @@ -1375,7 +1381,7 @@ static void populate_audio_dp_link_info( } } -static void build_audio_output( +void build_audio_output( struct dc_state *state, const struct pipe_ctx *pipe_ctx, struct audio_output *audio_output) @@ -1680,6 +1686,19 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw( if (dc_is_dp_signal(pipe_ctx->stream->signal)) dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG); + /* Temporary workaround to perform DSC programming ahead of stream enablement + * for smartmux/SPRS + * TODO: Remove SmartMux/SPRS checks once movement of DSC programming is generalized + */ + if (pipe_ctx->stream->timing.flags.DSC) { + if ((pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && + ((link->dc->config.smart_mux_version && link->dc->is_switch_in_progress_dest) + || link->is_dds || link->skip_implict_edp_power_control)) && + (dc_is_dp_signal(pipe_ctx->stream->signal) || + dc_is_virtual_signal(pipe_ctx->stream->signal))) + dc->link_srv->set_dsc_enable(pipe_ctx, true); + } + if (!stream->dpms_off) dc->link_srv->set_dpms_on(context, pipe_ctx); @@ -1921,6 +1940,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) can_apply_edp_fast_boot = dc_validate_boot_timing(dc, edp_stream->sink, &edp_stream->timing); + + // For Mux platforms, the default value is false. + // Disable fast boot during mux switching. + // The flag is cleared after switching is done.
+ if (dc->is_switch_in_progress_dest && edp_link->is_dds) + can_apply_edp_fast_boot = false; + edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot; if (can_apply_edp_fast_boot) { DC_LOG_EVENT_LINK_TRAINING("eDP fast boot Enable\n"); @@ -1964,6 +1990,10 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) if (edp_with_sink_num) edp_link_with_sink = edp_links_with_sink[0]; + // During a mux switch, powering down the HW blocks and then enabling + // the link via a DPCD SET_POWER write causes a brief flash + keep_edp_vdd_on |= dc->is_switch_in_progress_dest; + if (!can_apply_edp_fast_boot && !can_apply_seamless_boot) { if (edp_link_with_sink && !keep_edp_vdd_on) { /*turn off backlight before DP_blank and encoder powered down*/ @@ -2763,12 +2793,12 @@ static void dce110_enable_per_frame_crtc_position_reset( } -static void init_pipes(struct dc *dc, struct dc_state *context) +static void dce110_init_pipes(struct dc *dc, struct dc_state *context) { // Do nothing } -static void init_hw(struct dc *dc) +static void dce110_init_hw(struct dc *dc) { int i; struct dc_bios *bp; @@ -3327,7 +3357,7 @@ void dce110_disable_link_output(struct dc_link *link, static const struct hw_sequencer_funcs dce110_funcs = { .program_gamut_remap = program_gamut_remap, .program_output_csc = program_output_csc, - .init_hw = init_hw, + .init_hw = dce110_init_hw, .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = dce110_apply_ctx_for_surface, .post_unlock_program_front_end = dce110_post_unlock_program_front_end, @@ -3371,7 +3401,7 @@ static const struct hw_sequencer_funcs dce110_funcs = { }; static const struct hwseq_private_funcs dce110_private_funcs = { - .init_pipes = init_pipes, + .init_pipes = dce110_init_pipes, .set_input_transfer_func = dce110_set_input_transfer_func, .set_output_transfer_func = dce110_set_output_transfer_func, .power_down = dce110_power_down, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h index 06789ac3a224..7cd8c1576988 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h @@ -110,5 +110,9 @@ void dce110_enable_dp_link_output( enum signal_type signal, enum clock_source_id clock_source, const struct dc_link_settings *link_settings); +void build_audio_output( + struct dc_state *state, + const struct pipe_ctx *pipe_ctx, + struct audio_output *audio_output); #endif /* __DC_HWSS_DCE110_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.c index 44b56490e152..a08e9f9eec17 100644 --- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.c @@ -26,7 +26,7 @@ #include "dm_services.h" #include "dc.h" #include "core_types.h" -#include "dce60_hw_sequencer.h" +#include "dce60_hwseq.h" #include "dce/dce_hwseq.h" #include "dce110/dce110_hwseq.h" diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.h index f3b2d8b60d5b..f3b2d8b60d5b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.h diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index 912f96323ed6..39910f73ecd0 100644 --- 
a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -94,6 +94,128 @@ static void print_microsec(struct dc_context *dc_ctx, us_x10 % frac); } +/* + * Delay until we have passed the busy-until point, after which we can + * do the necessary locking/programming on consecutive full updates + */ +void dcn10_wait_for_pipe_update_if_needed(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only) +{ + struct crtc_position position; + struct dc_stream_state *stream = pipe_ctx->stream; + unsigned int vpos, frame_count; + uint32_t vupdate_start, vupdate_end, vblank_start; + unsigned int lines_to_vupdate, us_to_vupdate; + unsigned int us_per_line, us_vupdate; + + if (!pipe_ctx->stream || + !pipe_ctx->stream_res.tg || + !pipe_ctx->stream_res.stream_enc) + return; + + if (pipe_ctx->prev_odm_pipe && + pipe_ctx->stream) + return; + + if (!pipe_ctx->wait_is_required) + return; + + struct timing_generator *tg = pipe_ctx->stream_res.tg; + + if (tg->funcs->is_tg_enabled && !tg->funcs->is_tg_enabled(tg)) + return; + + dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start, + &vupdate_end); + + dc->hwss.get_position(&pipe_ctx, 1, &position); + vpos = position.vertical_count; + + frame_count = tg->funcs->get_frame_count(tg); + + if (frame_count - pipe_ctx->wait_frame_count > 2) + return; + + vblank_start = pipe_ctx->pipe_dlg_param.vblank_start; + + if (vpos >= vupdate_start && vupdate_start >= vblank_start) + lines_to_vupdate = stream->timing.v_total - vpos + vupdate_start; + else + lines_to_vupdate = vupdate_start - vpos; + + us_per_line = + stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz; + us_to_vupdate = lines_to_vupdate * us_per_line; + + if (vupdate_end < vupdate_start) + vupdate_end += stream->timing.v_total; + + if (lines_to_vupdate > stream->timing.v_total - vupdate_end + vupdate_start) + us_to_vupdate = 0; + + us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line; + + if (is_surface_update_only && us_to_vupdate + us_vupdate > 200) { + // surface updates come in at high IRQL + pipe_ctx->wait_is_required = true; + return; + } + + fsleep(us_to_vupdate + us_vupdate); + + // clear + pipe_ctx->next_vupdate = 0; + pipe_ctx->wait_frame_count = 0; + pipe_ctx->wait_is_required = false; +} + +/* + * On pipe unlock and programming, indicate that the pipe will be busy + * until some frame and line (vupdate); this is required for consecutive + * full updates, which need to wait for the previous update + * to latch before programming the next one + */ +void dcn10_set_wait_for_update_needed_for_pipe(struct dc *dc, struct pipe_ctx *pipe_ctx) +{ + uint32_t vupdate_start, vupdate_end; + struct crtc_position position; + unsigned int vpos, cur_frame; + + if (!pipe_ctx->stream || + !pipe_ctx->stream_res.tg || + !pipe_ctx->stream_res.stream_enc) + return; + + dc->hwss.get_position(&pipe_ctx, 1, &position); + vpos = position.vertical_count; + + dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start, + &vupdate_end); + + struct timing_generator *tg = pipe_ctx->stream_res.tg; + + struct optc *optc1 = DCN10TG_FROM_TG(tg); + + ASSERT(optc1->max_frame_count != 0); + + if (tg->funcs->is_tg_enabled && !tg->funcs->is_tg_enabled(tg)) + return; + + pipe_ctx->next_vupdate = vupdate_start; + + cur_frame = tg->funcs->get_frame_count(tg); + + if (vpos < vupdate_start) { + pipe_ctx->wait_frame_count = cur_frame; + } else { + if (cur_frame + 1 > optc1->max_frame_count) + pipe_ctx->wait_frame_count = cur_frame + 1 - optc1->max_frame_count; + else +
pipe_ctx->wait_frame_count = cur_frame + 1; + } + + pipe_ctx->wait_is_required = true; +} + void dcn10_lock_all_pipes(struct dc *dc, struct dc_state *context, bool lock) @@ -205,6 +327,35 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx) } } + DTN_INFO("\n=======HUBP FL======\n"); + DTN_INFO( + "HUBP FL: Enabled Done adr_mode width tmz xbar_sel_R xbar_sel_G xbar_sel_B adr_hi adr_low REFCYC Bias Scale Mode Format\n"); + for (i = 0; i < pool->pipe_count; i++) { + struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state); + struct dcn_fl_regs_st *fl_regs = &s->fl_regs; + + if (!s->blank_en) { + DTN_INFO("[%2d]: %5xh %6xh %5d %6d %8xh %2xh %6xh %6d %8d %8d %7d %8xh %5x %5x %5x", + pool->hubps[i]->inst, + fl_regs->lut_enable, + fl_regs->lut_done, + fl_regs->lut_addr_mode, + fl_regs->lut_width, + fl_regs->lut_tmz, + fl_regs->lut_crossbar_sel_r, + fl_regs->lut_crossbar_sel_g, + fl_regs->lut_crossbar_sel_b, + fl_regs->lut_addr_hi, + fl_regs->lut_addr_lo, + fl_regs->refcyc_3dlut_group, + fl_regs->lut_fl_bias, + fl_regs->lut_fl_scale, + fl_regs->lut_fl_mode, + fl_regs->lut_fl_format); + DTN_INFO("\n"); + } + } + DTN_INFO("\n=========RQ========\n"); DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s" " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s" @@ -389,6 +540,36 @@ static void dcn10_log_color_state(struct dc *dc, dc->caps.color.mpc.num_3dluts, dc->caps.color.mpc.ogam_ram, dc->caps.color.mpc.ocsc); + DTN_INFO("===== MPC RMCM 3DLUT =====\n"); + DTN_INFO("MPCC: SIZE MODE MODE_CUR RD_SEL 30BIT_EN WR_EN_MASK RAM_SEL OUT_NORM_FACTOR FL_SEL OUT_OFFSET OUT_SCALE FL_DONE SOFT_UNDERFLOW HARD_UNDERFLOW MEM_PWR_ST FORCE DIS MODE\n"); + for (i = 0; i < pool->mpcc_count; i++) { + struct mpcc_state s = {0}; + + pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); + if (s.opp_id != 0xf) + DTN_INFO("[%2d]: %4xh %4xh %6xh %4x %4x %4x %4x %4x %4xh %4xh %6xh %4x %4x %4x %4x %4x %4x %4x\n", + i, s.rmcm_regs.rmcm_3dlut_size, s.rmcm_regs.rmcm_3dlut_mode, s.rmcm_regs.rmcm_3dlut_mode_cur, + s.rmcm_regs.rmcm_3dlut_read_sel, s.rmcm_regs.rmcm_3dlut_30bit_en, s.rmcm_regs.rmcm_3dlut_wr_en_mask, + s.rmcm_regs.rmcm_3dlut_ram_sel, s.rmcm_regs.rmcm_3dlut_out_norm_factor, s.rmcm_regs.rmcm_3dlut_fl_sel, + s.rmcm_regs.rmcm_3dlut_out_offset_r, s.rmcm_regs.rmcm_3dlut_out_scale_r, s.rmcm_regs.rmcm_3dlut_fl_done, + s.rmcm_regs.rmcm_3dlut_fl_soft_underflow, s.rmcm_regs.rmcm_3dlut_fl_hard_underflow, s.rmcm_regs.rmcm_3dlut_mem_pwr_state, + s.rmcm_regs.rmcm_3dlut_mem_pwr_force, s.rmcm_regs.rmcm_3dlut_mem_pwr_dis, s.rmcm_regs.rmcm_3dlut_mem_pwr_mode); + } + DTN_INFO("\n"); + DTN_INFO("===== MPC RMCM Shaper =====\n"); + DTN_INFO("MPCC: CNTL LUT_MODE MODE_CUR WR_EN_MASK WR_SEL OFFSET SCALE START_B START_SEG_B END_B END_BASE_B MEM_PWR_ST FORCE DIS MODE\n"); + for (i = 0; i < pool->mpcc_count; i++) { + struct mpcc_state s = {0}; + + pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); + if (s.opp_id != 0xf) + DTN_INFO("[%2d]: %4xh %4xh %6xh %4x %4x %4x %4x %4x %4xh %4xh %6xh %4x %4x %4x %4x\n", + i, s.rmcm_regs.rmcm_cntl, s.rmcm_regs.rmcm_shaper_lut_mode, s.rmcm_regs.rmcm_shaper_mode_cur, + s.rmcm_regs.rmcm_shaper_lut_write_en_mask, s.rmcm_regs.rmcm_shaper_lut_write_sel, s.rmcm_regs.rmcm_shaper_offset_b, + s.rmcm_regs.rmcm_shaper_scale_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_start_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_start_seg_b, + s.rmcm_regs.rmcm_shaper_rama_exp_region_end_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_end_base_b, 
s.rmcm_regs.rmcm_shaper_mem_pwr_state, + s.rmcm_regs.rmcm_shaper_mem_pwr_force, s.rmcm_regs.rmcm_shaper_mem_pwr_dis, s.rmcm_regs.rmcm_shaper_mem_pwr_mode); + } } void dcn10_log_hw_state(struct dc *dc, @@ -2664,7 +2845,6 @@ void dcn10_update_visual_confirm_color(struct dc *dc, struct mpc *mpc = dc->res_pool->mpc; if (mpc->funcs->set_bg_color) { - memcpy(&pipe_ctx->plane_state->visual_confirm_color, &(pipe_ctx->visual_confirm_color), sizeof(struct tg_color)); mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id); } } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h index 42ffd1e1299c..57d30ea225f2 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h @@ -50,6 +50,13 @@ void dcn10_optimize_bandwidth( void dcn10_prepare_bandwidth( struct dc *dc, struct dc_state *context); +void dcn10_wait_for_pipe_update_if_needed( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool is_surface_update_only); +void dcn10_set_wait_for_update_needed_for_pipe( + struct dc *dc, + struct pipe_ctx *pipe_ctx); void dcn10_pipe_control_lock( struct dc *dc, struct pipe_ctx *pipe, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index 926c08e790c1..3207addbd4eb 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -76,6 +76,7 @@ void dcn20_log_color_state(struct dc *dc, { struct dc_context *dc_ctx = dc->ctx; struct resource_pool *pool = dc->res_pool; + bool is_gamut_remap_available = false; int i; DTN_INFO("DPP: DGAM mode SHAPER mode 3DLUT mode 3DLUT bit depth" @@ -89,15 +90,15 @@ void dcn20_log_color_state(struct dc *dc, struct dcn_dpp_state s = {0}; dpp->funcs->dpp_read_state(dpp, &s); - dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap); + if (dpp->funcs->dpp_get_gamut_remap) { + dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap); + is_gamut_remap_available = true; + } if (!s.is_enabled) continue; - DTN_INFO("[%2d]: %8s %11s %10s %15s %10s %9s %12s " - "%010lld %010lld %010lld %010lld " - "%010lld %010lld %010lld %010lld " - "%010lld %010lld %010lld %010lld", + DTN_INFO("[%2d]: %8s %11s %10s %15s %10s %9s", dpp->inst, (s.dgam_lut_mode == 0) ? "Bypass" : ((s.dgam_lut_mode == 1) ? "sRGB" : @@ -114,10 +115,17 @@ void dcn20_log_color_state(struct dc *dc, (s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit", (s.lut3d_size == 0) ? "17x17x17" : "9x9x9", (s.rgam_lut_mode == 1) ? "RAM A" : - ((s.rgam_lut_mode == 1) ? "RAM B" : "Bypass"), + ((s.rgam_lut_mode == 1) ? "RAM B" : "Bypass")); + + if (is_gamut_remap_available) { + DTN_INFO(" %12s " + "%010lld %010lld %010lld %010lld " + "%010lld %010lld %010lld %010lld " + "%010lld %010lld %010lld %010lld", + (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" : - ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" : - "SW"), + ((s.gamut_remap.gamut_adjust_type == 1) ? 
"HW" : + "SW"), s.gamut_remap.temperature_matrix[0].value, s.gamut_remap.temperature_matrix[1].value, s.gamut_remap.temperature_matrix[2].value, @@ -130,6 +138,8 @@ void dcn20_log_color_state(struct dc *dc, s.gamut_remap.temperature_matrix[9].value, s.gamut_remap.temperature_matrix[10].value, s.gamut_remap.temperature_matrix[11].value); + } + DTN_INFO("\n"); } DTN_INFO("\n"); @@ -273,14 +283,13 @@ void dcn20_setup_gsl_group_as_lock( } /* at this point we want to program whether it's to enable or disable */ - if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL && - pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) { + if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL) { pipe_ctx->stream_res.tg->funcs->set_gsl( pipe_ctx->stream_res.tg, &gsl); - - pipe_ctx->stream_res.tg->funcs->set_gsl_source_select( - pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0); + if (pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) + pipe_ctx->stream_res.tg->funcs->set_gsl_source_select( + pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0); } else BREAK_TO_DEBUGGER(); } @@ -946,7 +955,7 @@ enum dc_status dcn20_enable_stream_timing( return DC_ERROR_UNEXPECTED; } - hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp); + fsleep(stream->timing.v_total * (stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz)); params.vertical_total_min = stream->adjust.v_total_min; params.vertical_total_max = stream->adjust.v_total_max; @@ -1961,14 +1970,6 @@ static void dcn20_program_pipe( pipe_ctx->plane_state->update_flags.bits.hdr_mult)) hws->funcs.set_hdr_multiplier(pipe_ctx); - if (hws->funcs.populate_mcm_luts) { - if (pipe_ctx->plane_state) { - hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts, - pipe_ctx->plane_state->lut_bank_a); - pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a; - } - } - if (pipe_ctx->plane_state && (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || pipe_ctx->plane_state->update_flags.bits.gamma_change || @@ -2053,7 +2054,7 @@ void dcn20_program_front_end_for_ctx( for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; - if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) { + if (pipe->plane_state) { ASSERT(!pipe->plane_state->triplebuffer_flips); /*turn off triple buffer for full update*/ dc->hwss.program_triplebuffer( @@ -2482,7 +2483,7 @@ bool dcn20_update_bandwidth( struct dce_hwseq *hws = dc->hwseq; /* recalculate DML parameters */ - if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) + if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) return false; /* apply updated bandwidth parameters */ @@ -2806,6 +2807,8 @@ void dcn20_reset_back_end_for_pipe( { struct dc_link *link = pipe_ctx->stream->link; const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + struct dccg *dccg = dc->res_pool->dccg; + struct dtbclk_dto_params dto_params = {0}; DC_LOGGER_INIT(dc->ctx->logger); if (pipe_ctx->stream_res.stream_enc == NULL) { @@ -2866,6 +2869,13 @@ void dcn20_reset_back_end_for_pipe( &pipe_ctx->link_res, pipe_ctx->stream->signal); link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF; } + if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) && dccg + && dc->ctx->dce_version >= DCN_VERSION_3_5) { + dto_params.otg_inst = pipe_ctx->stream_res.tg->inst; + dto_params.timing = &pipe_ctx->stream->timing; + if (dccg && dccg->funcs->set_dtbclk_dto) + dccg->funcs->set_dtbclk_dto(dccg, 
&dto_params); + } } /* @@ -3033,7 +3043,11 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst); phyd32clk = get_phyd32clk_src(link); - dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); + if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) { + dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst); + } else { + dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); + } } else { if (dccg->funcs->enable_symclk_se) dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index e89ebfda4873..37a239219dfe 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -74,6 +74,7 @@ void dcn30_log_color_state(struct dc *dc, { struct dc_context *dc_ctx = dc->ctx; struct resource_pool *pool = dc->res_pool; + bool is_gamut_remap_available = false; int i; DTN_INFO("DPP: DGAM ROM DGAM ROM type DGAM LUT SHAPER mode" @@ -88,16 +89,16 @@ void dcn30_log_color_state(struct dc *dc, struct dcn_dpp_state s = {0}; dpp->funcs->dpp_read_state(dpp, &s); - dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap); + + if (dpp->funcs->dpp_get_gamut_remap) { + dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap); + is_gamut_remap_available = true; + } if (!s.is_enabled) continue; - DTN_INFO("[%2d]: %7x %13s %8s %11s %10s %15s %10s %9s" - " %12s " - "%010lld %010lld %010lld %010lld " - "%010lld %010lld %010lld %010lld " - "%010lld %010lld %010lld %010lld", + DTN_INFO("[%2d]: %7x %13s %8s %11s %10s %15s %10s %9s", dpp->inst, s.pre_dgam_mode, (s.pre_dgam_select == 0) ? "sRGB" : @@ -121,7 +122,14 @@ void dcn30_log_color_state(struct dc *dc, (s.lut3d_size == 0) ? "17x17x17" : "9x9x9", (s.rgam_lut_mode == 0) ? "Bypass" : ((s.rgam_lut_mode == 1) ? "RAM A" : - "RAM B"), + "RAM B")); + + if (is_gamut_remap_available) { + DTN_INFO(" %12s " + "%010lld %010lld %010lld %010lld " + "%010lld %010lld %010lld %010lld " + "%010lld %010lld %010lld %010lld", + (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" : ((s.gamut_remap.gamut_adjust_type == 1) ? 
"HW" : "SW"), @@ -137,6 +145,8 @@ void dcn30_log_color_state(struct dc *dc, s.gamut_remap.temperature_matrix[9].value, s.gamut_remap.temperature_matrix[10].value, s.gamut_remap.temperature_matrix[11].value); + } + DTN_INFO("\n"); } DTN_INFO("\n"); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index f38340aa3f15..8ba934b83957 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -526,9 +526,15 @@ static void dcn31_reset_back_end_for_pipe( link = pipe_ctx->stream->link; + if (dc->hwseq) + dc->hwseq->wa_state.skip_blank_stream = false; + if ((!pipe_ctx->stream->dpms_off || link->link_status.link_active) && - (link->connector_signal == SIGNAL_TYPE_EDP)) + (link->connector_signal == SIGNAL_TYPE_EDP)) { dc->hwss.blank_stream(pipe_ctx); + if (dc->hwseq) + dc->hwseq->wa_state.skip_blank_stream = true; + } pipe_ctx->stream_res.tg->funcs->set_dsc_config( pipe_ctx->stream_res.tg, @@ -556,6 +562,19 @@ static void dcn31_reset_back_end_for_pipe( else if (pipe_ctx->stream_res.audio) dc->hwss.disable_audio_stream(pipe_ctx); + /* Temporary workaround to perform DSC programming ahead of pipe reset + * for smartmux/SPRS + * TODO: Remove SmartMux/SPRS checks once movement of DSC programming is generalized + */ + if (pipe_ctx->stream->timing.flags.DSC) { + if ((pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && + ((link->dc->config.smart_mux_version && link->dc->is_switch_in_progress_dest) + || link->is_dds || link->skip_implict_edp_power_control)) && + (dc_is_dp_signal(pipe_ctx->stream->signal) || + dc_is_virtual_signal(pipe_ctx->stream->signal))) + dc->link_srv->set_dsc_enable(pipe_ctx, false); + } + /* free acquired resources */ if (pipe_ctx->stream_res.audio) { /*disable az_endpoint*/ @@ -570,7 +589,8 @@ static void dcn31_reset_back_end_for_pipe( pipe_ctx->stream_res.audio = NULL; } } - + if (dc->hwseq) + dc->hwseq->wa_state.skip_blank_stream = false; pipe_ctx->stream = NULL; DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n", pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c index be26c925fdfa..e68f21fd5f0f 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c @@ -84,6 +84,20 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) struct dsc_config dsc_cfg; struct dsc_optc_config dsc_optc_cfg = {0}; enum optc_dsc_mode optc_dsc_mode; + struct dcn_dsc_state dsc_state = {0}; + + if (!dsc) { + DC_LOG_DSC("DSC is NULL for tg instance %d:", pipe_ctx->stream_res.tg->inst); + return; + } + + if (dsc->funcs->dsc_read_state) { + dsc->funcs->dsc_read_state(dsc, &dsc_state); + if (!dsc_state.dsc_fw_en) { + DC_LOG_DSC("DSC has been disabled for tg instance %d:", pipe_ctx->stream_res.tg->inst); + return; + } + } /* Enable DSC hw block */ dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index cd0adf72b223..416b1dca3dac 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -1063,15 +1063,17 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool 
enable) dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; if (should_use_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, dsc->inst); + dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h); dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc; ASSERT(odm_dsc); + if (!odm_dsc) + continue; if (should_use_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst); + dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h); odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg); odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst); } @@ -1181,6 +1183,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign struct dc_stream_state *stream = pipe_ctx->stream; unsigned int odm_combine_factor = 0; bool two_pix_per_container = false; + struct dce_hwseq *hws = stream->ctx->dc->hwseq; two_pix_per_container = pipe_ctx->stream_res.tg->funcs->is_two_pixels_per_container(&stream->timing); odm_combine_factor = get_odm_config(pipe_ctx, NULL); @@ -1201,7 +1204,8 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign } else { *k1_div = PIXEL_RATE_DIV_BY_1; *k2_div = PIXEL_RATE_DIV_BY_4; - if ((odm_combine_factor == 2) || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx)) + if ((odm_combine_factor == 2) || (hws->funcs.is_dp_dig_pixel_rate_div_policy && + hws->funcs.is_dp_dig_pixel_rate_div_policy(pipe_ctx))) *k2_div = PIXEL_RATE_DIV_BY_2; } } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 922b8d71cf1a..a267f574b619 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -241,11 +241,6 @@ void dcn35_init_hw(struct dc *dc) dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter); } - if (res_pool->dccg->funcs->dccg_root_gate_disable_control) { - for (i = 0; i < res_pool->pipe_count; i++) - res_pool->dccg->funcs->dccg_root_gate_disable_control(res_pool->dccg, i, 0); - } - for (i = 0; i < res_pool->audio_count; i++) { struct audio *audio = res_pool->audios[i]; @@ -901,12 +896,18 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context) void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context) { + struct dpp *dpp = pipe_ctx->plane_res.dpp; + struct dccg *dccg = dc->res_pool->dccg; + + /* enable DCFCLK current DCHUB */ pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true); /* initialize HUBP on power up */ pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp); - + /*make sure DPPCLK is on*/ + dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, true); + dpp->funcs->dpp_dppclk_control(dpp, false, true); /* make sure OPP_PIPE_CLOCK_EN = 1 */ pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control( pipe_ctx->stream_res.opp, @@ -923,6 +924,7 @@ void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx, // Program system aperture settings pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt); } + //DC_LOG_DEBUG("%s: dpp_inst(%d) =\n", __func__, dpp->inst); if (!pipe_ctx->top_pipe && 
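/*
 * Note the ASSERT(odm_dsc) immediately followed by "if (!odm_dsc)
 * continue;" in the hunk above: the assert flags the broken state
 * loudly in debug builds, while the guard keeps release builds from
 * dereferencing NULL. A reduced, standalone sketch of that pattern:
 */
#include <assert.h>

static int sum_valid(int *const *items, int n)
{
        int i, total = 0;

        for (i = 0; i < n; i++) {
                assert(items[i]);       /* loud in debug builds */
                if (!items[i])          /* safe in release builds */
                        continue;
                total += *items[i];
        }
        return total;
}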
pipe_ctx->plane_state @@ -938,6 +940,8 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct hubp *hubp = pipe_ctx->plane_res.hubp; struct dpp *dpp = pipe_ctx->plane_res.dpp; + struct dccg *dccg = dc->res_pool->dccg; + dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx); @@ -955,7 +959,8 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) hubp->funcs->hubp_clk_cntl(hubp, false); dpp->funcs->dpp_dppclk_control(dpp, false, false); -/*to do, need to support both case*/ + dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, false); + hubp->power_gated = true; hubp->funcs->hubp_reset(hubp); @@ -967,6 +972,8 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) pipe_ctx->top_pipe = NULL; pipe_ctx->bottom_pipe = NULL; pipe_ctx->plane_state = NULL; + //DC_LOG_DEBUG("%s: dpp_inst(%d)=\n", __func__, dpp->inst); + } void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx) @@ -1040,6 +1047,15 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, if (dc->caps.sequential_ono) { update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->stream_res.dsc->inst] = false; update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->stream_res.dsc->inst] = false; + + /* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */ + if (!pipe_ctx->top_pipe && pipe_ctx->plane_res.hubp && + pipe_ctx->plane_res.hubp->inst != pipe_ctx->stream_res.dsc->inst) { + for (j = 0; j < dc->res_pool->pipe_count; ++j) { + update_state->pg_pipe_res_update[PG_HUBP][j] = false; + update_state->pg_pipe_res_update[PG_DPP][j] = false; + } + } } } @@ -1186,6 +1202,25 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context, update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true; if (dc->caps.sequential_ono) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; + + if (new_pipe->stream_res.dsc && !new_pipe->top_pipe && + update_state->pg_pipe_res_update[PG_DSC][new_pipe->stream_res.dsc->inst]) { + update_state->pg_pipe_res_update[PG_HUBP][new_pipe->stream_res.dsc->inst] = true; + update_state->pg_pipe_res_update[PG_DPP][new_pipe->stream_res.dsc->inst] = true; + + /* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */ + if (new_pipe->plane_res.hubp && + new_pipe->plane_res.hubp->inst != new_pipe->stream_res.dsc->inst) { + for (j = 0; j < dc->res_pool->pipe_count; ++j) { + update_state->pg_pipe_res_update[PG_HUBP][j] = true; + update_state->pg_pipe_res_update[PG_DPP][j] = true; + } + } + } + } + for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { if (update_state->pg_pipe_res_update[PG_HUBP][i] && update_state->pg_pipe_res_update[PG_DPP][i]) { @@ -1543,7 +1578,7 @@ static bool should_avoid_empty_tu(struct pipe_ctx *pipe_ctx) struct dc_link_settings *link_settings = &pipe_ctx->link_config.dp_link_settings; const struct dc *dc = pipe_ctx->stream->link->dc; - if (pipe_ctx->stream->link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) + if (pipe_ctx->link_config.dp_tunnel_settings.should_enable_dp_tunneling == false) return false; // Not necessary for MST configurations diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c index 6a82a865209c..a3ccf805bd16 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c @@ -168,6 +168,8 @@ static const struct 
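/*
 * The dcn35 hunks above move DPP root clock gating out of init (which
 * force-ungated every pipe) and pair it with the plane's lifetime:
 * ungate in enable_plane before the DPP is programmed, re-gate in
 * plane_atomic_disable once the block is idle. A reduced sketch of that
 * pairing, with stubbed, hypothetical types:
 */
struct dccg_sim { int gate_disabled[8]; };
struct dpp_sim { int inst; int powered; };

static void root_gate_disable_control(struct dccg_sim *dccg, int inst, int disable_gating)
{
        dccg->gate_disabled[inst] = disable_gating;
}

static void plane_enable(struct dccg_sim *dccg, struct dpp_sim *dpp)
{
        /* make sure DPPCLK runs before touching the block */
        root_gate_disable_control(dccg, dpp->inst, 1);
        dpp->powered = 1;
}

static void plane_disable(struct dccg_sim *dccg, struct dpp_sim *dpp)
{
        dpp->powered = 0;
        /* allow gating again only after the block is down */
        root_gate_disable_control(dccg, dpp->inst, 0);
}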
hwseq_private_funcs dcn35_private_funcs = { .dsc_pg_control = dcn35_dsc_pg_control, .dsc_pg_status = dcn32_dsc_pg_status, .enable_plane = dcn35_enable_plane, + .wait_for_pipe_update_if_needed = dcn10_wait_for_pipe_update_if_needed, + .set_wait_for_update_needed_for_pipe = dcn10_set_wait_for_update_needed_for_pipe, }; void dcn35_hw_sequencer_construct(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c index 902a96940a01..58f2be2a326b 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c @@ -158,10 +158,12 @@ static const struct hwseq_private_funcs dcn351_private_funcs = { .set_mcm_luts = dcn32_set_mcm_luts, .setup_hpo_hw_control = dcn35_setup_hpo_hw_control, .calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values, - .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy, + .is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy, .dsc_pg_control = dcn35_dsc_pg_control, .dsc_pg_status = dcn32_dsc_pg_status, .enable_plane = dcn35_enable_plane, + .wait_for_pipe_update_if_needed = dcn10_wait_for_pipe_update_if_needed, + .set_wait_for_update_needed_for_pipe = dcn10_set_wait_for_update_needed_for_pipe, }; void dcn351_hw_sequencer_construct(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index 8f5da0ded850..cc9f40d97af2 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -2,6 +2,8 @@ // // Copyright 2024 Advanced Micro Devices, Inc. + +#include "os_types.h" #include "dm_services.h" #include "basics/dc_common.h" #include "dm_helpers.h" @@ -49,7 +51,7 @@ #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name -static void dcn401_initialize_min_clocks(struct dc *dc) +void dcn401_initialize_min_clocks(struct dc *dc) { struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk; @@ -143,13 +145,8 @@ void dcn401_init_hw(struct dc *dc) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); // mark dcmode limits present if any clock has distinct AC and DC values from SMU - dc->caps.dcmode_power_limits_present = - (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dcfclk_mhz) || - (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dispclk_mhz) || - (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dtbclk_mhz) || - (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.fclk_mhz) || - (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.memclk_mhz) || - (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.socclk_mhz); + dc->caps.dcmode_power_limits_present = dc->clk_mgr->funcs->is_dc_mode_present && + dc->clk_mgr->funcs->is_dc_mode_present(dc->clk_mgr); } // Initialize the dccg @@ -407,17 +404,16 @@ void dcn401_populate_mcm_luts(struct dc *dc, struct mpc *mpc = dc->res_pool->mpc; union mcm_lut_params m_lut_params; enum dc_cm2_transfer_func_source lut3d_src = mcm_luts.lut3d_data.lut3d_src; - enum hubp_3dlut_fl_format format; + enum 
hubp_3dlut_fl_format format = 0; enum hubp_3dlut_fl_mode mode; - enum hubp_3dlut_fl_width width; + enum hubp_3dlut_fl_width width = 0; enum hubp_3dlut_fl_addressing_mode addr_mode; - enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g; - enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b; - enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r; + enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0; + enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0; + enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0; enum MCM_LUT_XABLE shaper_xable = MCM_LUT_DISABLE; enum MCM_LUT_XABLE lut3d_xable = MCM_LUT_DISABLE; enum MCM_LUT_XABLE lut1d_xable = MCM_LUT_DISABLE; - bool is_17x17x17 = true; bool rval; dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable); @@ -442,7 +438,7 @@ void dcn401_populate_mcm_luts(struct dc *dc, } /* Shaper */ - if (mcm_luts.shaper) { + if (mcm_luts.shaper && mcm_luts.lut3d_data.mpc_3dlut_enable) { memset(&m_lut_params, 0, sizeof(m_lut_params)); if (mcm_luts.shaper->type == TF_TYPE_HWPWL) m_lut_params.pwl = &mcm_luts.shaper->pwl; @@ -454,11 +450,11 @@ void dcn401_populate_mcm_luts(struct dc *dc, m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL; } if (m_lut_params.pwl) { - if (mpc->funcs->populate_lut) - mpc->funcs->populate_lut(mpc, MCM_LUT_SHAPER, m_lut_params, lut_bank_a, mpcc_id); + if (mpc->funcs->mcm.populate_lut) + mpc->funcs->mcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id); + if (mpc->funcs->program_lut_mode) + mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_ENABLE, lut_bank_a, mpcc_id); } - if (mpc->funcs->program_lut_mode) - mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, shaper_xable, lut_bank_a, mpcc_id); } /* 3DLUT */ @@ -467,6 +463,7 @@ void dcn401_populate_mcm_luts(struct dc *dc, memset(&m_lut_params, 0, sizeof(m_lut_params)); if (hubp->funcs->hubp_enable_3dlut_fl) hubp->funcs->hubp_enable_3dlut_fl(hubp, false); + if (mcm_luts.lut3d_data.lut3d_func && mcm_luts.lut3d_data.lut3d_func->state.bits.initialized) { m_lut_params.lut3d = &mcm_luts.lut3d_data.lut3d_func->lut_3d; if (mpc->funcs->populate_lut) @@ -476,16 +473,35 @@ void dcn401_populate_mcm_luts(struct dc *dc, mpcc_id); } break; - case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM: + case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM: + switch (mcm_luts.lut3d_data.gpu_mem_params.size) { + case DC_CM2_GPU_MEM_SIZE_171717: + width = hubp_3dlut_fl_width_17; + break; + case DC_CM2_GPU_MEM_SIZE_TRANSFORMED: + width = hubp_3dlut_fl_width_transformed; + break; + default: + //TODO: handle default case + break; + } + + //check for support + if (mpc->funcs->mcm.is_config_supported && + !mpc->funcs->mcm.is_config_supported(width)) + break; if (mpc->funcs->program_lut_read_write_control) mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, mpcc_id); if (mpc->funcs->program_lut_mode) mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a, mpcc_id); - if (mpc->funcs->program_3dlut_size) - mpc->funcs->program_3dlut_size(mpc, is_17x17x17, mpcc_id); + if (hubp->funcs->hubp_program_3dlut_fl_addr) hubp->funcs->hubp_program_3dlut_fl_addr(hubp, mcm_luts.lut3d_data.gpu_mem_params.addr); + + if (mpc->funcs->mcm.program_bit_depth) + mpc->funcs->mcm.program_bit_depth(mpc, mcm_luts.lut3d_data.gpu_mem_params.bit_depth, mpcc_id); + switch (mcm_luts.lut3d_data.gpu_mem_params.layout) { case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB: mode = hubp_3dlut_fl_mode_native_1; @@ -512,7 
+528,6 @@ void dcn401_populate_mcm_luts(struct dc *dc, switch (mcm_luts.lut3d_data.gpu_mem_params.format_params.format) { case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB: - default: format = hubp_3dlut_fl_format_unorm_12msb_bitslice; break; case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB: @@ -524,11 +539,19 @@ void dcn401_populate_mcm_luts(struct dc *dc, } if (hubp->funcs->hubp_program_3dlut_fl_format) hubp->funcs->hubp_program_3dlut_fl_format(hubp, format); - if (hubp->funcs->hubp_update_3dlut_fl_bias_scale) + if (hubp->funcs->hubp_update_3dlut_fl_bias_scale && + mpc->funcs->mcm.program_bias_scale) { + mpc->funcs->mcm.program_bias_scale(mpc, + mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias, + mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale, + mpcc_id); hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp, - mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias, - mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale); + mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias, + mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale); + } + //navi 4x has a bug and r and blue are swapped and need to be worked around here in + //TODO: need to make a method for get_xbar per asic OR do the workaround in program_crossbar for 4x switch (mcm_luts.lut3d_data.gpu_mem_params.component_order) { case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA: default: @@ -540,21 +563,16 @@ void dcn401_populate_mcm_luts(struct dc *dc, if (hubp->funcs->hubp_program_3dlut_fl_crossbar) hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp, + crossbar_bit_slice_cr_r, crossbar_bit_slice_y_g, - crossbar_bit_slice_cb_b, - crossbar_bit_slice_cr_r); + crossbar_bit_slice_cb_b); + + if (mpc->funcs->mcm.program_lut_read_write_control) + mpc->funcs->mcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, true, mpcc_id); + + if (mpc->funcs->mcm.program_3dlut_size) + mpc->funcs->mcm.program_3dlut_size(mpc, width, mpcc_id); - switch (mcm_luts.lut3d_data.gpu_mem_params.size) { - case DC_CM2_GPU_MEM_SIZE_171717: - default: - width = hubp_3dlut_fl_width_17; - break; - case DC_CM2_GPU_MEM_SIZE_TRANSFORMED: - width = hubp_3dlut_fl_width_transformed; - break; - } - if (hubp->funcs->hubp_program_3dlut_fl_width) - hubp->funcs->hubp_program_3dlut_fl_width(hubp, width); if (mpc->funcs->update_3dlut_fast_load_select) mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst); @@ -936,8 +954,11 @@ void dcn401_enable_stream(struct pipe_ctx *pipe_ctx) if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) { if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { dccg->funcs->set_dpstreamclk(dccg, DPREFCLK, tg->inst, dp_hpo_inst); - - dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); + if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) { + dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst); + } else { + dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); + } } else { dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst, link_enc->transmitter - TRANSMITTER_UNIPHY_A); @@ -1977,23 +1998,15 @@ void dcn401_program_pipe( dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size); } - if (pipe_ctx->update_flags.raw || - (pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.raw) || - pipe_ctx->stream->update_flags.raw) + if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw || + pipe_ctx->plane_state->update_flags.raw || + 
pipe_ctx->stream->update_flags.raw)) dc->hwss.update_dchubp_dpp(dc, pipe_ctx, context); if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable || pipe_ctx->plane_state->update_flags.bits.hdr_mult)) hws->funcs.set_hdr_multiplier(pipe_ctx); - if (hws->funcs.populate_mcm_luts) { - if (pipe_ctx->plane_state) { - hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts, - pipe_ctx->plane_state->lut_bank_a); - pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a; - } - } - if (pipe_ctx->plane_state && (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || pipe_ctx->plane_state->update_flags.bits.gamma_change || @@ -2078,7 +2091,7 @@ void dcn401_program_front_end_for_ctx( for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; - if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) { + if (pipe->plane_state) { if (pipe->plane_state->triplebuffer_flips) BREAK_TO_DEBUGGER(); @@ -2368,7 +2381,7 @@ bool dcn401_update_bandwidth( struct dce_hwseq *hws = dc->hwseq; /* recalculate DML parameters */ - if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) + if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) return false; /* apply updated bandwidth parameters */ @@ -2619,10 +2632,12 @@ void dcn401_plane_atomic_power_down(struct dc *dc, DC_LOGGER_INIT(dc->ctx->logger); - REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); - if (org_ip_request_cntl == 0) - REG_SET(DC_IP_REQUEST_CNTL, 0, - IP_REQUEST_EN, 1); + if (REG(DC_IP_REQUEST_CNTL)) { + REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); + if (org_ip_request_cntl == 0) + REG_SET(DC_IP_REQUEST_CNTL, 0, + IP_REQUEST_EN, 1); + } if (hws->funcs.dpp_pg_control) hws->funcs.dpp_pg_control(hws, dpp->inst, false); @@ -2633,7 +2648,7 @@ void dcn401_plane_atomic_power_down(struct dc *dc, hubp->funcs->hubp_reset(hubp); dpp->funcs->dpp_reset(dpp); - if (org_ip_request_cntl == 0) + if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL)) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h index 781cf0efccc6..2621b7725267 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h @@ -109,4 +109,5 @@ void dcn401_detect_pipe_changes( void dcn401_plane_atomic_power_down(struct dc *dc, struct dpp *dpp, struct hubp *hubp); +void dcn401_initialize_min_clocks(struct dc *dc); #endif /* __DC_HWSS_DCN401_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index c8b5ed834579..9df8030e37f7 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -195,6 +195,8 @@ enum block_sequence_func { DMUB_SUBVP_SAVE_SURF_ADDR, HUBP_WAIT_FOR_DCC_META_PROP, DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST, + /* This must be the last value in this enum, add new ones above */ + HWSS_BLOCK_SEQUENCE_FUNC_COUNT }; struct block_sequence { @@ -202,6 +204,8 @@ struct block_sequence { enum block_sequence_func func; }; +#define MAX_HWSS_BLOCK_SEQUENCE_SIZE (HWSS_BLOCK_SEQUENCE_FUNC_COUNT * MAX_PIPES) + struct hw_sequencer_funcs { void (*hardware_release)(struct dc *dc); /* Embedded Display Related */ @@ -498,6 +502,9 @@ void get_hdr_visual_confirm_color( void 
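/*
 * The hw_sequencer.h change above replaces the magic bound
 * block_sequence[100] with a size derived from a sentinel enum entry,
 * so adding new sequence funcs can never outgrow the array. Minimal
 * illustration with reduced names (the MAX_PIPES value is assumed):
 */
#define MAX_PIPES 6

enum seq_func {
        SEQ_FUNC_A,
        SEQ_FUNC_B,
        /* must stay last: yields the number of entries above */
        SEQ_FUNC_COUNT
};

struct block_step {
        enum seq_func func;
};

/* worst case: every func queued once per pipe */
static struct block_step sequence[SEQ_FUNC_COUNT * MAX_PIPES];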
get_mpctree_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color); +void get_smartmux_visual_confirm_color( + struct dc *dc, + struct tg_color *color); void get_vabc_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color); @@ -534,13 +541,13 @@ void set_drr_and_clear_adjust_pending( struct drr_params *params); void hwss_execute_sequence(struct dc *dc, - struct block_sequence block_sequence[], + struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE], int num_steps); void hwss_build_fast_sequence(struct dc *dc, struct dc_dmub_cmd *dc_dmub_cmd, unsigned int dmub_cmd_count, - struct block_sequence block_sequence[], + struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE], unsigned int *num_steps, struct pipe_ctx *pipe_ctx, struct dc_stream_status *stream_status, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h index 22a5d4a03c98..1e2d247fbbac 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h @@ -49,6 +49,7 @@ struct hwseq_wa_state { bool DEGVIDCN10_253_applied; bool disallow_self_refresh_during_multi_plane_transition_applied; unsigned int disallow_self_refresh_during_multi_plane_transition_applied_on_frame; + bool skip_blank_stream; }; struct pipe_ctx; @@ -183,6 +184,8 @@ struct hwseq_private_funcs { struct dc_cm2_func_luts mcm_luts, bool lut_bank_a); void (*perform_3dlut_wa_unlock)(struct pipe_ctx *pipe_ctx); + void (*wait_for_pipe_update_if_needed)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only); + void (*set_wait_for_update_needed_for_pipe)(struct dc *dc, struct pipe_ctx *pipe_ctx); }; struct dce_hwseq { diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h index b5afd8c3103d..82085d9c3f40 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h @@ -26,6 +26,8 @@ #ifndef _CORE_STATUS_H_ #define _CORE_STATUS_H_ +#include "dc_hw_types.h" + enum dc_status { DC_OK = 1, @@ -56,6 +58,8 @@ enum dc_status { DC_NO_LINK_ENC_RESOURCE = 26, DC_FAIL_DP_PAYLOAD_ALLOCATION = 27, DC_FAIL_DP_LINK_BANDWIDTH = 28, + DC_FAIL_HW_CURSOR_SUPPORT = 29, + DC_FAIL_DP_TUNNEL_BW_VALIDATE = 30, DC_ERROR_UNEXPECTED = -1 }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index d0021f25f3d8..f0d7185153b2 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -65,6 +65,9 @@ struct resource_pool; struct dc_state; struct resource_context; struct clk_bw_params; +struct dc_mcache_params; + +#define MAX_RMCM_INST 2 struct resource_funcs { enum engine_id (*get_preferred_eng_id_dpia)(unsigned int dpia_index); @@ -78,11 +81,10 @@ struct resource_funcs { /* Create a minimal link encoder object with no dc_link object * associated with it. 
*/ struct link_encoder *(*link_enc_create_minimal)(struct dc_context *ctx, enum engine_id eng_id); - - bool (*validate_bandwidth)( + enum dc_status (*validate_bandwidth)( struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); void (*calculate_wm_and_dlg)( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, @@ -107,7 +109,7 @@ struct resource_funcs { struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); /* * Algorithm for assigning available link encoders to links. @@ -218,6 +220,16 @@ struct resource_funcs { int (*get_power_profile)(const struct dc_state *context); unsigned int (*get_det_buffer_size)(const struct dc_state *context); unsigned int (*get_vstartup_for_pipe)(struct pipe_ctx *pipe_ctx); + unsigned int (*get_max_hw_cursor_size)(const struct dc *dc, + struct dc_state *state, + const struct dc_stream_state *stream); + bool (*program_mcache_pipe_config)(struct dc_state *context, + const struct dc_mcache_params *mcache_params); + enum dc_status (*update_dc_state_for_encoder_switch)(struct dc_link *link, + struct dc_link_settings *link_setting, + uint8_t pipe_count, + struct pipe_ctx *pipes, + struct audio_output *audio_output); }; struct audio_support{ @@ -276,6 +288,7 @@ struct resource_pool { struct hpo_dp_link_encoder *hpo_dp_link_enc[MAX_HPO_DP2_LINK_ENCODERS]; struct dc_3dlut *mpc_lut[MAX_PIPES]; struct dc_transfer_func *mpc_shaper[MAX_PIPES]; + struct dc_rmcm_3dlut rmcm_3dlut[MAX_RMCM_INST]; struct { unsigned int xtalin_clock_inKhz; @@ -382,7 +395,9 @@ struct link_resource { struct link_config { struct dc_link_settings dp_link_settings; + struct dc_tunnel_settings dp_tunnel_settings; }; + union pipe_update_flags { struct { uint32_t enable : 1; @@ -480,6 +495,10 @@ struct pipe_ctx { struct pixel_rate_divider pixel_rate_divider; /* pixels borrowed from hblank to hactive */ uint8_t hblank_borrow; + /* next vupdate */ + uint32_t next_vupdate; + uint32_t wait_frame_count; + bool wait_is_required; }; /* Data used for dynamic link encoder assignment. 
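/*
 * validate_bandwidth now returns enum dc_status and takes an explicit
 * dc_validate_mode instead of a bool fast_validate, so callers must
 * compare against DC_OK rather than truth-testing the result, as the
 * dcn20/dcn401 update_bandwidth hunks above do:
 *
 *      before: if (!funcs->validate_bandwidth(dc, context, false))
 *                      return false;
 *      after:  if (funcs->validate_bandwidth(dc, context,
 *                              DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK)
 *                      return false;
 *
 * Truth-testing would be a silent bug here: DC_OK is 1 and no dc_status
 * value is 0, so !status would never fire and every failure would pass.
 */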
@@ -507,7 +526,7 @@ struct resource_context { unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS]; int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS]; bool is_mpc_3dlut_acquired[MAX_PIPES]; - /* solely used for build scalar data in dml2 */ + /* used to build scalar data in dml2 and for edp backlight programming */ struct pipe_ctx temp_pipe; }; @@ -545,7 +564,10 @@ struct dcn_bw_output { struct dml2_mcache_surface_allocation mcache_allocations[DML2_MAX_PLANES]; struct dmub_cmd_fams2_global_config fams2_global_config; union dmub_cmd_fams2_config fams2_stream_base_params[DML2_MAX_PLANES]; - union dmub_cmd_fams2_config fams2_stream_sub_params[DML2_MAX_PLANES]; + union { + union dmub_cmd_fams2_config fams2_stream_sub_params[DML2_MAX_PLANES]; + union dmub_fams2_stream_static_sub_state_v2 fams2_stream_sub_params_v2[DML2_MAX_PLANES]; + }; struct dml2_display_arb_regs arb_regs; }; @@ -630,7 +652,7 @@ struct dc_state { */ struct bw_context bw_ctx; - struct block_sequence block_sequence[100]; + struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE]; unsigned int block_sequence_steps; struct dc_dmub_cmd dc_dmub_cmd[10]; unsigned int dmub_cmd_count; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h index d19a595c2be4..134091d5842d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h @@ -622,7 +622,7 @@ extern const struct dcn_ip_params dcn10_ip_defaults; bool dcn_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn_get_soc_clks( struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index c14d64687a3d..2c9a4a12bd8a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -100,6 +100,17 @@ struct dcn301_clk_internal { #define MAX_NUM_DPM_LVL 8 #define WM_SET_COUNT 4 +enum clk_type { + CLK_TYPE_DCFCLK, + CLK_TYPE_FCLK, + CLK_TYPE_MCLK, + CLK_TYPE_SOCCLK, + CLK_TYPE_DTBCLK, + CLK_TYPE_DISPCLK, + CLK_TYPE_DPPCLK, + CLK_TYPE_DSCCLK, + CLK_TYPE_COUNT +}; struct clk_limit_table_entry { unsigned int voltage; /* milivolts withh 2 fractional bits */ @@ -324,6 +335,11 @@ struct clk_mgr_funcs { int (*get_dispclk_from_dentist)(struct clk_mgr *clk_mgr_base); + bool (*is_dc_mode_present)(struct clk_mgr *clk_mgr); + + uint32_t (*set_smartmux_switch)(struct clk_mgr *clk_mgr, uint32_t pins_to_set); + + unsigned int (*get_max_clock_khz)(struct clk_mgr *clk_mgr_base, enum clk_type clk_type); }; struct clk_mgr { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h index 221645c023b5..bac8febad69a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h @@ -199,6 +199,7 @@ enum dentist_divider_range { CLK_SR_DCN35(CLK1_CLK4_ALLOW_DS), \ CLK_SR_DCN35(CLK1_CLK5_ALLOW_DS), \ CLK_SR_DCN35(CLK5_spll_field_8), \ + CLK_SR_DCN35(CLK6_spll_field_8), \ SR(DENTIST_DISPCLK_CNTL), \ #define CLK_COMMON_MASK_SH_LIST_DCN32(mask_sh) \ @@ -307,7 +308,7 @@ struct clk_mgr_registers { uint32_t CLK1_CLK4_ALLOW_DS; uint32_t CLK1_CLK5_ALLOW_DS; uint32_t CLK5_spll_field_8; - + uint32_t CLK6_spll_field_8; }; struct clk_mgr_shift { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index 
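/*
 * get_max_clock_khz is a new optional clk_mgr hook keyed by the clk_type
 * enum above. Like the other optional entries in clk_mgr_funcs, callers
 * are expected to NULL-check it first; a hypothetical usage sketch
 * (helper name and fallback behavior are illustrative):
 *
 *      static unsigned int max_dispclk_khz(struct clk_mgr *clk_mgr)
 *      {
 *              if (clk_mgr->funcs->get_max_clock_khz)
 *                      return clk_mgr->funcs->get_max_clock_khz(clk_mgr,
 *                                      CLK_TYPE_DISPCLK);
 *              return 0;       /* unknown: fall back to existing limits */
 *      }
 */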
e94e9ba60f55..61c4d2a7db1c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -211,7 +211,7 @@ struct dccg_funcs { struct dccg *dccg, enum streamclk_source src, uint32_t otg_inst); - void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst); + void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst, uint32_t num_slices_h); void (*set_ref_dscclk)(struct dccg *dccg, uint32_t dsc_inst); void (*dccg_root_gate_disable_control)(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating); }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index b610beb075d5..cee29e89ec5c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -282,7 +282,7 @@ struct hubp_funcs { void (*hubp_enable_3dlut_fl)(struct hubp *hubp, bool enable); void (*hubp_program_3dlut_fl_addressing_mode)(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode); void (*hubp_program_3dlut_fl_width)(struct hubp *hubp, enum hubp_3dlut_fl_width width); - void (*hubp_program_3dlut_fl_tmz_protected)(struct hubp *hubp, bool protection_enabled); + void (*hubp_program_3dlut_fl_tmz_protected)(struct hubp *hubp, uint8_t protection_bits); void (*hubp_program_3dlut_fl_crossbar)(struct hubp *hubp, enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g, enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h index 3a89cc0cffc1..7641439f6ca0 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h @@ -190,6 +190,42 @@ struct mpc_grph_gamut_adjustment { enum mpcc_gamut_remap_id mpcc_gamut_remap_block_id; }; +struct mpc_rmcm_regs { + uint32_t rmcm_3dlut_mem_pwr_state; + uint32_t rmcm_3dlut_mem_pwr_force; + uint32_t rmcm_3dlut_mem_pwr_dis; + uint32_t rmcm_3dlut_mem_pwr_mode; + uint32_t rmcm_3dlut_size; + uint32_t rmcm_3dlut_mode; + uint32_t rmcm_3dlut_mode_cur; + uint32_t rmcm_3dlut_read_sel; + uint32_t rmcm_3dlut_30bit_en; + uint32_t rmcm_3dlut_wr_en_mask; + uint32_t rmcm_3dlut_ram_sel; + uint32_t rmcm_3dlut_out_norm_factor; + uint32_t rmcm_3dlut_fl_sel; + uint32_t rmcm_3dlut_out_offset_r; + uint32_t rmcm_3dlut_out_scale_r; + uint32_t rmcm_3dlut_fl_done; + uint32_t rmcm_3dlut_fl_soft_underflow; + uint32_t rmcm_3dlut_fl_hard_underflow; + uint32_t rmcm_cntl; + uint32_t rmcm_shaper_mem_pwr_state; + uint32_t rmcm_shaper_mem_pwr_force; + uint32_t rmcm_shaper_mem_pwr_dis; + uint32_t rmcm_shaper_mem_pwr_mode; + uint32_t rmcm_shaper_lut_mode; + uint32_t rmcm_shaper_mode_cur; + uint32_t rmcm_shaper_lut_write_en_mask; + uint32_t rmcm_shaper_lut_write_sel; + uint32_t rmcm_shaper_offset_b; + uint32_t rmcm_shaper_scale_b; + uint32_t rmcm_shaper_rama_exp_region_start_b; + uint32_t rmcm_shaper_rama_exp_region_start_seg_b; + uint32_t rmcm_shaper_rama_exp_region_end_b; + uint32_t rmcm_shaper_rama_exp_region_end_base_b; +}; + struct mpcc_sm_cfg { bool enable; /* 0-single plane,2-row subsampling,4-column subsampling,6-checkboard subsampling */ @@ -301,6 +337,7 @@ struct mpcc_state { uint32_t rgam_mode; uint32_t rgam_lut; struct mpc_grph_gamut_adjustment gamut_remap; + struct mpc_rmcm_regs rmcm_regs; }; /** @@ -967,23 +1004,6 @@ struct mpc_funcs { */ void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx); - /** - * @get_3dlut_fast_load_status: - * - * Get 3D LUT fast load status and reference them with 
done, soft_underflow and hard_underflow pointers. - * - * Parameters: - * - [in/out] mpc - MPC context. - * - [in] mpcc_id - * - [in/out] done - * - [in/out] soft_underflow - * - [in/out] hard_underflow - * - * Return: - * - * void - */ - void (*get_3dlut_fast_load_status)(struct mpc *mpc, int mpcc_id, uint32_t *done, uint32_t *soft_underflow, uint32_t *hard_underflow); /** * @populate_lut: @@ -1054,6 +1074,45 @@ struct mpc_funcs { * void */ void (*program_3dlut_size)(struct mpc *mpc, bool is_17x17x17, int mpcc_id); + + /** + * @mcm: + * + * MPC MCM new HW sequential programming functions + */ + struct { + void (*program_3dlut_size)(struct mpc *mpc, uint32_t width, int mpcc_id); + void (*program_bias_scale)(struct mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id); + void (*program_bit_depth)(struct mpc *mpc, uint16_t bit_depth, int mpcc_id); + bool (*is_config_supported)(uint32_t width); + void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id, + bool lut_bank_a, bool enabled, int mpcc_id); + + void (*populate_lut)(struct mpc *mpc, const union mcm_lut_params params, + bool lut_bank_a, int mpcc_id); + } mcm; + + /** + * @rmcm: + * + * MPC RMCM new HW sequential programming functions + */ + struct { + void (*enable_3dlut_fl)(struct mpc *mpc, bool enable, int mpcc_id); + void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx); + void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id, + bool lut_bank_a, bool enabled, int mpcc_id); + void (*program_lut_mode)(struct mpc *mpc, const enum MCM_LUT_XABLE xable, + bool lut_bank_a, int mpcc_id); + void (*program_3dlut_size)(struct mpc *mpc, uint32_t width, int mpcc_id); + void (*program_bias_scale)(struct mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id); + void (*program_bit_depth)(struct mpc *mpc, uint16_t bit_depth, int mpcc_id); + bool (*is_config_supported)(uint32_t width); + + void (*power_on_shaper_3dlut)(struct mpc *mpc, uint32_t mpcc_id, bool power_on); + void (*populate_lut)(struct mpc *mpc, const union mcm_lut_params params, + bool lut_bank_a, int mpcc_id); + } rmcm; }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h index 7f371cbb35cd..0d5a8358a778 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h @@ -68,6 +68,7 @@ struct optc { int pstate_keepout; struct dc_crtc_timing orginal_patched_timing; enum signal_type signal; + uint32_t max_frame_count; }; void optc1_read_otg_state(struct timing_generator *optc, struct dcn_otg_state *s); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h index 00ea3864dd4d..44f86cc2d1d6 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h @@ -46,6 +46,8 @@ struct pg_cntl_funcs { void (*opp_pg_control)(struct pg_cntl *pg_cntl, unsigned int opp_inst, bool power_on); void (*optc_pg_control)(struct pg_cntl *pg_cntl, unsigned int optc_inst, bool power_on); void (*dwb_pg_control)(struct pg_cntl *pg_cntl, bool power_on); + void (*mem_pg_control)(struct pg_cntl *pg_cntl, bool power_on); + void (*dio_pg_control)(struct pg_cntl *pg_cntl, bool power_on); void (*init_pg_status)(struct pg_cntl *pg_cntl); }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index fe7f3137f228..27f950ae45ee 100644 --- 
a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -117,6 +117,7 @@ struct stream_encoder { uint32_t stream_enc_inst; struct vpg *vpg; struct afmt *afmt; + struct apg *apg; }; struct enc_state { diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h index 2948a696ee12..f2503402c10e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link.h @@ -144,9 +144,9 @@ struct link_service { uint32_t (*dp_link_bandwidth_kbps)( const struct dc_link *link, const struct dc_link_settings *link_settings); - bool (*validate_dpia_bandwidth)( - const struct dc_stream_state *stream, - const unsigned int num_streams); + enum dc_status (*validate_dp_tunnel_bandwidth)( + const struct dc *dc, + const struct dc_state *new_ctx); uint32_t (*dp_required_hblank_size_bytes)( const struct dc_link *link, @@ -207,6 +207,9 @@ struct link_service { bool (*dp_decide_link_settings)( struct dc_stream_state *stream, struct dc_link_settings *link_setting); + void (*dp_decide_tunnel_settings)( + struct dc_stream_state *stream, + struct dc_tunnel_settings *dp_tunnel_setting); enum dp_link_encoding (*mst_decide_link_encoding_format)( const struct dc_link *link); bool (*edp_decide_link_settings)(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h index a402df225a76..26cb1459b743 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h +++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h @@ -508,6 +508,10 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx, initial_val, \ n, __VA_ARGS__) +#define IX_REG_SET_SYNC(index, init_value, f1, v1) \ + IX_REG_SET_N_SYNC(index, 1, init_value, \ + FN(reg, f1), v1) + #define IX_REG_SET_2_SYNC(index, init_value, f1, v1, f2, v2) \ IX_REG_SET_N_SYNC(index, 2, init_value, \ FN(reg, f1), v1,\ diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 9458187b834d..a890f581f4e8 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -32,6 +32,7 @@ #define MEMORY_TYPE_MULTIPLIER_CZ 4 #define MEMORY_TYPE_HBM 2 +#define MAX_MCACHES 8 #define IS_PIPE_SYNCD_VALID(pipe) ((((pipe)->pipe_idx_syncd) & 0x80)?1:0) @@ -65,6 +66,13 @@ struct resource_straps { uint32_t audio_stream_number; }; +struct dc_mcache_allocations { + int global_mcache_ids_plane0[MAX_MCACHES + 1]; + int global_mcache_ids_plane1[MAX_MCACHES + 1]; + int global_mcache_ids_mall_plane0[MAX_MCACHES + 1]; + int global_mcache_ids_mall_plane1[MAX_MCACHES + 1]; +}; + struct resource_create_funcs { void (*read_dce_straps)( struct dc_context *ctx, struct resource_straps *straps); @@ -628,8 +636,6 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx); -bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream); - /* Get hw programming parameters container from pipe context * @pipe_ctx: pipe context * @dscl_prog_data: struct to hold programmable hw reg values diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c index 953f4a4dacad..33ce470e4c88 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c 
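/*
 * IX_REG_SET_SYNC above completes the family of indexed-register
 * setters: it is the single-field form of the existing
 * IX_REG_SET_2_SYNC, expanding to IX_REG_SET_N_SYNC with n = 1. A
 * hypothetical call shape (index and field names are illustrative):
 *
 *      IX_REG_SET_SYNC(DC_IX_SOME_INDEX, 0, SOME_FIELD, value);
 *
 * matching how IX_REG_SET_2_SYNC is invoked with two field/value pairs.
 */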
@@ -37,36 +37,9 @@ #include "ivsrcid/ivsrcid_vislands30.h" -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c b/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c index 2c72074310c7..d777b85e70da 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c @@ -46,36 +46,9 @@ #include "dc_types.h" -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - DC_HPD1_INT_STATUS, - DC_HPD1_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 0 : 1, - DC_HPD1_INT_CONTROL, - DC_HPD1_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd1_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { @@ -391,5 +364,3 @@ struct irq_service *dal_irq_service_dce60_create( dce60_irq_construct(irq_service, init_data); return irq_service; } - - diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c index 49317934ef4f..3a9163acb49b 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c @@ -37,36 +37,9 @@ #include "dc_types.h" -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - DC_HPD1_INT_STATUS, - DC_HPD1_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 
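/*
 * Every removed per-ASIC hpd_ack body in the hunks that follow was
 * identical: read the delayed HPD sense bit, ack the interrupt via
 * dal_irq_service_ack_generic, then invert DC_HPD_INT_POLARITY so the
 * next interrupt fires on the opposite level (plug vs. unplug). The
 * copies are replaced by shared hpd0_ack/hpd1_ack helpers that differ
 * only in which register block they address. The core of the polarity
 * toggle, reduced to its arithmetic:
 */
static unsigned int next_hpd_polarity(unsigned int sense_delayed)
{
        /* line reads high -> watch for low next, and vice versa */
        return sense_delayed ? 0 : 1;
}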
0 : 1, - DC_HPD1_INT_CONTROL, - DC_HPD1_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd1_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { @@ -303,5 +276,3 @@ struct irq_service *dal_irq_service_dce80_create( dce80_irq_construct(irq_service, init_data); return irq_service; } - - diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c index 9ca28565a9d1..4ce9edd16344 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c @@ -129,36 +129,9 @@ static enum dc_irq_source to_dal_irq_source_dcn10(struct irq_service *irq_servic } } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c index 916f0c974637..5847af0e66cb 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c @@ -130,36 +130,9 @@ static enum dc_irq_source to_dal_irq_source_dcn20( } } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 
0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c index 1d61d475d36f..6417011d2246 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c @@ -80,36 +80,9 @@ static enum dc_irq_source to_dal_irq_source_dcn201( } } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index 42cdfe6c3538..71d2f065140b 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -132,36 +132,9 @@ static enum dc_irq_source to_dal_irq_source_dcn21(struct irq_service *irq_servic return DC_IRQ_SOURCE_INVALID; } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 
0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c index a443a8abb1ea..2a4080bdcf6b 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c @@ -139,36 +139,9 @@ static enum dc_irq_source to_dal_irq_source_dcn30( } } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { @@ -447,4 +420,3 @@ struct irq_service *dal_irq_service_dcn30_create( dcn30_irq_construct(irq_service, init_data); return irq_service; } - diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c b/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c index 8ffc7e2c681a..624f1ac309f8 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c @@ -126,26 +126,9 @@ static enum dc_irq_source to_dal_irq_source_dcn302(struct irq_service *irq_servi } } -static bool hpd_ack(struct irq_service *irq_service, const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = get_reg_field_value(value, HPD0_DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value(value, current_status ? 
0 : 1, HPD0_DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c index 262bb8b74b15..137caffae916 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c @@ -77,26 +77,9 @@ static enum dc_irq_source to_dal_irq_source_dcn303(struct irq_service *irq_servi } } -static bool hpd_ack(struct irq_service *irq_service, const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = get_reg_field_value(value, HPD0_DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value(value, current_status ? 0 : 1, HPD0_DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c index 53e78ae7eecf..921cb167d920 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c @@ -128,36 +128,9 @@ static enum dc_irq_source to_dal_irq_source_dcn31(struct irq_service *irq_servic } } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 
0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c index e0563e880432..0118fd6e5db0 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c @@ -130,36 +130,9 @@ static enum dc_irq_source to_dal_irq_source_dcn314(struct irq_service *irq_servi } } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c b/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c index 2ef22299101a..adebfc888618 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c @@ -135,36 +135,9 @@ static enum dc_irq_source to_dal_irq_source_dcn315( } } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c b/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c index f0ac0aeeac51..e9e315c75d76 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c @@ -129,36 +129,9 @@ static enum dc_irq_source to_dal_irq_source_dcn32( } } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 
0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { @@ -191,6 +164,16 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = { .ack = NULL }; +static struct irq_source_info_funcs vline1_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +static struct irq_source_info_funcs vline2_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + #undef BASE_INNER #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg @@ -259,6 +242,13 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = { .funcs = &pflip_irq_info_funcs\ } +#define vblank_int_entry(reg_num)\ + [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\ + IRQ_REG_ENTRY(OTG, reg_num,\ + OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\ + OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\ + .funcs = &vblank_irq_info_funcs\ + } /* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic * of DCE's DC_IRQ_SOURCE_VUPDATEx. */ @@ -270,14 +260,6 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = { .funcs = &vupdate_no_lock_irq_info_funcs\ } -#define vblank_int_entry(reg_num)\ - [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\ - IRQ_REG_ENTRY(OTG, reg_num,\ - OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\ - OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\ - .funcs = &vblank_irq_info_funcs\ -} - #define vline0_int_entry(reg_num)\ [DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\ IRQ_REG_ENTRY(OTG, reg_num,\ @@ -285,6 +267,20 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = { OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\ .funcs = &vline0_irq_info_funcs\ } +#define vline1_int_entry(reg_num)\ + [DC_IRQ_SOURCE_DC1_VLINE1 + reg_num] = {\ + IRQ_REG_ENTRY(OTG, reg_num,\ + OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE,\ + OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_CLEAR),\ + .funcs = &vline1_irq_info_funcs\ + } +#define vline2_int_entry(reg_num)\ + [DC_IRQ_SOURCE_DC1_VLINE2 + reg_num] = {\ + IRQ_REG_ENTRY(OTG, reg_num,\ + OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE,\ + OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_CLEAR),\ + .funcs = &vline2_irq_info_funcs\ + } #define dmub_outbox_int_entry()\ [DC_IRQ_SOURCE_DMCUB_OUTBOX] = {\ IRQ_REG_ENTRY_DMUB(\ @@ -387,21 +383,29 @@ irq_source_info_dcn32[DAL_IRQ_SOURCES_NUMBER] = { dc_underflow_int_entry(6), [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(), [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(), - vupdate_no_lock_int_entry(0), - vupdate_no_lock_int_entry(1), - vupdate_no_lock_int_entry(2), - vupdate_no_lock_int_entry(3), vblank_int_entry(0), vblank_int_entry(1), vblank_int_entry(2), vblank_int_entry(3), + [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(), + [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(), + dmub_outbox_int_entry(), + vupdate_no_lock_int_entry(0), + vupdate_no_lock_int_entry(1), + vupdate_no_lock_int_entry(2), + vupdate_no_lock_int_entry(3), vline0_int_entry(0), vline0_int_entry(1), vline0_int_entry(2), vline0_int_entry(3), - [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(), - [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(), - dmub_outbox_int_entry(), + vline1_int_entry(0), + vline1_int_entry(1), + vline1_int_entry(2), + vline1_int_entry(3), + vline2_int_entry(0), + vline2_int_entry(1), + vline2_int_entry(2), + vline2_int_entry(3) }; static const struct 
irq_service_funcs irq_service_funcs_dcn32 = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c b/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c index ea8c271171bc..79e5e8c137ca 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c @@ -127,36 +127,9 @@ static enum dc_irq_source to_dal_irq_source_dcn35( } } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c index 7ec8e0de2f01..163b8ee9ebf7 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c @@ -106,36 +106,9 @@ static enum dc_irq_source to_dal_irq_source_dcn351( } } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c b/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c index ea958628f8b8..f716ab0fd30e 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c @@ -105,36 +105,9 @@ static enum dc_irq_source to_dal_irq_source_dcn36( } } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 
0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c b/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c index b43c9524b0de..fd9bb1950c20 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c @@ -109,36 +109,9 @@ static enum dc_irq_source to_dal_irq_source_dcn401( } } -static bool hpd_ack( - struct irq_service *irq_service, - const struct irq_source_info *info) -{ - uint32_t addr = info->status_reg; - uint32_t value = dm_read_reg(irq_service->ctx, addr); - uint32_t current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE_DELAYED); - - dal_irq_service_ack_generic(irq_service, info); - - value = dm_read_reg(irq_service->ctx, info->enable_reg); - - set_reg_field_value( - value, - current_status ? 0 : 1, - HPD0_DC_HPD_INT_CONTROL, - DC_HPD_INT_POLARITY); - - dm_write_reg(irq_service->ctx, info->enable_reg, value); - - return true; -} - static struct irq_source_info_funcs hpd_irq_info_funcs = { .set = NULL, - .ack = hpd_ack + .ack = hpd0_ack }; static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { @@ -171,6 +144,16 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = { .ack = NULL }; +static struct irq_source_info_funcs vline1_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +static struct irq_source_info_funcs vline2_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + #undef BASE_INNER #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg @@ -239,6 +222,13 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = { .funcs = &pflip_irq_info_funcs\ } +#define vblank_int_entry(reg_num)\ + [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\ + IRQ_REG_ENTRY(OTG, reg_num,\ + OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\ + OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\ + .funcs = &vblank_irq_info_funcs\ + } /* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic * of DCE's DC_IRQ_SOURCE_VUPDATEx. 
*/ @@ -250,13 +240,6 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = { .funcs = &vupdate_no_lock_irq_info_funcs\ } -#define vblank_int_entry(reg_num)\ - [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\ - IRQ_REG_ENTRY(OTG, reg_num,\ - OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\ - OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\ - .funcs = &vblank_irq_info_funcs\ - } #define vline0_int_entry(reg_num)\ [DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\ IRQ_REG_ENTRY(OTG, reg_num,\ @@ -264,6 +247,20 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = { OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\ .funcs = &vline0_irq_info_funcs\ } +#define vline1_int_entry(reg_num)\ + [DC_IRQ_SOURCE_DC1_VLINE1 + reg_num] = {\ + IRQ_REG_ENTRY(OTG, reg_num,\ + OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE,\ + OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_CLEAR),\ + .funcs = &vline1_irq_info_funcs\ + } +#define vline2_int_entry(reg_num)\ + [DC_IRQ_SOURCE_DC1_VLINE2 + reg_num] = {\ + IRQ_REG_ENTRY(OTG, reg_num,\ + OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE,\ + OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_CLEAR),\ + .funcs = &vline2_irq_info_funcs\ + } #define dmub_outbox_int_entry()\ [DC_IRQ_SOURCE_DMCUB_OUTBOX] = {\ IRQ_REG_ENTRY_DMUB(\ @@ -364,21 +361,29 @@ irq_source_info_dcn401[DAL_IRQ_SOURCES_NUMBER] = { dc_underflow_int_entry(6), [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(), [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(), - vupdate_no_lock_int_entry(0), - vupdate_no_lock_int_entry(1), - vupdate_no_lock_int_entry(2), - vupdate_no_lock_int_entry(3), vblank_int_entry(0), vblank_int_entry(1), vblank_int_entry(2), vblank_int_entry(3), + [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(), + [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(), + dmub_outbox_int_entry(), + vupdate_no_lock_int_entry(0), + vupdate_no_lock_int_entry(1), + vupdate_no_lock_int_entry(2), + vupdate_no_lock_int_entry(3), vline0_int_entry(0), vline0_int_entry(1), vline0_int_entry(2), vline0_int_entry(3), - [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(), - [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(), - dmub_outbox_int_entry(), + vline1_int_entry(0), + vline1_int_entry(1), + vline1_int_entry(2), + vline1_int_entry(3), + vline2_int_entry(0), + vline2_int_entry(1), + vline2_int_entry(2), + vline2_int_entry(3), }; static const struct irq_service_funcs irq_service_funcs_dcn401 = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c index eca3d7ee7e4e..b595a11c5eaf 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c +++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c @@ -41,6 +41,16 @@ #include "reg_helper.h" #include "irq_service.h" +//HPD0_DC_HPD_INT_STATUS +#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L +#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L +#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4 +#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8 +//HPD1_DC_HPD_INT_STATUS +#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED_MASK 0x10 +#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED__SHIFT 0x4 +#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK 0x100 +#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY__SHIFT 0x8 #define CTX \ @@ -177,3 +187,57 @@ enum dc_irq_source dal_irq_service_to_irq_source( src_id, ext_id); } + +bool hpd0_ack( + struct irq_service *irq_service, + const struct irq_source_info *info) +{ + 
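/* Read the delayed HPD sense state, ack the pending interrupt, then
+ * program DC_HPD_INT_POLARITY to the opposite of the current sense so
+ * the next connect/disconnect transition raises a fresh HPD interrupt. */
+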
uint32_t addr = info->status_reg; + uint32_t value = dm_read_reg(irq_service->ctx, addr); + uint32_t current_status = + get_reg_field_value( + value, + HPD0_DC_HPD_INT_STATUS, + DC_HPD_SENSE_DELAYED); + + dal_irq_service_ack_generic(irq_service, info); + + value = dm_read_reg(irq_service->ctx, info->enable_reg); + + set_reg_field_value( + value, + current_status ? 0 : 1, + HPD0_DC_HPD_INT_CONTROL, + DC_HPD_INT_POLARITY); + + dm_write_reg(irq_service->ctx, info->enable_reg, value); + + return true; +} + +bool hpd1_ack( + struct irq_service *irq_service, + const struct irq_source_info *info) +{ + uint32_t addr = info->status_reg; + uint32_t value = dm_read_reg(irq_service->ctx, addr); + uint32_t current_status = + get_reg_field_value( + value, + DC_HPD1_INT_STATUS, + DC_HPD1_SENSE_DELAYED); + + dal_irq_service_ack_generic(irq_service, info); + + value = dm_read_reg(irq_service->ctx, info->enable_reg); + + set_reg_field_value( + value, + current_status ? 0 : 1, + DC_HPD1_INT_CONTROL, + DC_HPD1_INT_POLARITY); + + dm_write_reg(irq_service->ctx, info->enable_reg, value); + + return true; +} diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h index b178f85944cd..bbcef3d2fe33 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h +++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h @@ -82,4 +82,12 @@ void dal_irq_service_set_generic( const struct irq_source_info *info, bool enable); +bool hpd0_ack( + struct irq_service *irq_service, + const struct irq_source_info *info); + +bool hpd1_ack( + struct irq_service *irq_service, + const struct irq_source_info *info); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h index 110f656d43ae..a2f7b933bebf 100644 --- a/drivers/gpu/drm/amd/display/dc/irq_types.h +++ b/drivers/gpu/drm/amd/display/dc/irq_types.h @@ -161,6 +161,20 @@ enum dc_irq_source { DC_IRQ_SOURCE_DPCX_TX_PHYE, DC_IRQ_SOURCE_DPCX_TX_PHYF, + DC_IRQ_SOURCE_DC1_VLINE2, + DC_IRQ_SOURCE_DC2_VLINE2, + DC_IRQ_SOURCE_DC3_VLINE2, + DC_IRQ_SOURCE_DC4_VLINE2, + DC_IRQ_SOURCE_DC5_VLINE2, + DC_IRQ_SOURCE_DC6_VLINE2, + + DC_IRQ_SOURCE_DCI2C_RR_DDC1, + DC_IRQ_SOURCE_DCI2C_RR_DDC2, + DC_IRQ_SOURCE_DCI2C_RR_DDC3, + DC_IRQ_SOURCE_DCI2C_RR_DDC4, + DC_IRQ_SOURCE_DCI2C_RR_DDC5, + DC_IRQ_SOURCE_DCI2C_RR_DDC6, + DAL_IRQ_SOURCES_NUMBER }; @@ -170,6 +184,8 @@ enum irq_type IRQ_TYPE_VUPDATE = DC_IRQ_SOURCE_VUPDATE1, IRQ_TYPE_VBLANK = DC_IRQ_SOURCE_VBLANK1, IRQ_TYPE_VLINE0 = DC_IRQ_SOURCE_DC1_VLINE0, + IRQ_TYPE_VLINE1 = DC_IRQ_SOURCE_DC1_VLINE1, + IRQ_TYPE_VLINE2 = DC_IRQ_SOURCE_DC1_VLINE2, IRQ_TYPE_DCUNDERFLOW = DC_IRQ_SOURCE_DC1UNDERFLOW, }; diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c index 96febabf464a..2956c2b3ad1a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -34,6 +34,7 @@ #include "dm_helpers.h" #include "dc_dmub_srv.h" #include "dce/dmub_hw_lock_mgr.h" +#include "clk_mgr.h" #define DC_LOGGER \ link->ctx->logger @@ -67,10 +68,17 @@ static void dp_retrain_link_dp_test(struct dc_link *link, { struct pipe_ctx *pipes[MAX_PIPES]; struct dc_state *state = link->dc->current_state; + struct dc_stream_update stream_update = { 0 }; + bool dpms_off = false; + bool needs_divider_update = false; bool was_hpo_acquired = resource_is_hpo_acquired(link->dc->current_state); bool is_hpo_acquired; uint8_t count; 
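+ /* Retrain sequence: blank each active stream and disable its OTG,
+ * reprogram pixel clocks and audio when the DP encoding format
+ * (8b/10b vs. 128b/132b) changes, toggle HPO I/O if needed, re-enable
+ * the OTGs, then set DPMS back on via a dpms_off stream update. */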
int i; + struct audio_output audio_output[MAX_PIPES]; + + needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) != + link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings)); udelay(100); @@ -83,16 +91,59 @@ static void dp_retrain_link_dp_test(struct dc_link *link, link->dc, state, pipes[i]); + + // Disable OTG and re-enable after updating clocks + pipes[i]->stream_res.tg->funcs->disable_crtc(pipes[i]->stream_res.tg); } - if (link->dc->hwss.setup_hpo_hw_control) { - is_hpo_acquired = resource_is_hpo_acquired(state); - if (was_hpo_acquired != is_hpo_acquired) - link->dc->hwss.setup_hpo_hw_control(link->dc->hwseq, is_hpo_acquired); + if (needs_divider_update && link->dc->res_pool->funcs->update_dc_state_for_encoder_switch) { + link->dc->res_pool->funcs->update_dc_state_for_encoder_switch(link, + link_setting, count, + *pipes, &audio_output[0]); + for (i = 0; i < count; i++) { + pipes[i]->clock_source->funcs->program_pix_clk( + pipes[i]->clock_source, + &pipes[i]->stream_res.pix_clk_params, + link->dc->link_srv->dp_get_encoding_format(&pipes[i]->link_config.dp_link_settings), + &pipes[i]->pll_settings); + + if (pipes[i]->stream_res.audio != NULL) { + const struct link_hwss *link_hwss = get_link_hwss( + link, &pipes[i]->link_res); + + link_hwss->setup_audio_output(pipes[i], &audio_output[i], + pipes[i]->stream_res.audio->inst); + + pipes[i]->stream_res.audio->funcs->az_configure( + pipes[i]->stream_res.audio, + pipes[i]->stream->signal, + &audio_output[i].crtc_info, + &pipes[i]->stream->audio_info, + &audio_output[i].dp_link_info); + + if (link->dc->config.disable_hbr_audio_dp2 && + pipes[i]->stream_res.audio->funcs->az_disable_hbr_audio && + link->dc->link_srv->dp_is_128b_132b_signal(pipes[i])) + pipes[i]->stream_res.audio->funcs->az_disable_hbr_audio(pipes[i]->stream_res.audio); + } + } } - for (i = count-1; i >= 0; i--) - link_set_dpms_on(state, pipes[i]); + // Toggle on HPO I/O if necessary + is_hpo_acquired = resource_is_hpo_acquired(state); + if (was_hpo_acquired != is_hpo_acquired && link->dc->hwss.setup_hpo_hw_control) + link->dc->hwss.setup_hpo_hw_control(link->dc->hwseq, is_hpo_acquired); + + for (i = 0; i < count; i++) + pipes[i]->stream_res.tg->funcs->enable_crtc(pipes[i]->stream_res.tg); + + // Set DPMS on with stream update + for (i = 0; i < state->stream_count; i++) + if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link) { + stream_update.stream = state->streams[i]; + stream_update.dpms_off = &dpms_off; + dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, state->streams[i], &stream_update); + } } static void dp_test_send_link_training(struct dc_link *link) diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c index 116ff37126e7..55c5148de800 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c @@ -74,7 +74,7 @@ static void dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(struct dc_link *link, static void dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(struct dc_link *link, struct encoder_set_dp_phy_pattern_param *tp_params) { - uint8_t clk_src = 0x4C; + uint8_t clk_src = 0xC4; uint8_t pattern = 0x4F; /* SQ128 */ const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0}; diff --git 
a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index cc9191a5c9e6..827b630daf49 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -593,8 +593,9 @@ static bool detect_dp(struct dc_link *link, if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT; - if (!detect_dp_sink_caps(link)) + if (!detect_dp_sink_caps(link)) { return false; + } if (is_dp_branch_device(link)) /* DP SST branch */ @@ -611,6 +612,7 @@ static bool detect_dp(struct dc_link *link, link->dpcd_caps.dongle_type = sink_caps->dongle_type; link->dpcd_caps.is_dongle_type_one = sink_caps->is_dongle_type_one; link->dpcd_caps.dpcd_rev.raw = 0; + link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw = 0; } return true; @@ -1007,21 +1009,11 @@ static bool detect_link_and_local_sink(struct dc_link *link, link->reported_link_cap.link_rate > LINK_RATE_HIGH3) link->reported_link_cap.link_rate = LINK_RATE_HIGH3; - /* - * If this is DP over USB4 link then we need to: - * - Enable BW ALLOC support on DPtx if applicable - */ - if (dc->config.usb4_bw_alloc_support) { - if (link_dp_dpia_set_dptx_usb4_bw_alloc_support(link)) { - /* update with non reduced link cap if bw allocation mode is supported */ - if (link->dpia_bw_alloc_config.nrd_max_link_rate && - link->dpia_bw_alloc_config.nrd_max_lane_count) { - link->reported_link_cap.link_rate = - link->dpia_bw_alloc_config.nrd_max_link_rate; - link->reported_link_cap.lane_count = - link->dpia_bw_alloc_config.nrd_max_lane_count; - } - } + if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling + && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc + && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support) { + if (link_dpia_enable_usb4_dp_bw_alloc_mode(link) == false) + link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc = false; } break; } diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 268626e73c54..8c8682f743d6 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -140,7 +140,8 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init) } } - if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init) + if (((!dc->is_switch_in_progress_dest) && ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)) && + (link->type != dc_connection_none)) dpcd_write_rx_power_ctrl(link, false); } } @@ -148,6 +149,7 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init) void link_set_all_streams_dpms_off_for_link(struct dc_link *link) { struct pipe_ctx *pipes[MAX_PIPES]; + struct dc_stream_state *streams[MAX_PIPES]; struct dc_state *state = link->dc->current_state; uint8_t count; int i; @@ -160,10 +162,18 @@ void link_set_all_streams_dpms_off_for_link(struct dc_link *link) link_get_master_pipes_with_dpms_on(link, state, &count, pipes); + /* The subsequent call to dc_commit_updates_for_stream for a full update + * will release the current state and swap to a new state. Releasing the + * current state results in the stream pointers in the pipe_ctx structs + * to be zero'd. Hence, cache all streams prior to dc_commit_updates_for_stream. 
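+ * Otherwise stream_update.stream would be read from pipe_ctx entries
+ * that the first commit in this loop has already zeroed.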
+ */ + for (i = 0; i < count; i++) + streams[i] = pipes[i]->stream; + for (i = 0; i < count; i++) { - stream_update.stream = pipes[i]->stream; + stream_update.stream = streams[i]; dc_commit_updates_for_stream(link->ctx->dc, NULL, 0, - pipes[i]->stream, &stream_update, + streams[i], &stream_update, state); } @@ -833,14 +843,14 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; if (should_use_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, dsc->inst); + dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h); dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc; if (should_use_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst); + dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h); odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg); odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst); } @@ -2287,8 +2297,7 @@ static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, i link->dpia_bw_alloc_config.remote_sink_req_bw[sink_index] = bw; } - /* get dp overhead for dp tunneling */ - link->dpia_bw_alloc_config.dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(link); + link->dpia_bw_alloc_config.dp_overhead = link_dpia_get_dp_overhead(link); req_bw += link->dpia_bw_alloc_config.dp_overhead; link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, req_bw); @@ -2365,7 +2374,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx) update_psp_stream_config(pipe_ctx, true); dc->hwss.blank_stream(pipe_ctx); - if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + if (pipe_ctx->link_config.dp_tunnel_settings.should_use_dp_bw_allocation) deallocate_usb4_bandwidth(pipe_ctx->stream); if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) @@ -2433,7 +2442,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx) if (link->connector_signal == SIGNAL_TYPE_EDP && dc->debug.psp_disabled_wa) { /* reset internal save state to default since eDP is off */ enum dp_panel_mode panel_mode = dp_get_panel_mode(pipe_ctx->stream->link); - /* since current psp not loaded, we need to reset it to default*/ + /* since current psp not loaded, we need to reset it to default */ link->panel_mode = panel_mode; } } @@ -2528,6 +2537,14 @@ void link_set_dpms_on( !pipe_ctx->next_odm_pipe) { pipe_ctx->stream->dpms_off = false; update_psp_stream_config(pipe_ctx, false); + + if (link->is_dds) { + uint32_t post_oui_delay = 30; // 30ms + + dpcd_set_source_specific_data(link); + msleep(post_oui_delay); + } + return; } @@ -2611,7 +2628,7 @@ void link_set_dpms_on( if (dc_is_dp_signal(pipe_ctx->stream->signal)) dp_set_hblank_reduction_on_rx(pipe_ctx); - if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + if (pipe_ctx->link_config.dp_tunnel_settings.should_use_dp_bw_allocation) allocate_usb4_bandwidth(pipe_ctx->stream); if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) @@ -2620,6 +2637,15 @@ void link_set_dpms_on( dp_is_128b_132b_signal(pipe_ctx)) update_sst_payload(pipe_ctx, true); + /* Corruption was observed on systems with display mux when stream gets + * enabled after the mux switch. Having a small delay between link + * training and stream unblank resolves the corruption issue. 
+ * This is a workaround. + */ + if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && + link->is_display_mux_present) + msleep(20); + dc->hwss.unblank_stream(pipe_ctx, &pipe_ctx->stream->link->cur_link_settings); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c index f6b6b19e7481..de1143dbbd25 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -100,7 +100,7 @@ static void construct_link_service_validation(struct link_service *link_srv) { link_srv->validate_mode_timing = link_validate_mode_timing; link_srv->dp_link_bandwidth_kbps = dp_link_bandwidth_kbps; - link_srv->validate_dpia_bandwidth = link_validate_dpia_bandwidth; + link_srv->validate_dp_tunnel_bandwidth = link_validate_dp_tunnel_bandwidth; link_srv->dp_required_hblank_size_bytes = dp_required_hblank_size_bytes; } @@ -156,6 +156,7 @@ static void construct_link_service_dp_capability(struct link_service *link_srv) link_srv->dp_get_encoding_format = link_dp_get_encoding_format; link_srv->dp_should_enable_fec = dp_should_enable_fec; link_srv->dp_decide_link_settings = link_decide_link_settings; + link_srv->dp_decide_tunnel_settings = link_decide_dp_tunnel_settings; link_srv->mst_decide_link_encoding_format = mst_decide_link_encoding_format; link_srv->edp_decide_link_settings = edp_decide_link_settings; @@ -464,6 +465,7 @@ static bool construct_phy(struct dc_link *link, link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID; + link->irq_source_read_request = DC_IRQ_SOURCE_INVALID; link->link_status.dpcd_caps = &link->dpcd_caps; link->dc = init_params->dc; @@ -514,6 +516,9 @@ static bool construct_phy(struct dc_link *link, case CONNECTOR_ID_HDMI_TYPE_A: link->connector_signal = SIGNAL_TYPE_HDMI_TYPE_A; + if (link->hpd_gpio) + link->irq_source_read_request = + dal_irq_get_read_request(link->hpd_gpio); break; case CONNECTOR_ID_SINGLE_LINK_DVID: case CONNECTOR_ID_SINGLE_LINK_DVII: @@ -534,10 +539,16 @@ static bool construct_phy(struct dc_link *link, break; case CONNECTOR_ID_EDP: + // If smartmux is supported, only create the link on the primary eDP. + // Dual eDP is not supported with smartmux.
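+ // In other words, fail creation when a smartmux is present and an eDP
+ // link already exists (dc_ctx->dc_edp_id_count != 0).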
+ if (!(!link->dc->config.smart_mux_version || dc_ctx->dc_edp_id_count == 0)) + goto create_fail; + link->connector_signal = SIGNAL_TYPE_EDP; if (link->hpd_gpio) { - if (!link->dc->config.allow_edp_hotplug_detection) + if (!link->dc->config.allow_edp_hotplug_detection + && !is_smartmux_suported(link)) link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; switch (link->dc->config.allow_edp_hotplug_detection) { @@ -653,7 +664,7 @@ static bool construct_phy(struct dc_link *link, } /* Look for device tag that matches connector signal, - * CRT for rgb, LCD for other supported signal tyes + * CRT for rgb, LCD for other supported signal types */ if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios, link->device_tag.dev_id)) diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c index 29606fda029d..aecaf37eee35 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c @@ -86,6 +86,10 @@ static bool dp_active_dongle_validate_timing( if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through) return false; break; + case PIXEL_ENCODING_UNDEFINED: + /* These color depths are currently not supported */ + ASSERT(false); + break; default: /* Invalid Pixel Encoding*/ return false; @@ -104,6 +108,10 @@ static bool dp_active_dongle_validate_timing( if (dongle_caps->dp_hdmi_max_bpc < 12) return false; break; + case COLOR_DEPTH_UNDEFINED: + /* These color depths are currently not supported */ + ASSERT(false); + break; case COLOR_DEPTH_141414: case COLOR_DEPTH_161616: default: @@ -255,6 +263,14 @@ uint32_t dp_link_bandwidth_kbps( return link_rate_per_lane_kbps * link_settings->lane_count / 10000 * total_data_bw_efficiency_x10000; } +static uint32_t dp_get_timing_bandwidth_kbps( + const struct dc_crtc_timing *timing, + const struct dc_link *link) +{ + return dc_bandwidth_in_kbps_from_timing(timing, + dc_link_get_highest_encoding_format(link)); +} + static bool dp_validate_mode_timing( struct dc_link *link, const struct dc_crtc_timing *timing) @@ -351,63 +367,81 @@ enum dc_status link_validate_mode_timing( return DC_OK; } +static const struct dc_tunnel_settings *get_dp_tunnel_settings(const struct dc_state *context, + const struct dc_stream_state *stream) +{ + int i; + const struct dc_tunnel_settings *dp_tunnel_settings = NULL; + + for (i = 0; i < MAX_PIPES; i++) { + if (context->res_ctx.pipe_ctx[i].stream && (context->res_ctx.pipe_ctx[i].stream == stream)) { + dp_tunnel_settings = &context->res_ctx.pipe_ctx[i].link_config.dp_tunnel_settings; + break; + } + } + + return dp_tunnel_settings; +} + /* - * This function calculates the bandwidth required for the stream timing - * and aggregates the stream bandwidth for the respective dpia link - * - * @stream: pointer to the dc_stream_state struct instance - * @num_streams: number of streams to be validated + * Calculates the DP tunneling bandwidth required for the stream timing + * and aggregates the stream bandwidth for the respective DP tunneling link * - * return: true if validation is succeeded + * return: dc_status */ -bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const unsigned int num_streams) +enum dc_status link_validate_dp_tunnel_bandwidth(const struct dc *dc, const struct dc_state *new_ctx) { - int bw_needed[MAX_DPIA_NUM] = {0}; - struct dc_link *dpia_link[MAX_DPIA_NUM] = {0}; - int num_dpias = 0; - - for (unsigned int i = 0; i < num_streams; ++i) { - if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) { - /* new 
dpia sst stream, check whether it exceeds max dpia */ - if (num_dpias >= MAX_DPIA_NUM) - return false; + struct dc_validation_dpia_set dpia_link_sets[MAX_DPIA_NUM] = { 0 }; + uint8_t link_count = 0; + enum dc_status result = DC_OK; - dpia_link[num_dpias] = stream[i].link; - bw_needed[num_dpias] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing, - dc_link_get_highest_encoding_format(dpia_link[num_dpias])); - num_dpias++; - } else if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - uint8_t j = 0; - /* check whether its a known dpia link */ - for (; j < num_dpias; ++j) { - if (dpia_link[j] == stream[i].link) - break; - } + // Iterate through streams in the new context + for (uint8_t i = 0; (i < MAX_PIPES && i < new_ctx->stream_count); i++) { + const struct dc_stream_state *stream = new_ctx->streams[i]; + const struct dc_link *link; + const struct dc_tunnel_settings *dp_tunnel_settings; + uint32_t timing_bw; + + if (stream == NULL) + continue; + + link = stream->link; + + if (!(link && (stream->signal == SIGNAL_TYPE_DISPLAY_PORT + || stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) + && link->hpd_status)) + continue; - if (j == num_dpias) { - /* new dpia mst stream, check whether it exceeds max dpia */ - if (num_dpias >= MAX_DPIA_NUM) - return false; - else { - dpia_link[j] = stream[i].link; - num_dpias++; - } + dp_tunnel_settings = get_dp_tunnel_settings(new_ctx, stream); + + if ((dp_tunnel_settings == NULL) || (dp_tunnel_settings->should_use_dp_bw_allocation == false)) + continue; + + timing_bw = dp_get_timing_bandwidth_kbps(&stream->timing, link); + + // Find an existing entry for this 'link' in 'dpia_link_sets' + for (uint8_t j = 0; j < MAX_DPIA_NUM; j++) { + bool is_new_slot = false; + + if (dpia_link_sets[j].link == NULL) { + is_new_slot = true; + link_count++; + dpia_link_sets[j].required_bw = 0; + dpia_link_sets[j].link = link; } - bw_needed[j] += dc_bandwidth_in_kbps_from_timing(&stream[i].timing, - dc_link_get_highest_encoding_format(dpia_link[j])); + if (is_new_slot || (dpia_link_sets[j].link == link)) { + dpia_link_sets[j].tunnel_settings = dp_tunnel_settings; + dpia_link_sets[j].required_bw += timing_bw; + break; + } } } - /* Include dp overheads */ - for (uint8_t i = 0; i < num_dpias; ++i) { - int dp_overhead = 0; - - dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(dpia_link[i]); - bw_needed[i] += dp_overhead; - } + if (link_count && link_dpia_validate_dp_tunnel_bandwidth(dpia_link_sets, link_count) == false) + result = DC_FAIL_DP_TUNNEL_BW_VALIDATE; - return dpia_validate_usb4_bw(dpia_link, bw_needed, num_dpias); + return result; } struct dp_audio_layout_config { diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h index bf398c49c3e8..9553c81053fe 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.h +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h @@ -30,9 +30,9 @@ enum dc_status link_validate_mode_timing( const struct dc_stream_state *stream, struct dc_link *link, const struct dc_crtc_timing *timing); -bool link_validate_dpia_bandwidth( - const struct dc_stream_state *stream, - const unsigned int num_streams); +enum dc_status link_validate_dp_tunnel_bandwidth( + const struct dc *dc, + const struct dc_state *new_ctx); uint32_t dp_link_bandwidth_kbps( const struct dc_link *link, const struct dc_link_settings *link_settings); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c 
b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index 21ee0d96c9d4..651926e547b9 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -158,6 +158,14 @@ uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count) return 0; // invalid value } +uint32_t dp_get_closest_lttpr_offset(uint8_t lttpr_count) +{ + /* Calculate offset for LTTPR closest to DPTX which is highest in the chain + * Offset is 0 for single LTTPR cases as base LTTPR DPCD addresses target LTTPR 1 + */ + return DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE * (lttpr_count - 1); +} + uint32_t link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw) { switch (bw) { @@ -377,9 +385,15 @@ bool dp_is_128b_132b_signal(struct pipe_ctx *pipe_ctx) bool dp_is_lttpr_present(struct dc_link *link) { /* Some sink devices report invalid LTTPR revision, so don't validate against that cap */ - return (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 && + uint32_t lttpr_count = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + bool is_lttpr_present = (lttpr_count > 0 && link->dpcd_caps.lttpr_caps.max_lane_count > 0 && link->dpcd_caps.lttpr_caps.max_lane_count <= 4); + + if (lttpr_count > 0 && !is_lttpr_present) + DC_LOG_ERROR("LTTPR count is nonzero but invalid lane count reported. Assuming no LTTPR present.\n"); + + return is_lttpr_present; } /* in DP compliance test, DPR-120 may have @@ -1374,6 +1388,21 @@ void dpcd_set_source_specific_data(struct dc_link *link) struct dpcd_amd_signature amd_signature = {0}; struct dpcd_amd_device_id amd_device_id = {0}; + if (link->is_dds) { + uint8_t dpcd_dp_edp_backlight_mode = 0; + + /* + * Write 0 to bits 0:1 for dp_edp_backlight_mode_set register + * if platform is DDS + */ + core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, + &dpcd_dp_edp_backlight_mode, sizeof(uint8_t)); + dpcd_dp_edp_backlight_mode &= ~0x3; + + core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, + &dpcd_dp_edp_backlight_mode, sizeof(uint8_t)); + } + amd_device_id.device_id_byte1 = (uint8_t)(link->ctx->asic_id.chip_id); amd_device_id.device_id_byte2 = @@ -1529,6 +1558,10 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link) return false; link->dpcd_sink_ext_caps.raw = dpcd_data; + if (link->is_dds && !link->dpcd_sink_ext_caps.bits.oled) { + link->dpcd_sink_ext_caps.raw = 0; + return false; + } if (core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_2, &edp_general_cap2, 1) != DC_OK) return false; @@ -1543,6 +1576,8 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link) uint8_t lttpr_dpcd_data[10] = {0}; enum dc_status status; bool is_lttpr_present; + uint32_t lttpr_count; + uint32_t closest_lttpr_offset; /* Logic to determine LTTPR support*/ bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware; @@ -1594,20 +1629,22 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link) lttpr_dpcd_data[DP_LTTPR_ALPM_CAPABILITIES - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + lttpr_count = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + /* If this chip cap is set, at least one retimer must exist in the chain * Override count to 1 if we receive a known bad count (0 or an invalid value) */ if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && - (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) { + 
lttpr_count == 0) { /* If you see this message consistently, either the host platform has FIXED_VS flag * incorrectly configured or the sink device is returning an invalid count. */ DC_LOG_ERROR("lttpr_caps phy_repeater_cnt is 0x%x, forcing it to 0x80.", link->dpcd_caps.lttpr_caps.phy_repeater_cnt); link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80; + lttpr_count = 1; DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt); } - /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */ is_lttpr_present = dp_is_lttpr_present(link); DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present); @@ -1615,11 +1652,25 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link) if (is_lttpr_present) { CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: "); - core_link_read_dpcd(link, DP_LTTPR_IEEE_OUI, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui)); - CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui), "LTTPR IEEE OUI: "); + // Identify closest LTTPR to determine if workarounds required for known embedded LTTPR + closest_lttpr_offset = dp_get_closest_lttpr_offset(lttpr_count); - core_link_read_dpcd(link, DP_LTTPR_DEVICE_ID, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id)); - CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id), "LTTPR Device ID: "); + core_link_read_dpcd(link, (DP_LTTPR_IEEE_OUI + closest_lttpr_offset), + link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui)); + core_link_read_dpcd(link, (DP_LTTPR_DEVICE_ID + closest_lttpr_offset), + link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id)); + + if (lttpr_count > 1) { + CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui), + "Closest LTTPR To Host's IEEE OUI: "); + CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id), + "Closest LTTPR To Host's LTTPR Device ID: "); + } else { + CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui), + "LTTPR IEEE OUI: "); + CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id), + "LTTPR Device ID: "); + } } return status; @@ -2013,11 +2064,9 @@ static bool retrieve_link_cap(struct dc_link *link) sizeof(link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw)); /* Read DP tunneling information. 
*/ - if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { - status = dpcd_get_tunneling_device_data(link); - if (status != DC_OK) - dm_error("%s: Read DP tunneling device data failed.\n", __func__); - } + status = dpcd_get_tunneling_device_data(link); + if (status != DC_OK) + DC_LOG_DP2("%s: Read DP tunneling device data failed.\n", __func__); retrieve_cable_id(link); dpcd_write_cable_id_to_dprx(link); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h index 0ce0af3ddbeb..940b147cc5d4 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h @@ -48,6 +48,9 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link); /* Convert PHY repeater count read from DPCD uint8_t. */ uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count); +/* Calculate embedded LTTPR address offset for vendor-specific behaviour */ +uint32_t dp_get_closest_lttpr_offset(uint8_t lttpr_count); + bool dp_is_sink_present(struct dc_link *link); bool dp_is_lttpr_present(struct dc_link *link); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c index 0d123e647652..9b2f1a7da1d1 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c @@ -62,6 +62,39 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link) if (status != DC_OK) goto err; + link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw = + dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT]; + + if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling == false) + goto err; + + link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw = + dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT]; + link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id = + dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT]; + + if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc) { + status = core_link_read_dpcd(link, USB4_DRIVER_BW_CAPABILITY, + dpcd_dp_tun_data, 2); + + if (status != DC_OK) + goto err; + + link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.raw = + dpcd_dp_tun_data[USB4_DRIVER_BW_CAPABILITY - USB4_DRIVER_BW_CAPABILITY]; + link->dpcd_caps.usb4_dp_tun_info.dpia_tunnel_info.raw = + dpcd_dp_tun_data[DP_IN_ADAPTER_TUNNEL_INFO - USB4_DRIVER_BW_CAPABILITY]; + } + + DC_LOG_DEBUG("%s: Link[%d] DP tunneling support (RouterId=%d AdapterId=%d) " + "DPIA_BW_Alloc_support=%d " + "CM_BW_Alloc_support=%d ", + __func__, link->link_index, + link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id, + link->dpcd_caps.usb4_dp_tun_info.dpia_info.bits.dpia_num, + link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc, + link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support); + status = core_link_read_dpcd( link, DP_USB4_ROUTER_TOPOLOGY_ID, @@ -71,13 +104,6 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link) if (status != DC_OK) goto err; - link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw = - dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT]; - link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw = - dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT]; - link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id = - dpcd_dp_tun_data[DP_USB4_DRIVER_ID - 
DP_TUNNELING_CAPABILITIES_SUPPORT]; - for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++) link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i]; @@ -92,6 +118,7 @@ bool dpia_query_hpd_status(struct dc_link *link) /* prepare QUERY_HPD command */ cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE; + cmd.query_hpd.header.payload_bytes = sizeof(cmd.query_hpd.data); cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1; cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA; @@ -119,3 +146,26 @@ bool dpia_query_hpd_status(struct dc_link *link) return link->hpd_status; } +void link_decide_dp_tunnel_settings(struct dc_stream_state *stream, + struct dc_tunnel_settings *dp_tunnel_setting) +{ + struct dc_link *link = stream->link; + + memset(dp_tunnel_setting, 0, sizeof(*dp_tunnel_setting)); + + if ((stream->signal == SIGNAL_TYPE_DISPLAY_PORT) || (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)) { + dp_tunnel_setting->should_enable_dp_tunneling = + link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling; + + if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc + && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support) { + dp_tunnel_setting->should_use_dp_bw_allocation = true; + dp_tunnel_setting->cm_id = link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id & 0x0F; + dp_tunnel_setting->group_id = link->dpcd_caps.usb4_dp_tun_info.dpia_tunnel_info.bits.group_id; + dp_tunnel_setting->estimated_bw = link->dpia_bw_alloc_config.estimated_bw; + dp_tunnel_setting->allocated_bw = link->dpia_bw_alloc_config.allocated_bw; + dp_tunnel_setting->bw_granularity = link->dpia_bw_alloc_config.bw_granularity; + } + } +} + diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h index 363f45a1a964..a61edfc9ca7a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h @@ -38,4 +38,10 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link); * Returns true if HPD high. 
*/ bool dpia_query_hpd_status(struct dc_link *link); + +/* Decide the DP tunneling settings based on the DPCD capabilities + */ +void link_decide_dp_tunnel_settings(struct dc_stream_state *stream, + struct dc_tunnel_settings *dp_tunnel_setting); + #endif /* __DC_LINK_DPIA_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c index a254ead2f7e8..819bf2d8ba53 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c @@ -35,6 +35,8 @@ #define Kbps_TO_Gbps (1000 * 1000) +#define MST_TIME_SLOT_COUNT 64 + // ------------------------------------------------------------------ // PRIVATE FUNCTIONS // ------------------------------------------------------------------ @@ -46,9 +48,10 @@ */ static bool link_dp_is_bw_alloc_available(struct dc_link *link) { - return (link && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA - && link->hpd_status - && link->dpia_bw_alloc_config.bw_alloc_enabled); + return (link && link->hpd_status + && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling + && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc + && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support); } static void reset_bw_alloc_struct(struct dc_link *link) @@ -141,7 +144,7 @@ static int get_non_reduced_max_lane_count(struct dc_link *link) * granuality, Driver_ID, CM_Group, & populate the BW allocation structs * for host router and dpia */ -static void init_usb4_bw_struct(struct dc_link *link) +static void retrieve_usb4_dp_bw_allocation_info(struct dc_link *link) { reset_bw_alloc_struct(link); @@ -159,78 +162,6 @@ static void init_usb4_bw_struct(struct dc_link *link) link->dpia_bw_alloc_config.nrd_max_lane_count); } -static uint8_t get_lowest_dpia_index(struct dc_link *link) -{ - const struct dc *dc_struct = link->dc; - uint8_t idx = 0xFF; - int i; - - for (i = 0; i < MAX_LINKS; ++i) { - - if (!dc_struct->links[i] || - dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) - continue; - - if (idx > dc_struct->links[i]->link_index) { - idx = dc_struct->links[i]->link_index; - break; - } - } - - return idx; -} - -/* - * Get the maximum dp tunnel banwidth of host router - * - * @dc: pointer to the dc struct instance - * @hr_index: host router index - * - * return: host router maximum dp tunnel bandwidth - */ -static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_index) -{ - uint8_t lowest_dpia_index = get_lowest_dpia_index(dc->links[0]); - uint8_t hr_index_temp = 0; - struct dc_link *link_dpia_primary, *link_dpia_secondary; - int total_bw = 0; - - for (uint8_t i = 0; i < MAX_LINKS - 1; ++i) { - - if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) - continue; - - hr_index_temp = (dc->links[i]->link_index - lowest_dpia_index) / 2; - - if (hr_index_temp == hr_index) { - link_dpia_primary = dc->links[i]; - link_dpia_secondary = dc->links[i + 1]; - - /** - * If BW allocation enabled on both DPIAs, then - * HR BW = Estimated(dpia_primary) + Allocated(dpia_secondary) - * otherwise HR BW = Estimated(bw alloc enabled dpia) - */ - if ((link_dpia_primary->hpd_status && - link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) && - (link_dpia_secondary->hpd_status && - link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled)) { - total_bw += link_dpia_primary->dpia_bw_alloc_config.estimated_bw + - 
link_dpia_secondary->dpia_bw_alloc_config.allocated_bw; - } else if (link_dpia_primary->hpd_status && - link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) { - total_bw = link_dpia_primary->dpia_bw_alloc_config.estimated_bw; - } else if (link_dpia_secondary->hpd_status && - link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled) { - total_bw += link_dpia_secondary->dpia_bw_alloc_config.estimated_bw; - } - break; - } - } - - return total_bw; -} - /* * Cleanup function for when the dpia is unplugged to reset struct * and perform any required clean up @@ -250,93 +181,81 @@ static void dpia_bw_alloc_unplug(struct dc_link *link) static void link_dpia_send_bw_alloc_request(struct dc_link *link, int req_bw) { - uint8_t requested_bw; - uint32_t temp; + uint8_t request_reg_val; + uint32_t temp, request_bw; - /* Error check whether request bw greater than allocated */ - if (req_bw > link->dpia_bw_alloc_config.estimated_bw) { - DC_LOG_ERROR("%s: Request BW greater than estimated BW for link(%d)\n", - __func__, link->link_index); - req_bw = link->dpia_bw_alloc_config.estimated_bw; + if (link->dpia_bw_alloc_config.bw_granularity == 0) { + DC_LOG_ERROR("%s: Link[%d]: bw_granularity is zero!", __func__, link->link_index); + return; } temp = req_bw * link->dpia_bw_alloc_config.bw_granularity; - requested_bw = temp / Kbps_TO_Gbps; + request_reg_val = temp / Kbps_TO_Gbps; /* Always make sure to add more to account for floating points */ if (temp % Kbps_TO_Gbps) - ++requested_bw; + ++request_reg_val; - /* Error check whether requested and allocated are equal */ - req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); - if (req_bw && (req_bw == link->dpia_bw_alloc_config.allocated_bw)) { - DC_LOG_ERROR("%s: Request BW equals to allocated BW for link(%d)\n", - __func__, link->link_index); + request_bw = request_reg_val * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); + + if (request_bw > link->dpia_bw_alloc_config.estimated_bw) { + DC_LOG_ERROR("%s: Link[%d]: Request BW (%d --> %d) > Estimated BW (%d)... 
Set to Estimated BW!", + __func__, link->link_index, + req_bw, request_bw, link->dpia_bw_alloc_config.estimated_bw); + req_bw = link->dpia_bw_alloc_config.estimated_bw; + + temp = req_bw * link->dpia_bw_alloc_config.bw_granularity; + request_reg_val = temp / Kbps_TO_Gbps; + if (temp % Kbps_TO_Gbps) + ++request_reg_val; } + link->dpia_bw_alloc_config.allocated_bw = request_bw; + DC_LOG_DC("%s: Link[%d]: Request BW: %d", __func__, link->link_index, request_bw); + core_link_write_dpcd(link, REQUESTED_BW, - &requested_bw, + &request_reg_val, sizeof(uint8_t)); } // ------------------------------------------------------------------ // PUBLIC FUNCTIONS // ------------------------------------------------------------------ -bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link) +bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link) { bool ret = false; - uint8_t response = 0, - bw_support_dpia = 0, - bw_support_cm = 0; + uint8_t val; - if (!(link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->hpd_status)) - goto out; + if (link->hpd_status) { + val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ; - if (core_link_read_dpcd( - link, - DP_TUNNELING_CAPABILITIES, - &response, - sizeof(uint8_t)) == DC_OK) - bw_support_dpia = (response >> 7) & 1; - - if (core_link_read_dpcd( - link, - USB4_DRIVER_BW_CAPABILITY, - &response, - sizeof(uint8_t)) == DC_OK) - bw_support_cm = (response >> 7) & 1; - - /* Send request acknowledgment to Turn ON DPTX support */ - if (bw_support_cm && bw_support_dpia) { - - response = 0x80; - if (core_link_write_dpcd( - link, - DPTX_BW_ALLOCATION_MODE_CONTROL, - &response, - sizeof(uint8_t)) != DC_OK) { - DC_LOG_DEBUG("%s: FAILURE Enabling DPtx BW Allocation Mode Support for link(%d)\n", - __func__, link->link_index); - } else { - // SUCCESS Enabled DPtx BW Allocation Mode Support - DC_LOG_DEBUG("%s: SUCCESS Enabling DPtx BW Allocation Mode Support for link(%d)\n", - __func__, link->link_index); + if (core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(uint8_t)) == DC_OK) { + DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode enabled", __func__, link->link_index); + + retrieve_usb4_dp_bw_allocation_info(link); + + if (link->dpia_bw_alloc_config.nrd_max_link_rate && link->dpia_bw_alloc_config.nrd_max_lane_count) { + link->reported_link_cap.link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate; + link->reported_link_cap.lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count; + } - ret = true; - init_usb4_bw_struct(link); link->dpia_bw_alloc_config.bw_alloc_enabled = true; + ret = true; - /* - * During DP tunnel creation, CM preallocates BW and reduces estimated BW of other - * DPIA. CM release preallocation only when allocation is complete. Do zero alloc - * to make the CM to release preallocation and update estimated BW correctly for - * all DPIAs per host router - */ - link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0); - } + if (link->dc->debug.dpia_debug.bits.enable_usb4_bw_zero_alloc_patch) { + /* + * During DP tunnel creation, the CM preallocates BW + * and reduces the estimated BW of other DPIAs. + * The CM releases the preallocation only when the allocation is complete. + * Perform a zero allocation to make the CM release the preallocation + * and correctly update the estimated BW for all DPIAs per host router. 
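+				 * A zero allocation is simply a request with req_bw = 0, which
+				 * the rounding in link_dpia_send_bw_alloc_request() maps to a
+				 * REQUESTED_BW register value of 0.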
+ */ + link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0); + } + } else + DC_LOG_DEBUG("%s: link[%d] failed to enable DPTX BW allocation mode", __func__, link->link_index); } -out: return ret; } @@ -350,19 +269,17 @@ out: */ void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status) { + link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); + if (status & DP_TUNNELING_BW_REQUEST_SUCCEEDED) { DC_LOG_DEBUG("%s: BW Allocation request succeeded on link(%d)", __func__, link->link_index); } else if (status & DP_TUNNELING_BW_REQUEST_FAILED) { - link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); - DC_LOG_DEBUG("%s: BW Allocation request failed on link(%d) allocated/estimated BW=%d", __func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw); link_dpia_send_bw_alloc_request(link, link->dpia_bw_alloc_config.estimated_bw); } else if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED) { - link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); - DC_LOG_DEBUG("%s: Estimated BW changed on link(%d) new estimated BW=%d", __func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw); } @@ -378,7 +295,8 @@ void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status) */ void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw) { - if (link && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dpia_bw_alloc_config.bw_alloc_enabled) { + if (link && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling + && link->dpia_bw_alloc_config.bw_alloc_enabled) { //1. Hot Plug if (link->hpd_status && peak_bw > 0) { // If DP over USB4 then we need to check BW allocation @@ -394,83 +312,130 @@ void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pe void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw) { - DC_LOG_DEBUG("%s: ENTER: link(%d), hpd_status(%d), current allocated_bw(%d), req_bw(%d)\n", + link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); + + DC_LOG_DEBUG("%s: ENTER: link[%d] hpd(%d) Allocated_BW: %d Estimated_BW: %d Req_BW: %d", __func__, link->link_index, link->hpd_status, - link->dpia_bw_alloc_config.allocated_bw, req_bw); + link->dpia_bw_alloc_config.allocated_bw, + link->dpia_bw_alloc_config.estimated_bw, + req_bw); if (link_dp_is_bw_alloc_available(link)) link_dpia_send_bw_alloc_request(link, req_bw); else - DC_LOG_DEBUG("%s: Not able to send the BW Allocation request", __func__); + DC_LOG_DEBUG("%s: BW Allocation mode not available", __func__); +} + +uint32_t link_dpia_get_dp_overhead(const struct dc_link *link) +{ + uint32_t link_dp_overhead = 0; + + if ((link->type == dc_connection_mst_branch) && + !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { + /* For 8b/10b encoding: MTP is 64 time slots long, slot 0 is used for MTPH + * MST overhead is 1/64 of link bandwidth (excluding any overhead) + */ + const struct dc_link_settings *link_cap = dc_link_get_link_cap(link); + + if (link_cap) { + uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate * + (uint32_t)link_cap->lane_count * + LINK_RATE_REF_FREQ_IN_KHZ * 8; + link_dp_overhead = (link_bw_in_kbps / MST_TIME_SLOT_COUNT) + + ((link_bw_in_kbps % MST_TIME_SLOT_COUNT) ? 1 : 0); + } + } + + return link_dp_overhead; } -bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias) +/* + * Aggregates the DPIA bandwidth usage for the respective USB4 Router. 
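+ * DPIAs are grouped by the CM_ID in their tunnel settings, and each
+ * link's required bandwidth is rounded up to the router's BW granularity
+ * before being added to its group's total. For example (hypothetical
+ * numbers): with bw_granularity = 4, granularity_Gbps = 1000000 / 4 =
+ * 250000 kbps, so a 10100000 kbps requirement rounds up to 10250000 kbps.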
+ * Each group's total is then validated against that router's capacity.
+ *
+ * @dpia_link_sets: pointer to an array of dc_validation_dpia_set
+ * @count: number of DPIA validation sets
+ *
+ * return: true if validation succeeds
+ */
+bool link_dpia_validate_dp_tunnel_bandwidth(const struct dc_validation_dpia_set *dpia_link_sets, uint8_t count)
 {
-	bool ret = true;
-	int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 }, host_router_total_dp_bw = 0;
-	uint8_t lowest_dpia_index, i, hr_index;
+	uint32_t granularity_Gbps;
+	const struct dc_link *link;
+	uint32_t link_bw_granularity;
+	uint32_t link_required_bw;
+	struct usb4_router_validation_set router_sets[MAX_HOST_ROUTERS_NUM] = { 0 };
+	uint8_t i;
+	bool is_success = true;
+	uint8_t router_count = 0;
+
+	if ((dpia_link_sets == NULL) || (count == 0))
+		return is_success;
+
+	// Iterate through each DP tunneling link (DPIA).
+	// Aggregate its bandwidth requirements onto the respective USB4 router.
+	for (i = 0; i < count; i++) {
+		link = dpia_link_sets[i].link;
+		link_required_bw = dpia_link_sets[i].required_bw;
+		const struct dc_tunnel_settings *dp_tunnel_settings = dpia_link_sets[i].tunnel_settings;
+
+		if ((link == NULL) || (dp_tunnel_settings == NULL) || dp_tunnel_settings->bw_granularity == 0)
+			break;
-	if (!num_dpias || num_dpias > MAX_DPIA_NUM)
-		return ret;
+		if (link->type == dc_connection_mst_branch)
+			link_required_bw += link_dpia_get_dp_overhead(link);
-	lowest_dpia_index = get_lowest_dpia_index(link[0]);
+		granularity_Gbps = (Kbps_TO_Gbps / dp_tunnel_settings->bw_granularity);
+		link_bw_granularity = (link_required_bw / granularity_Gbps) * granularity_Gbps +
+				((link_required_bw % granularity_Gbps) ? granularity_Gbps : 0);
-	/* get total Host Router BW with granularity for the given modes */
-	for (i = 0; i < num_dpias; ++i) {
-		int granularity_Gbps = 0;
-		int bw_granularity = 0;
+		// Find or add the USB4 router associated with the current DPIA link
+		for (uint8_t j = 0; j < MAX_HOST_ROUTERS_NUM; j++) {
+			if (router_sets[j].is_valid == false) {
+				router_sets[j].is_valid = true;
+				router_sets[j].cm_id = dp_tunnel_settings->cm_id;
+				router_count++;
+			}
-		if (!link[i]->dpia_bw_alloc_config.bw_alloc_enabled)
-			continue;
+			if (router_sets[j].cm_id == dp_tunnel_settings->cm_id) {
+				uint32_t remaining_bw =
+					dp_tunnel_settings->estimated_bw - dp_tunnel_settings->allocated_bw;
-		if (link[i]->link_index < lowest_dpia_index)
-			continue;
+				router_sets[j].allocated_bw += dp_tunnel_settings->allocated_bw;
-		granularity_Gbps = (Kbps_TO_Gbps / link[i]->dpia_bw_alloc_config.bw_granularity);
-		bw_granularity = (bw_needed_per_dpia[i] / granularity_Gbps) * granularity_Gbps +
-				((bw_needed_per_dpia[i] % granularity_Gbps) ?
granularity_Gbps : 0); + if (remaining_bw > router_sets[j].remaining_bw) + router_sets[j].remaining_bw = remaining_bw; - hr_index = (link[i]->link_index - lowest_dpia_index) / 2; - bw_needed_per_hr[hr_index] += bw_granularity; - } + // Get the max estimated BW within the same CM_ID + if (dp_tunnel_settings->estimated_bw > router_sets[j].estimated_bw) + router_sets[j].estimated_bw = dp_tunnel_settings->estimated_bw; - /* validate against each Host Router max BW */ - for (hr_index = 0; hr_index < MAX_HR_NUM; ++hr_index) { - if (bw_needed_per_hr[hr_index]) { - host_router_total_dp_bw = get_host_router_total_dp_tunnel_bw(link[0]->dc, hr_index); - if (bw_needed_per_hr[hr_index] > host_router_total_dp_bw) { - ret = false; + router_sets[j].required_bw += link_bw_granularity; + router_sets[j].dpia_count++; break; } } } - return ret; -} + // Validate bandwidth for each unique router found. + for (i = 0; i < router_count; i++) { + uint32_t total_bw = 0; -int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link) -{ - int dp_overhead = 0, link_mst_overhead = 0; + if (router_sets[i].is_valid == false) + break; - if (!link_dp_is_bw_alloc_available(link)) - return dp_overhead; + // Determine the total available bandwidth for the current router based on aggregated data + if ((router_sets[i].dpia_count == 1) || (router_sets[i].allocated_bw == 0)) + total_bw = router_sets[i].estimated_bw; + else + total_bw = router_sets[i].allocated_bw + router_sets[i].remaining_bw; - /* if its mst link, add MTPH overhead */ - if ((link->type == dc_connection_mst_branch) && - !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { - /* For 8b/10b encoding: MTP is 64 time slots long, slot 0 is used for MTPH - * MST overhead is 1/64 of link bandwidth (excluding any overhead) - */ - const struct dc_link_settings *link_cap = - dc_link_get_link_cap(link); - uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate * - (uint32_t)link_cap->lane_count * - LINK_RATE_REF_FREQ_IN_KHZ * 8; - link_mst_overhead = (link_bw_in_kbps / 64) + ((link_bw_in_kbps % 64) ? 
1 : 0); + if (router_sets[i].required_bw > total_bw) { + is_success = false; + break; + } } - /* add all the overheads */ - dp_overhead = link_mst_overhead; - - return dp_overhead; + return is_success; } + diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h index 6df9b946b00f..41efcb3e44e2 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h @@ -28,10 +28,6 @@ #include "link.h" -/* Number of Host Routers per motherboard is 2 */ -#define MAX_HR_NUM 2 -/* Number of DPIA per host router is 2 */ -#define MAX_DPIA_NUM (MAX_HR_NUM * 2) /* * Host Router BW type @@ -42,14 +38,24 @@ enum bw_type { HOST_ROUTER_BW_INVALID, }; +struct usb4_router_validation_set { + bool is_valid; + uint8_t cm_id; + uint8_t dpia_count; + uint32_t required_bw; + uint32_t allocated_bw; + uint32_t estimated_bw; + uint32_t remaining_bw; +}; + /* - * Enable BW Allocation Mode Support from the DP-Tx side + * Enable USB4 DP BW allocation mode * * @link: pointer to the dc_link struct instance * * return: SUCCESS or FAILURE */ -bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link); +bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link); /* * Allocates only what the stream needs for bw, so if: @@ -74,25 +80,13 @@ void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw); /* - * Handle the validation of total BW here and confirm that the bw used by each - * DPIA doesn't exceed available BW for each host router (HR) - * - * @link[]: array of link pointer to all possible DPIA links - * @bw_needed[]: bw needed for each DPIA link based on timing - * @num_dpias: Number of DPIAs for the above 2 arrays. Should always be <= MAX_DPIA_NUM - * - * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE - */ -bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, const unsigned int num_dpias); - -/* * Obtain all the DP overheads in dp tunneling for the dpia link * * @link: pointer to the dc_link struct instance * * return: DP overheads in DP tunneling */ -int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link); +uint32_t link_dpia_get_dp_overhead(const struct dc_link *link); /* * Handle DP BW allocation status register @@ -104,4 +98,15 @@ int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link); */ void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status); +/* + * Aggregates the DPIA bandwidth usage for the respective USB4 Router. 
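+ * DPIAs that report the same CM_ID are aggregated against a single
+ * router bandwidth budget.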
+ *
+ * @dpia_link_sets: pointer to an array of dc_validation_dpia_set
+ * @count: number of DPIA validation sets
+ *
+ * return: true if validation succeeds
+ */
+bool link_dpia_validate_dp_tunnel_bandwidth(const struct dc_validation_dpia_set *dpia_link_sets, uint8_t count);
+
 #endif /* DC_INC_LINK_DP_DPIA_BW_H_ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index 5be00e4ce10b..693477413347 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -229,6 +229,10 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
 	link->replay_settings.config.replay_error_status.raw |= replay_error_status.raw;
+	/* Increment desync error counter if a desync error is detected */
+	if (replay_configuration.bits.DESYNC_ERROR_STATUS)
+		link->replay_settings.replay_desync_error_fail_count++;
+
 	if (link->replay_settings.config.force_disable_desync_error_check)
 		return;
@@ -240,9 +244,6 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
 		&replay_configuration.raw,
 		sizeof(replay_configuration.raw));
-	/* Update desync error counter */
-	link->replay_settings.replay_desync_error_fail_count++;
-
 	/* Acknowledge and clear error bits */
 	dm_helpers_dp_write_dpcd(
 		link->ctx,
@@ -351,7 +352,7 @@ enum dc_status dp_read_hpd_rx_irq_data(
 		irq_data->raw,
 		DP_SINK_STATUS - DP_SINK_COUNT + 1);
-	if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+	if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling) {
 		retval = core_link_read_dpcd(
 			link, DP_LINK_SERVICE_IRQ_VECTOR_ESI0,
 			&irq_data->bytes.link_service_irq_esi0.raw, 1);
@@ -520,7 +521,7 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
 			dp_trace_link_loss_increment(link);
 	}
-	if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+	if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling) {
 		if (hpd_irq_dpcd_data.bytes.link_service_irq_esi0.bits.DP_LINK_TUNNELING_IRQ)
 			dp_handle_tunneling_irq(link);
 	}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index ef358afdfb65..2dc1a660e504 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -785,7 +785,6 @@ void override_training_settings(
 		lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR;
 
 	dp_get_lttpr_mode_override(link, &lt_settings->lttpr_mode);
-
 }
 
 enum dc_dp_training_pattern decide_cr_training_pattern(
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
index 34d2e097ca2e..66d0fb1b9b9d 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
@@ -35,6 +35,17 @@
 #define DC_LOGGER \
 	link->ctx->logger
 
+static void get_default_8b_10b_lttpr_aux_rd_interval(
+	union training_aux_rd_interval *training_rd_interval)
+{
+	/* LTTPRs are required to program DPCD 0000Eh to 0x4 (16ms) upon AUX
+	 * read reply to this register. Since old sinks with DPCD rev 1.1
+	 * and earlier may not support this register, assume the mandatory
+	 * value is programmed by the LTTPR to avoid AUX timeout issues.
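+	 * (A value of 0x4 in DPCD 0000Eh encodes 4 x 4ms = 16ms, matching the
+	 * TRAINIG_AUX_RD_INTERVAL * 4000 microsecond conversion used below.)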
+ */ + training_rd_interval->raw = 0x4; +} + static int32_t get_cr_training_aux_rd_interval(struct dc_link *link, const struct dc_link_settings *link_settings, enum lttpr_mode lttpr_mode) @@ -43,17 +54,22 @@ static int32_t get_cr_training_aux_rd_interval(struct dc_link *link, uint32_t wait_in_micro_secs = 100; memset(&training_rd_interval, 0, sizeof(training_rd_interval)); - if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING && - link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { - core_link_read_dpcd( - link, - DP_TRAINING_AUX_RD_INTERVAL, - (uint8_t *)&training_rd_interval, - sizeof(training_rd_interval)); - if (lttpr_mode != LTTPR_MODE_NON_TRANSPARENT) - wait_in_micro_secs = 400; - if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) - wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; + if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) { + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) + core_link_read_dpcd( + link, + DP_TRAINING_AUX_RD_INTERVAL, + (uint8_t *)&training_rd_interval, + sizeof(training_rd_interval)); + else if (dp_is_lttpr_present(link)) + get_default_8b_10b_lttpr_aux_rd_interval(&training_rd_interval); + + if (training_rd_interval.raw != 0) { + if (lttpr_mode != LTTPR_MODE_NON_TRANSPARENT) + wait_in_micro_secs = 400; + if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) + wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; + } } return wait_in_micro_secs; } @@ -71,13 +87,15 @@ static uint32_t get_eq_training_aux_rd_interval( DP_128B132B_TRAINING_AUX_RD_INTERVAL, (uint8_t *)&training_rd_interval, sizeof(training_rd_interval)); - } else if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING && - link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { - core_link_read_dpcd( - link, - DP_TRAINING_AUX_RD_INTERVAL, - (uint8_t *)&training_rd_interval, - sizeof(training_rd_interval)); + } else if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) { + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) + core_link_read_dpcd( + link, + DP_TRAINING_AUX_RD_INTERVAL, + (uint8_t *)&training_rd_interval, + sizeof(training_rd_interval)); + else if (dp_is_lttpr_present(link)) + get_default_8b_10b_lttpr_aux_rd_interval(&training_rd_interval); } switch (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) { @@ -124,6 +142,14 @@ void decide_8b_10b_training_settings( lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link); lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting, lt_settings->lttpr_mode); dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + + /* Some embedded LTTPRs rely on receiving TPS2 before LT to interop reliably with sensitive VGA dongles + * This allows these LTTPRs to minimize freq/phase and skew variation during lock and deskew sequences + */ + if ((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == + AMD_EXT_DISPLAY_PATH_CAPS__DP_EARLY_8B10B_TPS2) { + lt_settings->lttpr_early_tps2 = true; + } } enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link) @@ -155,6 +181,42 @@ enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link) return LTTPR_MODE_NON_LTTPR; } +static void set_link_settings_and_perform_early_tps2_retimer_pre_lt_sequence(struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + uint32_t lttpr_count) +{ + /* Vendor-specific LTTPR early TPS2 sequence: + * 1. Output TPS2 + * 2. 
Wait 400us + * 3. Set link settings as usual + * 4. Write TPS1 to DP_TRAINING_PATTERN_SET_PHY_REPEATERx targeting LTTPR closest to host + * 5. Wait 1ms + * 6. Begin link training as usual + * */ + + uint32_t closest_lttpr_address_offset = dp_get_closest_lttpr_offset(lttpr_count); + + union dpcd_training_pattern dpcd_pattern = {0}; + + dpcd_pattern.v1_4.TRAINING_PATTERN_SET = 1; + dpcd_pattern.v1_4.SCRAMBLING_DISABLE = 1; + + DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS2. Wait 400us.\n", __func__); + + dp_set_hw_training_pattern(link, link_res, DP_TRAINING_PATTERN_SEQUENCE_2, DPRX); + + dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); + + udelay(400); + + dpcd_set_link_settings(link, lt_settings); + + core_link_write_dpcd(link, DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + closest_lttpr_address_offset, &dpcd_pattern.raw, 1); + + udelay(1000); + } + enum link_training_result perform_8b_10b_clock_recovery_sequence( struct dc_link *link, const struct link_resource *link_res, @@ -365,7 +427,7 @@ enum link_training_result dp_perform_8b_10b_link_training( { enum link_training_result status = LINK_TRAINING_SUCCESS; - uint8_t repeater_cnt; + uint8_t repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); uint8_t repeater_id; uint8_t lane = 0; @@ -373,14 +435,16 @@ enum link_training_result dp_perform_8b_10b_link_training( start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX); /* 1. set link rate, lane count and spread. */ - dpcd_set_link_settings(link, lt_settings); + if (lt_settings->lttpr_early_tps2) + set_link_settings_and_perform_early_tps2_retimer_pre_lt_sequence(link, link_res, lt_settings, repeater_cnt); + else + dpcd_set_link_settings(link, lt_settings); if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { /* 2. perform link training (set link training done * to false is done as well) */ - repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS); repeater_id--) { diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index 1e4adbc764ea..e7927b8f5ba3 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -161,6 +161,9 @@ bool edp_set_backlight_level_nits(struct dc_link *link, link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; + if (link->is_dds && !link->dpcd_caps.panel_luminance_control) + return true; + // use internal backlight control if dmub capabilities are not present if (link->backlight_control_type == BACKLIGHT_CONTROL_VESA_AUX && !link->dc->caps.dmub_caps.aux_backlight_support) { @@ -173,6 +176,15 @@ bool edp_set_backlight_level_nits(struct dc_link *link, target_luminance = (struct target_luminance_value *)&backlight_millinits; + //make sure we disable AMD ABC first. 
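+	//The AMD (DP_SOURCE_BACKLIGHT_CONTROL) and VESA luminance interfaces
+	//are not meant to be active at the same time; the VESA enable bit is
+	//likewise cleared before the AMD path is used further below.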
+ core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL, + &backlight_enable, sizeof(uint8_t)); + if (backlight_enable) { + backlight_enable = 0; + core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL, + &backlight_enable, 1); + } + core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &backlight_enable, sizeof(uint8_t)); @@ -193,10 +205,22 @@ bool edp_set_backlight_level_nits(struct dc_link *link, *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms; uint8_t backlight_control = isHDR ? 1 : 0; + uint8_t backlight_enable = 0; + // OLEDs have no PWM, they can only use AUX if (link->dpcd_sink_ext_caps.bits.oled == 1) backlight_control = 1; + //make sure we disable VESA ABC first. + core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, + &backlight_enable, sizeof(uint8_t)); + + if (backlight_enable & DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE) { + backlight_enable &= ~DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE; + core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, + &backlight_enable, sizeof(backlight_enable)); + } + if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, (uint8_t *)(&dpcd_backlight_set), sizeof(dpcd_backlight_set)) != DC_OK) @@ -222,6 +246,8 @@ bool edp_get_backlight_level_nits(struct dc_link *link, link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; + if (link->is_dds) + return false; if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK, dpcd_backlight_get.raw, sizeof(union dpcd_source_backlight_get))) @@ -248,6 +274,8 @@ bool edp_backlight_enable_aux(struct dc_link *link, bool enable) link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; + if (link->is_dds) + return true; if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE, &backlight_enable, 1) != DC_OK) return false; @@ -524,7 +552,7 @@ bool edp_set_backlight_level(const struct dc_link *link, struct dc *dc = link->ctx->dc; uint32_t backlight_pwm_u16_16 = backlight_level_params->backlight_pwm_u16_16; uint32_t frame_ramp = backlight_level_params->frame_ramp; - DC_LOGGER_INIT(link->ctx->logger); + DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", backlight_pwm_u16_16, backlight_pwm_u16_16); @@ -916,7 +944,7 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active, // TODO: Handle mux change case if force_static is set // If force_static is set, just change the replay_allow_active state directly if (replay != NULL && link->replay_settings.replay_feature_enabled) - replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst); + replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst, link); link->replay_settings.replay_allow_active = *allow_active; } @@ -1022,6 +1050,9 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream &alpm_config.raw, sizeof(alpm_config.raw)); } + + link->replay_settings.config.replay_video_conferencing_optimization_enabled = false; + return true; } @@ -1130,11 +1161,11 @@ static struct abm *get_abm_from_stream_res(const struct dc_link *link) struct abm *abm = NULL; for (i = 0; i < MAX_PIPES; i++) { - struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i]; - struct dc_stream_state *stream = pipe_ctx.stream; + struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + struct dc_stream_state *stream = pipe_ctx->stream; if (stream && stream->link == link) { - abm = pipe_ctx.stream_res.abm; + abm = pipe_ctx->stream_res.abm; break; } } @@ -1170,6 +1201,16 @@ int edp_get_target_backlight_pwm(const 
struct dc_link *link) return (int) abm->funcs->get_target_backlight(abm); } +bool is_smartmux_suported(struct dc_link *link) +{ + if (link->dc->caps.is_apu) + return false; + if (!link->dc->config.smart_mux_version) + return false; + + return true; +} + static void edp_set_assr_enable(const struct dc *pDC, struct dc_link *link, struct link_resource *link_res, bool enable) { diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h index bcfa6ac5d4e7..4a475d5b9dde 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -30,6 +30,7 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link); void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode); bool set_default_brightness_aux(struct dc_link *link); +bool is_smartmux_suported(struct dc_link *link); void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd); int edp_get_backlight_level(const struct dc_link *link); bool edp_get_backlight_level_nits(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/mpc/Makefile b/drivers/gpu/drm/amd/display/dc/mpc/Makefile index 1e2e66508192..5402c3529f5e 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/mpc/Makefile @@ -68,5 +68,5 @@ MPC_DCN401 = dcn401_mpc.o AMD_DAL_MPC_DCN401 = $(addprefix $(AMDDALPATH)/dc/mpc/dcn401/,$(MPC_DCN401)) AMD_DISPLAY_FILES += $(AMD_DAL_MPC_DCN401) -endif +endif diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c index a0e9e9f0441a..6f0e017a8ae2 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c @@ -30,7 +30,6 @@ #include "basics/conversion.h" #include "dcn10/dcn10_cm_common.h" #include "dc.h" -#include "dcn401/dcn401_mpc.h" #define REG(reg)\ mpc30->mpc_regs->reg @@ -370,275 +369,279 @@ void mpc32_program_shaper_luta_settings( MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y); curve = params->arr_curve_points; - REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_0_1[mpcc_id], 0, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_2_3[mpcc_id], 0, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_4_5[mpcc_id], 0, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_6_7[mpcc_id], 0, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - 
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_8_9[mpcc_id], 0, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_10_11[mpcc_id], 0, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_12_13[mpcc_id], 0, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_14_15[mpcc_id], 0, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_16_17[mpcc_id], 0, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_18_19[mpcc_id], 0, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_20_21[mpcc_id], 0, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_22_23[mpcc_id], 0, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_24_25[mpcc_id], 0, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_26_27[mpcc_id], 0, + if (curve) { + REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_0_1[mpcc_id], 0, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, 
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_28_29[mpcc_id], 0, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_30_31[mpcc_id], 0, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_32_33[mpcc_id], 0, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); -} - - -void mpc32_program_shaper_lutb_settings( - struct mpc *mpc, - const struct pwl_params *params, - uint32_t mpcc_id) -{ - const struct gamma_curve *curve; - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_B[mpcc_id], 0, - MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, - MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); - REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_G[mpcc_id], 0, - MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].green.custom_float_x, - MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); - REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_R[mpcc_id], 0, - MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].red.custom_float_x, - MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); - - REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_B[mpcc_id], 0, - MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, - MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); - REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_G[mpcc_id], 0, - MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].green.custom_float_x, - MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].green.custom_float_y); - REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_R[mpcc_id], 0, - MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].red.custom_float_x, - MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y); - - curve = params->arr_curve_points; - REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_0_1[mpcc_id], 0, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_2_3[mpcc_id], 0, + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_2_3[mpcc_id], 0, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_4_5[mpcc_id], 0, + curve += 2; + 
REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_4_5[mpcc_id], 0, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_6_7[mpcc_id], 0, + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_6_7[mpcc_id], 0, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_8_9[mpcc_id], 0, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_8_9[mpcc_id], 0, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_10_11[mpcc_id], 0, + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_10_11[mpcc_id], 0, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_12_13[mpcc_id], 0, + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_12_13[mpcc_id], 0, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_14_15[mpcc_id], 0, + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_14_15[mpcc_id], 0, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_16_17[mpcc_id], 0, + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_16_17[mpcc_id], 0, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_18_19[mpcc_id], 0, + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_18_19[mpcc_id], 0, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_20_21[mpcc_id], 0, + curve += 
2; + REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_20_21[mpcc_id], 0, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_22_23[mpcc_id], 0, + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_22_23[mpcc_id], 0, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_24_25[mpcc_id], 0, + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_24_25[mpcc_id], 0, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_26_27[mpcc_id], 0, + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_26_27[mpcc_id], 0, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_28_29[mpcc_id], 0, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_28_29[mpcc_id], 0, + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_30_31[mpcc_id], 0, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_30_31[mpcc_id], 0, + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_32_33[mpcc_id], 0, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + } +} + + +void mpc32_program_shaper_lutb_settings( + struct mpc *mpc, + const struct pwl_params *params, + uint32_t mpcc_id) +{ + const struct gamma_curve *curve; + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_B[mpcc_id], 0, + MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, + MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); + REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_G[mpcc_id], 0, + MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].green.custom_float_x, + MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); + REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_R[mpcc_id], 0, + MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].red.custom_float_x, + MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); - 
curve += 2; - REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_32_33[mpcc_id], 0, + REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_B[mpcc_id], 0, + MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, + MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); + REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_G[mpcc_id], 0, + MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].green.custom_float_x, + MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].green.custom_float_y); + REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_R[mpcc_id], 0, + MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].red.custom_float_x, + MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y); + + curve = params->arr_curve_points; + if (curve) { + REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_0_1[mpcc_id], 0, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_2_3[mpcc_id], 0, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_4_5[mpcc_id], 0, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_6_7[mpcc_id], 0, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_8_9[mpcc_id], 0, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_10_11[mpcc_id], 0, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_12_13[mpcc_id], 0, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_14_15[mpcc_id], 0, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + 
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_16_17[mpcc_id], 0, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_18_19[mpcc_id], 0, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_20_21[mpcc_id], 0, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_22_23[mpcc_id], 0, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_24_25[mpcc_id], 0, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_26_27[mpcc_id], 0, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_28_29[mpcc_id], 0, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_30_31[mpcc_id], 0, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_32_33[mpcc_id], 0, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + } } @@ -875,7 +878,7 @@ void mpc32_set3dlut_ram10( } -static void mpc32_set_3dlut_mode( +void mpc32_set_3dlut_mode( struct mpc *mpc, enum dc_lut_mode mode, bool is_color_channel_12bits, @@ -1018,8 +1021,6 @@ static const struct mpc_funcs dcn32_mpc_funcs = { .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut, .get_mpc_out_mux = mpc1_get_mpc_out_mux, 
.set_bg_color = mpc1_set_bg_color, - .set_movable_cm_location = mpc401_set_movable_cm_location, - .populate_lut = mpc401_populate_lut, }; diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h index 9622518826c9..8c9b20bcca85 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h @@ -391,4 +391,12 @@ void mpc32_select_3dlut_ram( enum dc_lut_mode mode, bool is_color_channel_12bits, uint32_t mpcc_id); + +void mpc32_set_3dlut_mode( + struct mpc *mpc, + enum dc_lut_mode mode, + bool is_color_channel_12bits, + bool is_lut_size17x17x17, + uint32_t mpcc_id); + #endif //__DC_MPCC_DCN32_H__ diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c index ad67197557ca..f3fb3fe13757 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c @@ -47,16 +47,6 @@ void mpc401_update_3dlut_fast_load_select(struct mpc *mpc, int mpcc_id, int hubp REG_SET(MPCC_MCM_3DLUT_FAST_LOAD_SELECT[mpcc_id], 0, MPCC_MCM_3DLUT_FL_SEL, hubp_idx); } -void mpc401_get_3dlut_fast_load_status(struct mpc *mpc, int mpcc_id, uint32_t *done, uint32_t *soft_underflow, uint32_t *hard_underflow) -{ - struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc); - - REG_GET_3(MPCC_MCM_3DLUT_FAST_LOAD_STATUS[mpcc_id], - MPCC_MCM_3DLUT_FL_DONE, done, - MPCC_MCM_3DLUT_FL_SOFT_UNDERFLOW, soft_underflow, - MPCC_MCM_3DLUT_FL_HARD_UNDERFLOW, hard_underflow); -} - void mpc401_set_movable_cm_location(struct mpc *mpc, enum mpcc_movable_cm_location location, int mpcc_id) { struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc); @@ -304,7 +294,7 @@ void mpc401_program_3dlut_size(struct mpc *mpc, bool is_17x17x17, int mpcc_id) REG_UPDATE(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_SIZE, is_17x17x17 ? 
0 : 1); } -static void program_gamut_remap( +void mpc_program_gamut_remap( struct mpc *mpc, unsigned int mpcc_id, const uint16_t *regval, @@ -436,7 +426,7 @@ void mpc401_set_gamut_remap( if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) { /* Bypass / Disable if type is bypass or hw */ - program_gamut_remap(mpc, mpcc_id, NULL, + mpc_program_gamut_remap(mpc, mpcc_id, NULL, adjust->mpcc_gamut_remap_block_id, MPCC_GAMUT_REMAP_MODE_SELECT_0); } else { struct fixed31_32 arr_matrix[12]; @@ -470,12 +460,12 @@ void mpc401_set_gamut_remap( else mode_select = MPCC_GAMUT_REMAP_MODE_SELECT_2; - program_gamut_remap(mpc, mpcc_id, arr_reg_val, + mpc_program_gamut_remap(mpc, mpcc_id, arr_reg_val, adjust->mpcc_gamut_remap_block_id, mode_select); } } -static void read_gamut_remap(struct mpc *mpc, +void mpc_read_gamut_remap(struct mpc *mpc, int mpcc_id, uint16_t *regval, enum mpcc_gamut_remap_id gamut_remap_block_id, @@ -571,9 +561,9 @@ void mpc401_get_gamut_remap(struct mpc *mpc, struct mpc_grph_gamut_adjustment *adjust) { uint16_t arr_reg_val[12] = {0}; - uint32_t mode_select; + uint32_t mode_select = MPCC_GAMUT_REMAP_MODE_SELECT_0; - read_gamut_remap(mpc, mpcc_id, arr_reg_val, adjust->mpcc_gamut_remap_block_id, &mode_select); + mpc_read_gamut_remap(mpc, mpcc_id, arr_reg_val, adjust->mpcc_gamut_remap_block_id, &mode_select); if (mode_select == MPCC_GAMUT_REMAP_MODE_SELECT_0) { adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; @@ -618,7 +608,6 @@ static const struct mpc_funcs dcn401_mpc_funcs = { .set_bg_color = mpc1_set_bg_color, .set_movable_cm_location = mpc401_set_movable_cm_location, .update_3dlut_fast_load_select = mpc401_update_3dlut_fast_load_select, - .get_3dlut_fast_load_status = mpc401_get_3dlut_fast_load_status, .populate_lut = mpc401_populate_lut, .program_lut_read_write_control = mpc401_program_lut_read_write_control, .program_lut_mode = mpc401_program_lut_mode, diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h index ce6fbcf14d7a..eb0c68d0b0c7 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h @@ -241,23 +241,22 @@ void mpc401_update_3dlut_fast_load_select( int mpcc_id, int hubp_idx); -void mpc401_get_3dlut_fast_load_status( +void mpc_program_gamut_remap( struct mpc *mpc, + unsigned int mpcc_id, + const uint16_t *regval, + enum mpcc_gamut_remap_id gamut_remap_block_id, + enum mpcc_gamut_remap_mode_select mode_select); + +void mpc_read_gamut_remap(struct mpc *mpc, int mpcc_id, - uint32_t *done, - uint32_t *soft_underflow, - uint32_t *hard_underflow); + uint16_t *regval, + enum mpcc_gamut_remap_id gamut_remap_block_id, + uint32_t *mode_select); void mpc401_update_3dlut_fast_load_select( struct mpc *mpc, int mpcc_id, int hubp_idx); -void mpc401_get_3dlut_fast_load_status( - struct mpc *mpc, - int mpcc_id, - uint32_t *done, - uint32_t *soft_underflow, - uint32_t *hard_underflow); - #endif diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c index 81857ce6d68d..e7a90a437fff 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c @@ -502,7 +502,7 @@ void optc2_get_last_used_drr_vtotal(struct timing_generator *optc, uint32_t *ref REG_GET(OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, refresh_rate); } -static struct timing_generator_funcs dcn20_tg_funcs = { +static const struct 
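Two things happen in the mpc401 gamut-remap hunks above. First, the file-local helpers program_gamut_remap() and read_gamut_remap() are promoted to mpc_program_gamut_remap()/mpc_read_gamut_remap() and declared in dcn401_mpc.h, the usual rename-and-export step when another MPC generation needs a formerly static helper. Second, mpc401_get_gamut_remap() now initializes mode_select to MPCC_GAMUT_REMAP_MODE_SELECT_0 before the read, so a reader that returns without writing the out-parameter leaves the caller on the bypass branch instead of branching on an indeterminate value. The defaulted out-parameter pattern in isolation (names illustrative):

enum remap_mode { REMAP_BYPASS = 0, REMAP_COEF_A, REMAP_COEF_B };

/* the reader may return before writing *mode (unknown block id, etc.) */
static void read_remap_mode(int block_id, enum remap_mode *mode)
{
	if (block_id < 0)
		return;		/* *mode deliberately left untouched */
	*mode = REMAP_COEF_A;
}

static enum remap_mode get_remap_mode(int block_id)
{
	enum remap_mode mode = REMAP_BYPASS;	/* safe default, as in the hunk */

	read_remap_mode(block_id, &mode);
	return mode;	/* bypass whenever the read bailed out early */
}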
timing_generator_funcs dcn20_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c index f2415eebdc09..772a8bfb949c 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c @@ -129,7 +129,7 @@ static void optc201_get_optc_source(struct timing_generator *optc, *num_of_src_opp = 1; } -static struct timing_generator_funcs dcn201_tg_funcs = { +static const struct timing_generator_funcs dcn201_tg_funcs = { .validate_timing = optc201_validate_timing, .program_timing = optc1_program_timing, .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c index 78b58a449fa4..ee4665aa49e9 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c @@ -357,7 +357,7 @@ void optc3_tg_init(struct timing_generator *optc) optc1_clear_optc_underflow(optc); } -static struct timing_generator_funcs dcn30_tg_funcs = { +static const struct timing_generator_funcs dcn30_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c index 65e9089b7f31..38f85bc2681a 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c @@ -109,7 +109,7 @@ void optc301_setup_manual_trigger(struct timing_generator *optc) OTG_TRIGA_CLEAR, 1); } -static struct timing_generator_funcs dcn30_tg_funcs = { +static const struct timing_generator_funcs dcn30_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c index ef536f37b4ed..4f1830ba619f 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c @@ -315,7 +315,7 @@ void optc31_read_otg_state(struct timing_generator *optc, s->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL); } -static struct timing_generator_funcs dcn31_tg_funcs = { +static const struct timing_generator_funcs dcn31_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c index 0e603bad0d12..4a2caca37255 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c @@ -192,7 +192,7 @@ static void optc314_set_h_timing_div_manual_mode(struct timing_generator *optc, } -static struct timing_generator_funcs dcn314_tg_funcs = { +static const struct timing_generator_funcs dcn314_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, diff 
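The optc hunks above and below are one mechanical transform applied per DCN generation: each dcn*_tg_funcs table of function pointers becomes static const. A const ops table is emitted into .rodata, so a stray write to a hook fails to compile (or faults at runtime through a cast) instead of silently redirecting a hardware callback, and it costs nothing since these tables are never modified after build. The pattern in isolation (illustrative names):

struct tg_ops {
	bool (*validate_timing)(const void *timing);
	void (*program_timing)(const void *timing);
};

static bool validate_timing_impl(const void *timing) { return timing != 0; }
static void program_timing_impl(const void *timing) { (void)timing; }

static const struct tg_ops tg_ops = {	/* lives in .rodata */
	.validate_timing = validate_timing_impl,
	.program_timing = program_timing_impl,
};
/* tg_ops.program_timing = hook;  <- now a compile-time error */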
--git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c index 2cdd19ba634b..b2b226bcd871 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c @@ -297,7 +297,7 @@ static void optc32_set_drr( optc32_setup_manual_trigger(optc); } -static struct timing_generator_funcs dcn32_tg_funcs = { +static const struct timing_generator_funcs dcn32_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c index b86fe2b094f8..72bff94cb57d 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c @@ -428,7 +428,7 @@ static void optc35_set_long_vtotal( } } -static struct timing_generator_funcs dcn35_tg_funcs = { +static const struct timing_generator_funcs dcn35_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, @@ -507,6 +507,7 @@ void dcn35_timing_generator_init(struct optc *optc1) optc1->min_v_blank_interlace = 5; optc1->min_h_sync_width = 4; optc1->min_v_sync_width = 1; + optc1->max_frame_count = 0xFFFFFF; dcn35_timing_generator_set_fgcg( optc1, CTX->dc->debug.enable_fine_grain_clock_gating.bits.optc); diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c index 382ac18e7854..ff79c38287df 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c @@ -459,7 +459,7 @@ bool optc401_wait_update_lock_status(struct timing_generator *tg, bool locked) return true; } -static struct timing_generator_funcs dcn401_tg_funcs = { +static const struct timing_generator_funcs dcn401_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h index f2ba76c1e0c0..782316348941 100644 --- a/drivers/gpu/drm/amd/display/dc/os_types.h +++ b/drivers/gpu/drm/amd/display/dc/os_types.h @@ -31,6 +31,7 @@ #include <linux/kgdb.h> #include <linux/delay.h> #include <linux/mm.h> +#include <linux/vmalloc.h> #include <asm/byteorder.h> diff --git a/drivers/gpu/drm/amd/display/dc/resource/Makefile b/drivers/gpu/drm/amd/display/dc/resource/Makefile index b8cddef6b3d2..5b42da8b79c2 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/Makefile +++ b/drivers/gpu/drm/amd/display/dc/resource/Makefile @@ -27,6 +27,24 @@ # DCE ############################################################################### +ifdef CONFIG_DRM_AMD_DC_SI +RESOURCE_DCE60 = dce60_resource.o + +AMD_DAL_RESOURCE_DCE60 = $(addprefix $(AMDDALPATH)/dc/resource/dce60/,$(RESOURCE_DCE60)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE60) +endif + +############################################################################### + +RESOURCE_DCE80 = dce80_resource.o + +AMD_DAL_RESOURCE_DCE80 = $(addprefix $(AMDDALPATH)/dc/resource/dce80/,$(RESOURCE_DCE80)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE80) + +############################################################################### + RESOURCE_DCE100 = 
dce100_resource.o AMD_DAL_RESOURCE_DCE100 = $(addprefix $(AMDDALPATH)/dc/resource/dce100/,$(RESOURCE_DCE100)) @@ -57,14 +75,6 @@ AMD_DAL_RESOURCE_DCE120 = $(addprefix $(AMDDALPATH)/dc/resource/dce120/,$(RESOUR AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE120) -############################################################################### - -RESOURCE_DCE80 = dce80_resource.o - -AMD_DAL_RESOURCE_DCE80 = $(addprefix $(AMDDALPATH)/dc/resource/dce80/,$(RESOURCE_DCE80)) - -AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE80) - ifdef CONFIG_DRM_AMD_DC_FP ############################################################################### # DCN diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c index e698543ec937..3a51be63f020 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c @@ -836,10 +836,10 @@ static enum dc_status build_mapped_resource( return DC_OK; } -static bool dce100_validate_bandwidth( +static enum dc_status dce100_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i; bool at_least_one_pipe = false; @@ -858,7 +858,7 @@ static bool dce100_validate_bandwidth( context->bw_ctx.bw.dce.yclk_khz = 0; } - return true; + return DC_OK; } static bool dce100_validate_surface_sets( @@ -1069,7 +1069,7 @@ static bool dce100_resource_construct( pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; dc->caps.max_downscale_ratio = 200; dc->caps.i2c_speed_in_khz = 40; - dc->caps.i2c_speed_in_khz = 40; + dc->caps.i2c_speed_in_khz_hdcp = 40; dc->caps.max_cursor_size = 128; dc->caps.min_horizontal_blanking_period = 80; dc->caps.dual_link_dvi = true; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c index 035c6cfdaee5..cccde5a6f3cd 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c @@ -960,10 +960,10 @@ static enum dc_status build_mapped_resource( return DC_OK; } -static bool dce110_validate_bandwidth( +static enum dc_status dce110_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool result = false; @@ -1031,7 +1031,7 @@ static bool dce110_validate_bandwidth( context->bw_ctx.bw.dce.yclk_khz, context->bw_ctx.bw.dce.blackout_recovery_time_us); } - return result; + return result ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } static enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c index 480a50967385..164ba796f64c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c @@ -883,10 +883,10 @@ static enum dc_status build_mapped_resource( return DC_OK; } -bool dce112_validate_bandwidth( +enum dc_status dce112_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool result = false; @@ -952,7 +952,7 @@ bool dce112_validate_bandwidth( context->bw_ctx.bw.dce.yclk_khz, context->bw_ctx.bw.dce.blackout_recovery_time_us); } - return result; + return result ? 
DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } enum dc_status resource_map_phy_clock_resources( diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h index 1f57ebc6f9b4..3efc4c55d2d2 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h @@ -42,10 +42,10 @@ enum dc_status dce112_validate_with_context( struct dc_state *context, struct dc_state *old_context); -bool dce112_validate_bandwidth( +enum dc_status dce112_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); enum dc_status dce112_add_stream_to_ctx( struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c index 889f314cac65..58b59d52dc9d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c @@ -48,7 +48,7 @@ #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" -#include "dce60/dce60_hw_sequencer.h" +#include "dce60/dce60_hwseq.h" #include "dce100/dce100_resource.h" #include "dce/dce_panel_cntl.h" @@ -863,10 +863,10 @@ static void dce60_resource_destruct(struct dce110_resource_pool *pool) } } -static bool dce60_validate_bandwidth( +static enum dc_status dce60_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i; bool at_least_one_pipe = false; @@ -885,7 +885,7 @@ static bool dce60_validate_bandwidth( context->bw_ctx.bw.dce.yclk_khz = 0; } - return true; + return DC_OK; } static bool dce60_validate_surface_sets( diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.h index 5d653a76b0b0..5d653a76b0b0 100644 --- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c index 3d5113f010bb..3e8b0ac11d90 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c @@ -869,10 +869,10 @@ static void dce80_resource_destruct(struct dce110_resource_pool *pool) } } -static bool dce80_validate_bandwidth( +static enum dc_status dce80_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i; bool at_least_one_pipe = false; @@ -891,7 +891,7 @@ static bool dce80_validate_bandwidth( context->bw_ctx.bw.dce.yclk_khz = 0; } - return true; + return DC_OK; } static bool dce80_validate_surface_sets( diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c index e92f14d50adb..652c05c35494 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c @@ -23,6 +23,7 @@ * */ +#include "core_status.h" #include "dm_services.h" #include "dc.h" @@ -1125,18 +1126,18 @@ static void dcn10_destroy_resource_pool(struct resource_pool **pool) *pool = NULL; } -static bool dcn10_validate_bandwidth( +static enum dc_status dcn10_validate_bandwidth( struct dc 
*dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool voltage_supported; DC_FP_START(); - voltage_supported = dcn_validate_bandwidth(dc, context, fast_validate); + voltage_supported = dcn_validate_bandwidth(dc, context, validate_mode); DC_FP_END(); - return voltage_supported; + return voltage_supported ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps) @@ -1245,6 +1246,10 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link( if (link->ep_type == DISPLAY_ENDPOINT_PHY && pool->stream_enc[i]->id == link->link_enc->preferred_engine) return pool->stream_enc[i]; + + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && pool->stream_enc[i]->id == + link->dpia_preferred_eng_id) + return pool->stream_enc[i]; } } diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c index e4eca3e32c1b..f9cbdad3ef37 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c @@ -2007,7 +2007,7 @@ bool dcn20_fast_validate_bw( int *pipe_cnt_out, int *pipe_split_from, int *vlevel_out, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; int split[MAX_PIPES] = { 0 }; @@ -2021,7 +2021,7 @@ bool dcn20_fast_validate_bw( dcn20_merge_pipes_for_validate(dc, context); DC_FP_START(); - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode); DC_FP_END(); *pipe_cnt_out = pipe_cnt; @@ -2124,22 +2124,22 @@ validate_out: return out; } -bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) +enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, + enum dc_validate_mode validate_mode) { bool voltage_supported; display_e2e_pipe_params_st *pipes; pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL); if (!pipes) - return false; + return DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); - voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate, pipes); + voltage_supported = dcn20_validate_bandwidth_fp(dc, context, validate_mode, pipes); DC_FP_END(); kfree(pipes); - return voltage_supported; + return voltage_supported ? 
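From dce100 down through every dcn resource file in this diff, the hunks migrate a single callback contract: validate_bandwidth() used to return bool and take bool fast_validate; it now returns enum dc_status, so callers can distinguish failure causes such as DC_FAIL_BANDWIDTH_VALIDATE or DC_FAIL_HW_CURSOR_SUPPORT, and takes enum dc_validate_mode, whose three members named in this series replace the old boolean. Judging by the replaced conditionals, the old fast_validate == false corresponds to DC_VALIDATE_MODE_AND_PROGRAMMING and the other two modes cover the old fast path. A reduced sketch of the mapping (enumerator lists abbreviated, values and ordering illustrative):

#include <stdbool.h>

enum dc_status { DC_OK, DC_FAIL_BANDWIDTH_VALIDATE,
		 DC_FAIL_HW_CURSOR_SUPPORT, DC_NOT_SUPPORTED };

enum dc_validate_mode {
	DC_VALIDATE_MODE_ONLY,			/* old fast_validate == true */
	DC_VALIDATE_MODE_AND_STATE_INDEX,	/* also a fast mode */
	DC_VALIDATE_MODE_AND_PROGRAMMING,	/* old fast_validate == false */
};

/* hypothetical shim for a legacy bool-based caller */
static enum dc_status validate_compat(
	enum dc_status (*validate)(enum dc_validate_mode), bool fast_validate)
{
	return validate(fast_validate ? DC_VALIDATE_MODE_ONLY
				      : DC_VALIDATE_MODE_AND_PROGRAMMING);
}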
DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } struct pipe_ctx *dcn20_acquire_free_pipe_for_layer( @@ -2736,6 +2736,8 @@ static bool dcn20_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 2; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h index 4cee3fa11a7f..e997d35a8b86 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h @@ -119,7 +119,7 @@ void dcn20_set_mcif_arb_params( struct dc_state *context, display_e2e_pipe_params_st *pipes, int pipe_cnt); -bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); +enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, enum dc_validate_mode validate_mode); void dcn20_merge_pipes_for_validate( struct dc *dc, struct dc_state *context); @@ -158,7 +158,7 @@ bool dcn20_fast_validate_bw( int *pipe_cnt_out, int *pipe_split_from, int *vlevel_out, - bool fast_validate); + enum dc_validate_mode validate_mode); enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream); enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c index 43fa2cb117f3..e4a1338d21e0 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c @@ -1285,6 +1285,8 @@ static bool dcn201_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 2; + dc->cap_funcs = cap_funcs; return true; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c index 4bd5c2278596..918742a42ded 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c @@ -769,7 +769,7 @@ bool dcn21_fast_validate_bw(struct dc *dc, int *pipe_cnt_out, int *pipe_split_from, int *vlevel_out, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; int split[MAX_PIPES] = { 0 }; @@ -783,7 +783,7 @@ bool dcn21_fast_validate_bw(struct dc *dc, dcn20_merge_pipes_for_validate(dc, context); DC_FP_START(); - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode); DC_FP_END(); *pipe_cnt_out = pipe_cnt; @@ -923,22 +923,22 @@ validate_out: * with DC_FP_START()/DC_FP_END(). Use the same approach as for * dcn20_validate_bandwidth in dcn20_resource.c. 
*/ -static bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) +static enum dc_status dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context, + enum dc_validate_mode validate_mode) { bool voltage_supported; display_e2e_pipe_params_st *pipes; pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL); if (!pipes) - return false; + return DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); - voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate, pipes); + voltage_supported = dcn21_validate_bandwidth_fp(dc, context, validate_mode, pipes); DC_FP_END(); kfree(pipes); - return voltage_supported; + return voltage_supported ? DC_OK : DC_NOT_SUPPORTED; } static void dcn21_destroy_resource_pool(struct resource_pool **pool) @@ -1684,6 +1684,8 @@ static bool dcn21_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 2; + dc->cap_funcs = cap_funcs; return true; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h index f7ecc002c2f7..a017fd9854d1 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h @@ -51,6 +51,6 @@ bool dcn21_fast_validate_bw( int *pipe_cnt_out, int *pipe_split_from, int *vlevel_out, - bool fast_validate); + enum dc_validate_mode validate_mode); #endif /* _DCN21_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c index f01ced015072..895349d9ca07 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c @@ -1319,13 +1319,13 @@ static struct clock_source *dcn30_clock_source_create( int dcn30_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; DC_FP_START(); - dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); DC_FP_END(); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { @@ -1627,7 +1627,7 @@ noinline bool dcn30_internal_validate_bw( display_e2e_pipe_params_st *pipes, int *pipe_cnt_out, int *vlevel_out, - bool fast_validate, + enum dc_validate_mode validate_mode, bool allow_self_refresh_only) { bool out = false; @@ -1646,7 +1646,7 @@ noinline bool dcn30_internal_validate_bw( context->bw_ctx.dml.vba.VoltageLevel = 0; context->bw_ctx.dml.vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; dc->res_pool->funcs->update_soc_for_wm_a(dc, context); - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode); if (!pipe_cnt) { out = true; @@ -1655,7 +1655,7 @@ noinline bool dcn30_internal_validate_bw( dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt); - if (!fast_validate || !allow_self_refresh_only) { + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING || !allow_self_refresh_only) { /* * DML favors voltage over p-state, but we're more interested in * supporting p-state over voltage. 
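dcn20_validate_bandwidth() and dcn21_validate_bandwidth() above share a skeleton worth spelling out: the DML pipe array comes from kcalloc() outside the FP section, since a sleepable allocation is illegal between kernel_fpu_begin()/end(); the float-heavy *_fp helper runs strictly inside DC_FP_START()/DC_FP_END(); and the helper's bool is folded into a status on the way out. Note the two files pick different failure codes, DC_FAIL_BANDWIDTH_VALIDATE for dcn20 versus DC_NOT_SUPPORTED for dcn21. The shared shape, with dcnXX_validate_bandwidth_fp() standing in for the per-generation helper:

static enum dc_status dcnXX_validate_bandwidth(struct dc *dc,
		struct dc_state *context, enum dc_validate_mode validate_mode)
{
	display_e2e_pipe_params_st *pipes;
	bool voltage_supported;

	/* sleepable allocation stays outside the FP-protected region */
	pipes = kcalloc(dc->res_pool->pipe_count, sizeof(*pipes), GFP_KERNEL);
	if (!pipes)
		return DC_FAIL_BANDWIDTH_VALIDATE;

	DC_FP_START();		/* FPU state saved; no sleeping from here */
	voltage_supported = dcnXX_validate_bandwidth_fp(dc, context,
							validate_mode, pipes);
	DC_FP_END();

	kfree(pipes);
	return voltage_supported ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}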
We can't support p-state in @@ -1669,7 +1669,7 @@ noinline bool dcn30_internal_validate_bw( vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge); } if (allow_self_refresh_only && - (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states || + (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING || vlevel == context->bw_ctx.dml.soc.num_states || vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported)) { /* * If mode is unsupported or there's still no p-state support @@ -1678,7 +1678,7 @@ noinline bool dcn30_internal_validate_bw( * We don't actually support prefetch mode 2, so require that we * at least support prefetch mode 1. */ - context->bw_ctx.dml.validate_max_state = fast_validate; + context->bw_ctx.dml.validate_max_state = (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING); context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank = dm_allow_self_refresh; @@ -1865,7 +1865,7 @@ noinline bool dcn30_internal_validate_bw( } if (repopulate_pipes) - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode); context->bw_ctx.dml.vba.VoltageLevel = vlevel; *vlevel_out = vlevel; *pipe_cnt_out = pipe_cnt; @@ -1891,8 +1891,6 @@ static int get_refresh_rate(struct dc_state *context) /* check if refresh rate at least 120hz */ timing = &context->streams[0]->timing; - if (timing == NULL) - return 0; h_v_total = timing->h_total * timing->v_total; if (h_v_total == 0) @@ -2037,9 +2035,9 @@ void dcn30_calculate_wm_and_dlg( DC_FP_END(); } -bool dcn30_validate_bandwidth(struct dc *dc, +enum dc_status dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; @@ -2057,7 +2055,7 @@ bool dcn30_validate_bandwidth(struct dc *dc, goto validate_fail; DC_FP_START(); - out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true); + out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode, true); DC_FP_END(); if (pipe_cnt == 0) @@ -2068,7 +2066,7 @@ bool dcn30_validate_bandwidth(struct dc *dc, BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (fast_validate) { + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } @@ -2094,7 +2092,7 @@ validate_out: BW_VAL_TRACE_FINISH(); - return out; + return out ? 
DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) @@ -2588,6 +2586,8 @@ static bool dcn30_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h index 8e6b8b7368fd..2c967fe55712 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h @@ -56,15 +56,15 @@ unsigned int dcn30_calc_max_scaled_time( enum mmhubbub_wbif_mode mode, unsigned int urgent_watermark); -bool dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate); +enum dc_status dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context, + enum dc_validate_mode validate_mode); bool dcn30_internal_validate_bw( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int *pipe_cnt_out, int *vlevel_out, - bool fast_validate, + enum dc_validate_mode validate_mode, bool allow_self_refresh_only); void dcn30_calculate_wm_and_dlg( struct dc *dc, struct dc_state *context, @@ -78,7 +78,7 @@ void dcn30_populate_dml_writeback_from_context( int dcn30_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); bool dcn30_acquire_post_bldn_3dlut( struct resource_context *res_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c index 121a86a59833..82a205a7c25c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c @@ -1706,6 +1706,8 @@ static bool dcn301_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; return true; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c index 012c5fd52cb1..3345068a878c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c @@ -1481,6 +1481,8 @@ static bool dcn302_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c index a8d0b4686f9a..3479e1eab4cd 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c @@ -1414,6 +1414,8 @@ static bool dcn303_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index dddddbfef85f..3ed7f50554e2 100644 --- 
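A small fix rides along in the dcn30 hunk above: get_refresh_rate() assigned timing = &context->streams[0]->timing and then tested timing == NULL. Taking the address of a struct member of a valid pointer can never yield NULL, so the branch was dead code (the class of mistake -Waddress flags); deleting it changes no behavior, and the real guard remains the h_v_total == 0 check. In isolation:

struct crtc_timing_ex { int h_total, v_total; };	/* illustrative */
struct stream_ex { struct crtc_timing_ex timing; };

static int h_v_total(const struct stream_ex *s)
{
	const struct crtc_timing_ex *t = &s->timing;	/* non-NULL for valid s */

	/* "if (t == NULL) return 0;" is dead code here: the pointer to
	 * validate is s itself, and the useful guard is on the product */
	return t->h_total * t->v_total;
}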
a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -1616,14 +1616,14 @@ static bool is_dual_plane(enum surface_pixel_format format) int dcn31x_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { uint32_t pipe_cnt; int i; dc_assert_fp_enabled(); - pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); for (i = 0; i < pipe_cnt; i++) { pipes[i].pipe.src.gpuvm = 1; @@ -1641,7 +1641,7 @@ int dcn31x_populate_dml_pipes_from_context(struct dc *dc, int dcn31_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; @@ -1649,7 +1649,7 @@ int dcn31_populate_dml_pipes_from_context( bool upscaled = false; DC_FP_START(); - dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); DC_FP_END(); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { @@ -1758,9 +1758,9 @@ dcn31_set_mcif_arb_params(struct dc *dc, DC_FP_END(); } -bool dcn31_validate_bandwidth(struct dc *dc, +enum dc_status dcn31_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; @@ -1778,19 +1778,19 @@ bool dcn31_validate_bandwidth(struct dc *dc, goto validate_fail; DC_FP_START(); - out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true); + out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode, true); DC_FP_END(); - // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg + // Disable DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX to set min dcfclk in calculate_wm_and_dlg if (pipe_cnt == 0) - fast_validate = false; + validate_mode = DC_VALIDATE_MODE_AND_PROGRAMMING; if (!out) goto validate_fail; BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (fast_validate) { + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } @@ -1813,7 +1813,7 @@ validate_out: BW_VAL_TRACE_FINISH(); - return out; + return out ? 
DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } static void dcn31_get_panel_config_defaults(struct dc_panel_config *panel_config) @@ -1850,7 +1850,9 @@ static struct resource_funcs dcn31_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn31_get_panel_config_defaults, .get_det_buffer_size = dcn31_get_det_buffer_size, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static struct clock_source *dcn30_clock_source_create( @@ -1954,6 +1956,9 @@ static bool dcn31_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.num_of_host_routers = 2; + dc->caps.num_of_dpias_per_host_router = 2; + /* Use pipe context based otg sync logic */ dc->config.use_pipe_ctx_sync_logic = true; dc->config.disable_hbr_audio_dp2 = true; @@ -2199,6 +2204,8 @@ static bool dcn31_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = dcn3_1_ip.max_num_dpp; @@ -2228,3 +2235,35 @@ struct resource_pool *dcn31_create_resource_pool( kfree(pool); return NULL; } + +enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link, + struct dc_link_settings *link_setting, + uint8_t pipe_count, + struct pipe_ctx *pipes, + struct audio_output *audio_output) +{ + struct dc_state *state = link->dc->current_state; + int i; + +#if defined(CONFIG_DRM_AMD_DC_FP) + for (i = 0; i < state->stream_count; i++) + if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link) + link->dc->hwss.calculate_pix_rate_divider((struct dc *)link->dc, state, state->streams[i]); + + for (i = 0; i < pipe_count; i++) { + link->dc->res_pool->funcs->build_pipe_pix_clk_params(&pipes[i]); + + // Setup audio + if (pipes[i].stream_res.audio != NULL) + build_audio_output(state, &pipes[i], &audio_output[i]); + } +#else + /* This DCN requires rate divider updates and audio reprogramming to allow DP1<-->DP2 link rate switching, + * but the above will not compile on architectures without an FPU. 
+ */ + DC_LOG_WARNING("%s: DP1<-->DP2 link retraining will not work on this DCN on non-FPU platforms", __func__); + ASSERT(0); +#endif + + return DC_OK; +} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h index 551ad912f7be..c32c85ef0ba4 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h @@ -37,9 +37,9 @@ struct dcn31_resource_pool { struct resource_pool base; }; -bool dcn31_validate_bandwidth(struct dc *dc, +enum dc_status dcn31_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn31_calculate_wm_and_dlg( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, @@ -48,7 +48,7 @@ void dcn31_calculate_wm_and_dlg( int dcn31_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn31_populate_dml_writeback_from_context(struct dc *dc, struct resource_context *res_ctx, @@ -66,6 +66,12 @@ struct resource_pool *dcn31_create_resource_pool( unsigned int dcn31_get_det_buffer_size( const struct dc_state *context); +enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link, + struct dc_link_settings *link_setting, + uint8_t pipe_count, + struct pipe_ctx *pipes, + struct audio_output *audio_output); + /*temp: B0 specific before switch to dcn313 headers*/ #ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL #define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c index 26becc4cb804..663c49cce4aa 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c @@ -926,6 +926,7 @@ static const struct dc_debug_options debug_defaults_drv = { .seamless_boot_odm_combine = true, .enable_legacy_fast_update = true, .using_dml2 = false, + .disable_dsc_power_gate = true, }; static const struct dc_panel_config panel_config_defaults = { @@ -1667,12 +1668,12 @@ static struct clock_source *dcn31_clock_source_create( static int dcn314_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int pipe_cnt; DC_FP_START(); - pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, fast_validate); + pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode); DC_FP_END(); return pipe_cnt; @@ -1694,9 +1695,9 @@ static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_confi *panel_config = panel_config_defaults; } -bool dcn314_validate_bandwidth(struct dc *dc, +enum dc_status dcn314_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; @@ -1715,19 +1716,19 @@ bool dcn314_validate_bandwidth(struct dc *dc, DC_FP_START(); // do not support self refresh only - out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, false); + out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode, false); DC_FP_END(); - // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg + // Disable 
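dcn31_update_dc_state_for_encoder_switch() above also shows the driver's compile-time fencing of FPU-dependent paths: the working branch is guarded by CONFIG_DRM_AMD_DC_FP, and the fallback does not quietly succeed; it logs and ASSERTs so a build without FP support fails loudly during DP1<-->DP2 retraining rather than mis-programming dividers. The skeleton, with recompute_link_state() as a placeholder for the divider/audio rebuild in the hunk:

#if defined(CONFIG_DRM_AMD_DC_FP)
	/* FP-capable build: recompute pixel-rate dividers and rebuild
	 * pix clk params plus audio outputs for each affected pipe */
	recompute_link_state(link, pipes, pipe_count);
#else
	/* no FPU support compiled in: trap loudly rather than limp on */
	DC_LOG_WARNING("%s: encoder switch needs FP support", __func__);
	ASSERT(0);
#endif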
DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX to set min dcfclk in calculate_wm_and_dlg if (pipe_cnt == 0) - fast_validate = false; + validate_mode = DC_VALIDATE_MODE_AND_PROGRAMMING; if (!out) goto validate_fail; BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (fast_validate) { + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } @@ -1750,7 +1751,7 @@ validate_out: BW_VAL_TRACE_FINISH(); - return out; + return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } static struct resource_funcs dcn314_res_pool_funcs = { @@ -1779,7 +1780,9 @@ static struct resource_funcs dcn314_res_pool_funcs = { .get_panel_config_defaults = dcn314_get_panel_config_defaults, .get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia, .get_det_buffer_size = dcn31_get_det_buffer_size, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static struct clock_source *dcn30_clock_source_create( @@ -1885,6 +1888,9 @@ static bool dcn314_resource_construct( dc->caps.max_disp_clock_khz_at_vmin = 650000; + dc->caps.num_of_host_routers = 2; + dc->caps.num_of_dpias_per_host_router = 2; + /* Use pipe context based otg sync logic */ dc->config.use_pipe_ctx_sync_logic = true; @@ -2114,6 +2120,8 @@ static bool dcn314_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = dcn3_14_ip.max_num_dpp; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h index 49ffe71018df..ac9bb7f097d5 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h @@ -39,9 +39,9 @@ struct dcn314_resource_pool { struct resource_pool base; }; -bool dcn314_validate_bandwidth(struct dc *dc, +enum dc_status dcn314_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); struct resource_pool *dcn314_create_resource_pool( const struct dc_init_data *init_data, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c index 6c2bb3f63be1..82cc78c291d8 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c @@ -1664,7 +1664,7 @@ static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context) static int dcn315_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt, crb_idx, crb_pipes; struct resource_context *res_ctx = &context->res_ctx; @@ -1674,7 +1674,7 @@ static int dcn315_populate_dml_pipes_from_context( bool pixel_rate_crb = allow_pixel_rate_crb(dc, context); DC_FP_START(); - dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); DC_FP_END(); for (i = 0, pipe_cnt = 0, crb_pipes = 0; i < dc->res_pool->pipe_count; i++) { @@ -1844,7 +1844,9 @@ static struct resource_funcs dcn315_res_pool_funcs = { .get_panel_config_defaults = 
dcn315_get_panel_config_defaults, .get_power_profile = dcn315_get_power_profile, .get_det_buffer_size = dcn31_get_det_buffer_size, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static bool dcn315_resource_construct( @@ -2140,6 +2142,8 @@ static bool dcn315_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = dcn3_15_ip.max_num_dpp; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c index 568094827212..636110e48d01 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c @@ -1610,7 +1610,7 @@ static bool is_dual_plane(enum surface_pixel_format format) static int dcn316_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; @@ -1618,7 +1618,7 @@ static int dcn316_populate_dml_pipes_from_context( const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_16_MIN_COMPBUF_SIZE_KB; DC_FP_START(); - dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); DC_FP_END(); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { @@ -1720,7 +1720,9 @@ static struct resource_funcs dcn316_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn316_get_panel_config_defaults, .get_det_buffer_size = dcn31_get_det_buffer_size, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static bool dcn316_resource_construct( @@ -2008,6 +2010,8 @@ static bool dcn316_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = dcn3_16_ip.max_num_dpp; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index 2a59cc61ed8c..9917b366f00c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -24,6 +24,7 @@ * */ +#include "dc_types.h" #include "dm_services.h" #include "dc.h" @@ -1741,7 +1742,7 @@ void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context, } } -static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_validate) +static bool dml1_validate(struct dc *dc, struct dc_state *context, enum dc_validate_mode validate_mode) { bool out = false; @@ -1766,7 +1767,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val goto validate_fail; DC_FP_START(); - out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); + out = 
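The dcn31/dcn314/dcn315/dcn316 hunks above all append the same two hooks to their resource funcs tables, .update_dc_state_for_encoder_switch and .build_pipe_pix_clk_params, so generation-agnostic link code can reach the encoder-switch path through the ops table. A plausible call-site shape, assumed rather than taken from this diff:

/* hypothetical caller: dispatch through the per-ASIC ops table */
if (dc->res_pool->funcs->update_dc_state_for_encoder_switch)
	status = dc->res_pool->funcs->update_dc_state_for_encoder_switch(
			link, &link_settings, pipe_count, pipes, audio_output);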
dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode); DC_FP_END(); if (pipe_cnt == 0) @@ -1777,7 +1778,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (fast_validate) { + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } @@ -1806,25 +1807,62 @@ validate_out: return out; } -bool dcn32_validate_bandwidth(struct dc *dc, +enum dc_status dcn32_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { - bool out = false; + unsigned int i; + enum dc_status status; + const struct dc_stream_state *stream; + + /* reset cursor limitations on subvp */ + for (i = 0; i < context->stream_count; i++) { + stream = context->streams[i]; + + if (dc_state_can_clear_stream_cursor_subvp_limit(stream, context)) { + dc_state_set_stream_cursor_subvp_limit(stream, context, false); + } + } if (dc->debug.using_dml2) - out = dml2_validate(dc, context, + status = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate); + validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; else - out = dml1_validate(dc, context, fast_validate); - return out; + status = dml1_validate(dc, context, validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_OK && dc_state_is_subvp_in_use(context)) { + /* check new stream configuration still supports cursor if subvp used */ + for (i = 0; i < context->stream_count; i++) { + stream = context->streams[i]; + + if (dc_state_get_stream_subvp_type(context, stream) != SUBVP_PHANTOM && + stream->cursor_position.enable && + !dc_stream_check_cursor_attributes(stream, context, &stream->cursor_attributes)) { + /* hw cursor cannot be supported with subvp active, so disable subvp for now */ + dc_state_set_stream_cursor_subvp_limit(stream, context, true); + status = DC_FAIL_HW_CURSOR_SUPPORT; + } + }; + } + + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) { + /* attempt to validate again with subvp disabled due to cursor */ + if (dc->debug.using_dml2) + status = dml2_validate(dc, context, + context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, + validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + else + status = dml1_validate(dc, context, validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + } + + return status; } int dcn32_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; @@ -1840,7 +1878,7 @@ int dcn32_populate_dml_pipes_from_context( int num_subvp_none = 0; int odm_slice_count; - dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); /* For single display subvp, look for subvp main so if we have phantom * pipe, we can set odm policy to match main pipe @@ -1922,7 +1960,7 @@ int dcn32_populate_dml_pipes_from_context( /* Only populate DML input with subvp info for full updates. * This is just a workaround -- needs a proper fix. 
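The dcn32_validate_bandwidth() rewrite above is a two-pass scheme: clear every stream's subvp cursor limit, validate once, and, only for full-programming validation, check whether an enabled HW cursor still fits while subvp is in use. Any offender gets its limit flag set and the status becomes DC_FAIL_HW_CURSOR_SUPPORT, which triggers a second validation pass in which the set limit steers DML away from subvp for that stream. Control flow reduced to a sketch (the lowercase helpers stand in for the dc_state_*/dc_stream_* calls in the hunk):

status = run_dml_validate(dc, context, validate_mode);		/* pass 1 */

if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING &&
    status == DC_OK && subvp_in_use(context)) {
	for (i = 0; i < context->stream_count; i++) {
		stream = context->streams[i];
		if (!is_phantom(context, stream) &&
		    stream->cursor_position.enable &&
		    !cursor_attributes_supported(stream, context)) {
			set_cursor_subvp_limit(stream, context, true);
			status = DC_FAIL_HW_CURSOR_SUPPORT;
		}
	}
}

if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING &&
    status == DC_FAIL_HW_CURSOR_SUPPORT)
	status = run_dml_validate(dc, context, validate_mode);	/* pass 2 */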
*/ - if (!fast_validate) { + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) { switch (dc_state_get_pipe_subvp_type(context, pipe)) { case SUBVP_MAIN: pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_sub_viewport; @@ -2023,25 +2061,31 @@ void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context, static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { - struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp; - - memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options)); - DC_FP_START(); dcn32_update_bw_bounding_box_fpu(dc, bw_params); - dml2_opt->use_clock_dc_limits = false; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) - dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2); + dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); - dml2_opt->use_clock_dc_limits = true; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source) - dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source); + dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source); DC_FP_END(); } +unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc, + struct dc_state *state, + const struct dc_stream_state *stream) +{ + bool limit_cur_to_buf; + + limit_cur_to_buf = dc_state_get_stream_subvp_cursor_limit(stream, state) && + !stream->hw_cursor_req; + + return limit_cur_to_buf ? dc->caps.max_buffered_cursor_size : dc->caps.max_cursor_size; +} + static struct resource_funcs dcn32_res_pool_funcs = { .destroy = dcn32_destroy_resource_pool, .link_enc_create = dcn32_link_encoder_create, @@ -2067,7 +2111,8 @@ static struct resource_funcs dcn32_res_pool_funcs = { .add_phantom_pipes = dcn32_add_phantom_pipes, .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params, .calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .get_max_hw_cursor_size = dcn32_get_max_hw_cursor_size, }; static uint32_t read_pipe_fuses(struct dc_context *ctx) @@ -2114,8 +2159,6 @@ static bool dcn32_resource_construct( #define REG_STRUCT dccg_regs dccg_regs_init(); - DC_FP_START(); - ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = &res_cap_dcn32; @@ -2153,6 +2196,7 @@ static bool dcn32_resource_construct( dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/ /* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed*/ dc->caps.max_cursor_size = 64; + dc->caps.max_buffered_cursor_size = 64; // sqrt(16 * 1024 / 4) dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; dc->caps.mall_size_per_mem_channel = 4; @@ -2207,7 +2251,7 @@ static bool dcn32_resource_construct( dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; - dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.hw_3d_lut = 0; dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1 // no OGAM ROM on DCN2 and later ASICs dc->caps.color.dpp.ogam_rom_caps.srgb = 0; @@ -2226,6 +2270,7 @@ static bool dcn32_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.color.mpc.preblend = true; /* Use pipe context based otg sync logic */ dc->config.use_pipe_ctx_sync_logic = true; @@ -2455,6 +2500,8 @@ static bool dcn32_resource_construct( for (i = 0; i < 
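Two cursor-related caps land above. dcn32_get_max_hw_cursor_size() returns the smaller buffered limit only when the stream carries the subvp cursor limit and has not explicitly requested a HW cursor (stream->hw_cursor_req); everything else keeps max_cursor_size. And the // sqrt(16 * 1024 / 4) note is this arithmetic: a 16 KiB cursor buffer at 4 bytes per ARGB pixel holds 16384 / 4 = 4096 pixels, and the largest square cursor that fits is sqrt(4096) = 64 on a side, hence max_buffered_cursor_size = 64. As a compile-time check (buffer size taken from the comment):

#define CURSOR_BUFFER_BYTES	(16 * 1024)
#define CURSOR_BYTES_PER_PX	4	/* ARGB8888 */

_Static_assert(CURSOR_BUFFER_BYTES / CURSOR_BYTES_PER_PX == 64 * 64,
	       "a 64x64 ARGB cursor exactly fills a 16 KiB buffer");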
dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { @@ -2469,7 +2516,6 @@ static bool dcn32_resource_construct( } dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = false; dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = true; @@ -2501,14 +2547,14 @@ static bool dcn32_resource_construct( if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev) && (dc->config.sdpif_request_limit_words_per_umc == 0)) dc->config.sdpif_request_limit_words_per_umc = 16; - DC_FP_END(); + /* init DC limited DML2 options */ + memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options)); + dc->dml2_dc_power_options.use_clock_dc_limits = true; return true; create_fail: - DC_FP_END(); - dcn32_resource_destruct(pool); return false; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h index 1aa4ced29291..82f966cf4ed2 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h @@ -98,14 +98,14 @@ void dcn32_add_phantom_pipes(struct dc *dc, unsigned int pipe_cnt, unsigned int index); -bool dcn32_validate_bandwidth(struct dc *dc, +enum dc_status dcn32_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); int dcn32_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn32_calculate_wm_and_dlg( struct dc *dc, struct dc_state *context, @@ -188,6 +188,10 @@ void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context); unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned int total_size_in_mall_bytes); +unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc, + struct dc_state *state, + const struct dc_stream_state *stream); + /* definitions for run time init of reg offsets */ /* CLK SRC */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index 38d76434683e..061c0907d802 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -1580,21 +1580,15 @@ static struct dc_cap_funcs cap_funcs = { static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { - struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp; - - memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options)); - DC_FP_START(); dcn321_update_bw_bounding_box_fpu(dc, bw_params); - dml2_opt->use_clock_dc_limits = false; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) - dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2); + dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); - dml2_opt->use_clock_dc_limits = true; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source) - dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source); + dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source); DC_FP_END(); } @@ -1624,7 +1618,8 @@ 
static struct resource_funcs dcn321_res_pool_funcs = { .add_phantom_pipes = dcn32_add_phantom_pipes, .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params, .calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .get_max_hw_cursor_size = dcn32_get_max_hw_cursor_size, }; static uint32_t read_pipe_fuses(struct dc_context *ctx) @@ -1709,6 +1704,7 @@ static bool dcn321_resource_construct( dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/ /* TODO: Bring max cursor size back to 256 after subvp cursor corruption is fixed*/ dc->caps.max_cursor_size = 64; + dc->caps.max_buffered_cursor_size = 64; // sqrt(16 * 1024 / 4) dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; dc->caps.mall_size_per_mem_channel = 4; @@ -1759,8 +1755,8 @@ static bool dcn321_resource_construct( dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; - dc->caps.color.dpp.hw_3d_lut = 1; - dc->caps.color.dpp.ogam_ram = 1; + dc->caps.color.dpp.hw_3d_lut = 0; + dc->caps.color.dpp.ogam_ram = 0; // no OGAM ROM on DCN2 and later ASICs dc->caps.color.dpp.ogam_rom_caps.srgb = 0; dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; @@ -1778,6 +1774,7 @@ static bool dcn321_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.color.mpc.preblend = true; /* Use pipe context based otg sync logic */ dc->config.use_pipe_ctx_sync_logic = true; @@ -2002,6 +1999,8 @@ static bool dcn321_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { @@ -2016,7 +2015,6 @@ static bool dcn321_resource_construct( } dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = false; dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = true; @@ -2044,6 +2042,10 @@ static bool dcn321_resource_construct( dc->dml2_options.max_segments_per_hubp = 18; dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE; + /* init DC limited DML2 options */ + memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options)); + dc->dml2_dc_power_options.use_clock_dc_limits = true; + return true; create_fail: diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index ffd2b816cd02..8475c6eec547 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -1732,24 +1732,24 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config } -static bool dcn35_validate_bandwidth(struct dc *dc, +static enum dc_status dcn35_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; out = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate); + validate_mode); - if (fast_validate) - return out; + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) + return out ? 
DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); dcn35_decide_zstate_support(dc, context); DC_FP_END(); - return out; + return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } enum dc_status dcn35_patch_unknown_plane_state(struct dc_plane_state *plane_state) @@ -1786,7 +1786,9 @@ static struct resource_funcs dcn35_res_pool_funcs = { .get_panel_config_defaults = dcn35_get_panel_config_defaults, .get_preferred_eng_id_dpia = dcn35_get_preferred_eng_id_dpia, .get_det_buffer_size = dcn31_get_det_buffer_size, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static bool dcn35_resource_construct( @@ -1874,7 +1876,7 @@ static bool dcn35_resource_construct( dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; - dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.hw_3d_lut = 0; dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1 // no OGAM ROM on DCN301 dc->caps.color.dpp.ogam_rom_caps.srgb = 0; @@ -1893,6 +1895,10 @@ static bool dcn35_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.color.mpc.preblend = true; + + dc->caps.num_of_host_routers = 2; + dc->caps.num_of_dpias_per_host_router = 2; /* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order * to provide some margin. @@ -1903,7 +1909,7 @@ static bool dcn35_resource_construct( dc->caps.max_disp_clock_khz_at_vmin = 650000; /* Sequential ONO is based on ASIC. */ - if (dc->ctx->asic_id.hw_internal_rev > 0x10) + if (dc->ctx->asic_id.hw_internal_rev >= 0x40) dc->caps.sequential_ono = true; /* Use pipe context based otg sync logic */ @@ -2148,12 +2154,13 @@ static bool dcn35_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = pool->base.pipe_count; dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = true; dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = false; if (dc->config.EnableMinDispClkODM) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 98f5bc1b929e..0971c0f74186 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -1712,24 +1712,24 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config } -static bool dcn351_validate_bandwidth(struct dc *dc, +static enum dc_status dcn351_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; out = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate); + validate_mode); - if (fast_validate) - return out; + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) + return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); dcn35_decide_zstate_support(dc, context); DC_FP_END(); - return out; + return out ?
DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } static struct resource_funcs dcn351_res_pool_funcs = { @@ -1758,7 +1758,9 @@ static struct resource_funcs dcn351_res_pool_funcs = { .get_panel_config_defaults = dcn35_get_panel_config_defaults, .get_preferred_eng_id_dpia = dcn351_get_preferred_eng_id_dpia, .get_det_buffer_size = dcn31_get_det_buffer_size, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static bool dcn351_resource_construct( @@ -1846,7 +1848,7 @@ static bool dcn351_resource_construct( dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; - dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.hw_3d_lut = 0; dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1 // no OGAM ROM on DCN301 dc->caps.color.dpp.ogam_rom_caps.srgb = 0; @@ -1865,6 +1867,10 @@ static bool dcn351_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.color.mpc.preblend = true; + + dc->caps.num_of_host_routers = 2; + dc->caps.num_of_dpias_per_host_router = 2; /* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order * to provide some margin. @@ -2119,13 +2125,14 @@ static bool dcn351_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = pool->base.pipe_count; dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = true; dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = false; if (dc->config.EnableMinDispClkODM) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c index b6468573dc33..8bae7fcedc22 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c @@ -1713,24 +1713,24 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config } -static bool dcn35_validate_bandwidth(struct dc *dc, +static enum dc_status dcn35_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; out = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate); + validate_mode); - if (fast_validate) - return out; + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) + return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); dcn35_decide_zstate_support(dc, context); DC_FP_END(); - return out; + return out ?
DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } @@ -1759,7 +1759,9 @@ static struct resource_funcs dcn36_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn35_get_panel_config_defaults, .get_preferred_eng_id_dpia = dcn36_get_preferred_eng_id_dpia, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static bool dcn36_resource_construct( @@ -1847,7 +1849,7 @@ static bool dcn36_resource_construct( dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; - dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.hw_3d_lut = 0; dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1 // no OGAM ROM on DCN301 dc->caps.color.dpp.ogam_rom_caps.srgb = 0; @@ -1866,6 +1868,10 @@ static bool dcn36_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.color.mpc.preblend = true; + + dc->caps.num_of_host_routers = 2; + dc->caps.num_of_dpias_per_host_router = 2; /* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order * to provide some margin. @@ -1876,7 +1882,7 @@ static bool dcn36_resource_construct( dc->caps.max_disp_clock_khz_at_vmin = 650000; /* Sequential ONO is based on ASIC. */ - if (dc->ctx->asic_id.hw_internal_rev > 0x10) + if (dc->ctx->asic_id.hw_internal_rev >= 0x40) dc->caps.sequential_ono = true; /* Use pipe context based otg sync logic */ @@ -2121,12 +2127,13 @@ static bool dcn36_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = pool->base.pipe_count; dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = true; dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = false; if (dc->config.EnableMinDispClkODM) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c index 7436dfbdf927..b3988e38d0a6 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c @@ -70,7 +70,6 @@ #include "dml/dcn30/display_mode_vba_30.h" #include "vm_helper.h" #include "dcn20/dcn20_vmid.h" -#include "dml/dcn401/dcn401_fpu.h" #include "dc_state_priv.h" @@ -1608,10 +1607,6 @@ static struct dc_cap_funcs cap_funcs = { static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { - struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp; - - memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options)); - /* re-calculate the available MALL size if required */ if (bw_params->num_channels > 0) { dc->caps.max_cab_allocation_bytes = dcn401_calc_num_avail_chans_for_mall( @@ -1622,15 +1617,11 @@ static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b DC_FP_START(); - dcn401_update_bw_bounding_box_fpu(dc, bw_params); - - dml2_opt->use_clock_dc_limits = false; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) - dml2_reinit(dc, dml2_opt,
&dc->current_state->bw_ctx.dml2); + dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); - dml2_opt->use_clock_dc_limits = true; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source) - dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source); + dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source); DC_FP_END(); } @@ -1642,16 +1633,52 @@ enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_sta return DC_OK; } -bool dcn401_validate_bandwidth(struct dc *dc, +enum dc_status dcn401_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { - bool out = false; + unsigned int i; + enum dc_status status = DC_OK; + const struct dc_stream_state *stream; + + /* reset cursor limitations on subvp */ + for (i = 0; i < context->stream_count; i++) { + stream = context->streams[i]; + + if (dc_state_can_clear_stream_cursor_subvp_limit(stream, context)) { + dc_state_set_stream_cursor_subvp_limit(stream, context, false); + } + } + if (dc->debug.using_dml2) - out = dml2_validate(dc, context, + status = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate); - return out; + validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_OK && dc_state_is_subvp_in_use(context)) { + /* check new stream configuration still supports cursor if subvp used */ + for (i = 0; i < context->stream_count; i++) { + stream = context->streams[i]; + + if (dc_state_get_stream_subvp_type(context, stream) != SUBVP_PHANTOM && + stream->cursor_position.enable && + !dc_stream_check_cursor_attributes(stream, context, &stream->cursor_attributes)) { + /* hw cursor cannot be supported with subvp active, so disable subvp for now */ + dc_state_set_stream_cursor_subvp_limit(stream, context, true); + status = DC_FAIL_HW_CURSOR_SUPPORT; + } + }; + } + + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) { + /* attempt to validate again with subvp disabled due to cursor */ + if (dc->debug.using_dml2) + status = dml2_validate(dc, context, + context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, + validate_mode) ? 
DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + } + + return status; } void dcn401_prepare_mcache_programming(struct dc *dc, @@ -1770,7 +1797,8 @@ static struct resource_funcs dcn401_res_pool_funcs = { .build_pipe_pix_clk_params = dcn401_build_pipe_pix_clk_params, .calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes, .get_power_profile = dcn401_get_power_profile, - .get_vstartup_for_pipe = dcn401_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn401_get_vstartup_for_pipe, + .get_max_hw_cursor_size = dcn32_get_max_hw_cursor_size }; static uint32_t read_pipe_fuses(struct dc_context *ctx) @@ -1846,8 +1874,9 @@ static bool dcn401_resource_construct( dc->caps.max_downscale_ratio = 600; dc->caps.i2c_speed_in_khz = 95; dc->caps.i2c_speed_in_khz_hdcp = 95; /*1.4 w/a applied by default*/ - /* TODO: Bring max cursor size back to 256 after subvp cursor corruption is fixed*/ + /* used to set cursor pitch, so must be aligned to power of 2 (HW actually supported 78x78) */ dc->caps.max_cursor_size = 64; + dc->caps.max_buffered_cursor_size = 64; dc->caps.cursor_not_scaled = true; dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; @@ -1900,8 +1929,8 @@ static bool dcn401_resource_construct( dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; - dc->caps.color.dpp.hw_3d_lut = 1; - dc->caps.color.dpp.ogam_ram = 1; + dc->caps.color.dpp.hw_3d_lut = 0; + dc->caps.color.dpp.ogam_ram = 0; // no OGAM ROM on DCN2 and later ASICs dc->caps.color.dpp.ogam_rom_caps.srgb = 0; dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; @@ -1919,8 +1948,30 @@ static bool dcn401_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.color.mpc.preblend = true; dc->config.use_spl = true; dc->config.prefer_easf = true; + + dc->config.dcn_sharpness_range.sdr_rgb_min = 0; + dc->config.dcn_sharpness_range.sdr_rgb_max = 1750; + dc->config.dcn_sharpness_range.sdr_rgb_mid = 750; + dc->config.dcn_sharpness_range.sdr_yuv_min = 0; + dc->config.dcn_sharpness_range.sdr_yuv_max = 3500; + dc->config.dcn_sharpness_range.sdr_yuv_mid = 1500; + dc->config.dcn_sharpness_range.hdr_rgb_min = 0; + dc->config.dcn_sharpness_range.hdr_rgb_max = 2750; + dc->config.dcn_sharpness_range.hdr_rgb_mid = 1500; + + dc->config.dcn_override_sharpness_range.sdr_rgb_min = 0; + dc->config.dcn_override_sharpness_range.sdr_rgb_max = 3250; + dc->config.dcn_override_sharpness_range.sdr_rgb_mid = 1250; + dc->config.dcn_override_sharpness_range.sdr_yuv_min = 0; + dc->config.dcn_override_sharpness_range.sdr_yuv_max = 3500; + dc->config.dcn_override_sharpness_range.sdr_yuv_mid = 1500; + dc->config.dcn_override_sharpness_range.hdr_rgb_min = 0; + dc->config.dcn_override_sharpness_range.hdr_rgb_max = 2750; + dc->config.dcn_override_sharpness_range.hdr_rgb_mid = 1500; + dc->config.dc_mode_clk_limit_support = true; dc->config.enable_windowed_mpo_odm = true; dc->config.set_pipe_unlock_order = true; /* Need to ensure DET gets freed before allocating */ @@ -2139,6 +2190,8 @@ static bool dcn401_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { @@ -2157,7 +2210,6 @@ static bool dcn401_resource_construct( dc->config.sdpif_request_limit_words_per_umc = 16; dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = false; 
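The dcn32 and dcn401 validate_bandwidth() implementations above share one flow: clear any per-stream SubVP cursor limits, validate, and if full validation succeeds with SubVP active but a stream's HW cursor cannot be supported, flag the limit and validate a second time. A condensed sketch of that flow, using the helpers introduced in this series; the wrapper itself and the single-dml2 simplification (the DC power-source selection is elided) are illustrative only:

/*
 * Condensed sketch of the two-pass validation above: validate once, and if
 * SubVP is in use but a stream's HW cursor cannot be supported, mark the
 * per-stream SubVP cursor limit and validate again.
 */
static enum dc_status validate_with_cursor_retry(struct dc *dc,
		struct dc_state *context, enum dc_validate_mode mode)
{
	enum dc_status status;
	unsigned int i;

	status = dml2_validate(dc, context, context->bw_ctx.dml2, mode) ?
			DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;

	if (mode != DC_VALIDATE_MODE_AND_PROGRAMMING || status != DC_OK ||
	    !dc_state_is_subvp_in_use(context))
		return status;

	for (i = 0; i < context->stream_count; i++) {
		const struct dc_stream_state *stream = context->streams[i];

		if (dc_state_get_stream_subvp_type(context, stream) != SUBVP_PHANTOM &&
		    stream->cursor_position.enable &&
		    !dc_stream_check_cursor_attributes(stream, context,
						       &stream->cursor_attributes)) {
			/* hw cursor and subvp conflict: limit subvp and retry */
			dc_state_set_stream_cursor_subvp_limit(stream, context, true);
			status = DC_FAIL_HW_CURSOR_SUPPORT;
		}
	}

	if (status == DC_FAIL_HW_CURSOR_SUPPORT)
		status = dml2_validate(dc, context, context->bw_ctx.dml2, mode) ?
				DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;

	return status;
}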
dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = true; dc->dml2_options.map_dc_pipes_with_callbacks = true; @@ -2190,6 +2242,10 @@ static bool dcn401_resource_construct( /* SPL */ dc->caps.scl_caps.sharpener_support = true; + /* init DC limited DML2 options */ + memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options)); + dc->dml2_dc_power_options.use_clock_dc_limits = true; + return true; create_fail: diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h index 4c259745d519..2ae6831c31ef 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h @@ -22,9 +22,9 @@ struct resource_pool *dcn401_create_resource_pool( enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_state); -bool dcn401_validate_bandwidth(struct dc *dc, +enum dc_status dcn401_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c index 28348734d900..55b929ca7982 100644 --- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c +++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c @@ -196,7 +196,12 @@ static struct spl_rect calculate_mpc_slice_in_timing_active( int epimo = mpc_slice_count - plane_clip_rec->width % mpc_slice_count - 1; struct spl_rect mpc_rec; - if (use_recout_width_aligned) { + if (spl_in->basic_in.custom_width != 0) { + mpc_rec.width = spl_in->basic_in.custom_width; + mpc_rec.x = spl_in->basic_in.custom_x; + mpc_rec.height = plane_clip_rec->height; + mpc_rec.y = plane_clip_rec->y; + } else if (use_recout_width_aligned) { mpc_rec.width = recout_width_align; if ((mpc_rec.width * (mpc_slice_idx + 1)) > plane_clip_rec->width) { mpc_rec.width = plane_clip_rec->width % recout_width_align; @@ -219,7 +224,7 @@ static struct spl_rect calculate_mpc_slice_in_timing_active( /* extra pixels in the division remainder need to go to pipes after * the extra pixel index minus one(epimo) defined here as: */ - if (mpc_slice_idx > epimo) { + if (mpc_slice_idx > epimo && spl_in->basic_in.custom_width == 0) { mpc_rec.x += mpc_slice_idx - epimo - 1; mpc_rec.width += 1; } @@ -252,10 +257,10 @@ static struct spl_rect calculate_odm_slice_in_timing_active(struct spl_in *spl_i odm_rec.x = odm_slice_width * odm_slice_idx; odm_rec.width = is_last_odm_slice ? 
- /* last slice width is the reminder of h_active */ - h_active - odm_slice_width * (odm_slice_count - 1) : - /* odm slice width is the floor of h_active / count */ - odm_slice_width; + /* last slice width is the reminder of h_active */ + h_active - odm_slice_width * (odm_slice_count - 1) : + /* odm slice width is the floor of h_active / count */ + odm_slice_width; odm_rec.y = 0; odm_rec.height = v_active; @@ -776,7 +781,7 @@ static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in, * Do not bypass UV at 1:1 for cositing to be applied */ if (!enable_isharp) { - if (data->ratios.horz.value == one && data->ratios.vert.value == one) + if (data->ratios.horz.value == one && data->ratios.vert.value == one && !spl_in->basic_out.always_scale) return SCL_MODE_SCALING_420_LUMA_BYPASS; } @@ -884,7 +889,9 @@ static bool spl_get_isharp_en(struct spl_in *spl_in, /* Calculate number of tap with adaptive scaling off */ static void spl_get_taps_non_adaptive_scaler( - struct spl_scratch *spl_scratch, const struct spl_taps *in_taps) + struct spl_scratch *spl_scratch, + const struct spl_taps *in_taps, + bool is_subsampled) { bool check_max_downscale = false; @@ -944,13 +951,14 @@ static void spl_get_taps_non_adaptive_scaler( spl_fixpt_from_fraction(6, 1)); SPL_ASSERT(check_max_downscale); + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz)) spl_scratch->scl_data.taps.h_taps = 1; if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert)) spl_scratch->scl_data.taps.v_taps = 1; - if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c)) + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !is_subsampled) spl_scratch->scl_data.taps.h_taps_c = 1; - if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c)) + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c) && !is_subsampled) spl_scratch->scl_data.taps.v_taps_c = 1; } @@ -965,13 +973,13 @@ static bool spl_get_optimal_number_of_taps( unsigned int max_taps_y, max_taps_c; unsigned int min_taps_y, min_taps_c; enum lb_memory_config lb_config; - bool skip_easf = false; + bool skip_easf = false; bool is_subsampled = spl_is_subsampled_format(spl_in->basic_in.format); if (spl_scratch->scl_data.viewport.width > spl_scratch->scl_data.h_active && max_downscale_src_width != 0 && spl_scratch->scl_data.viewport.width > max_downscale_src_width) { - spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps); + spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled); *enable_easf_v = false; *enable_easf_h = false; *enable_isharp = false; @@ -980,7 +988,7 @@ static bool spl_get_optimal_number_of_taps( /* Disable adaptive scaler and sharpener when integer scaling is enabled */ if (spl_in->scaling_quality.integer_scaling) { - spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps); + spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled); *enable_easf_v = false; *enable_easf_h = false; *enable_isharp = false; @@ -995,8 +1003,9 @@ static bool spl_get_optimal_number_of_taps( * From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling * taps = 4 for upscaling */ - if (skip_easf) - spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps); + if (skip_easf) { + spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled); + } else { if (spl_is_video_format(spl_in->basic_in.format)) { spl_scratch->scl_data.taps.h_taps = 6; @@ -1122,7 +1131,6 @@ static bool spl_get_optimal_number_of_taps( (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))) { spl_scratch->scl_data.taps.h_taps = 1; spl_scratch->scl_data.taps.v_taps = 1; - if 
(IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !is_subsampled) spl_scratch->scl_data.taps.h_taps_c = 1; @@ -1147,6 +1155,7 @@ static bool spl_get_optimal_number_of_taps( if ((!*enable_easf_v) && !is_subsampled && (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c))) spl_scratch->scl_data.taps.v_taps_c = 1; + } } return true; @@ -1297,7 +1306,7 @@ static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *s if (enable_easf_v) { dscl_prog_data->easf_v_en = true; dscl_prog_data->easf_v_ring = 0; - dscl_prog_data->easf_v_sharp_factor = 0; + dscl_prog_data->easf_v_sharp_factor = 1; dscl_prog_data->easf_v_bf1_en = 1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable dscl_prog_data->easf_v_bf2_mode = 0xF; // 4-bit, BF2 calculation mode /* 2-bit, BF3 chroma mode correction calculation mode */ @@ -1461,7 +1470,7 @@ static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *s if (enable_easf_h) { dscl_prog_data->easf_h_en = true; dscl_prog_data->easf_h_ring = 0; - dscl_prog_data->easf_h_sharp_factor = 0; + dscl_prog_data->easf_h_sharp_factor = 1; dscl_prog_data->easf_h_bf1_en = 1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable dscl_prog_data->easf_h_bf2_mode = @@ -1898,3 +1907,4 @@ bool SPL_NAMESPACE(spl_get_number_of_taps(struct spl_in *spl_in, struct spl_out spl_set_taps_data(dscl_prog_data, data); return res; } + diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h index 1c3949b24611..23d254dea18f 100644 --- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h +++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h @@ -460,6 +460,8 @@ struct basic_in { enum spl_color_space color_space; // Color Space unsigned int max_luminance; // Max Luminance TODO: Is determined in dc_hw_sequencer.c is_sdr bool film_grain_applied; // Film Grain Applied // TODO: To check from where to get this? + int custom_width; // Width for non-standard segmentation - used when != 0 + int custom_x; // Start x for non-standard segmentation - used when custom_width != 0 }; // Basic output information @@ -480,6 +482,10 @@ enum sharpness_setting { SHARPNESS_ZERO, SHARPNESS_CUSTOM }; +enum sharpness_range_source { + SHARPNESS_RANGE_DCN = 0, + SHARPNESS_RANGE_DCN_OVERRIDE +}; struct spl_sharpness_range { int sdr_rgb_min; int sdr_rgb_max; diff --git a/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c index 52d97918a3bd..ebf0287417e0 100644 --- a/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c +++ b/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c @@ -29,8 +29,6 @@ static inline unsigned long long spl_complete_integer_division_u64( { unsigned long long result; - SPL_ASSERT(divisor); - result = spl_div64_u64_rem(dividend, divisor, remainder); return result; @@ -196,8 +194,6 @@ struct spl_fixed31_32 spl_fixpt_recip(struct spl_fixed31_32 arg) * Good idea to use Newton's method */ - SPL_ASSERT(arg.value); - return spl_fixpt_from_fraction( spl_fixpt_one.value, arg.value); diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index 4e0efff92dca..0bafb6710761 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -51,8 +51,8 @@ * for the cache windows. * * The call to dmub_srv_hw_init() programs the DMCUB registers to prepare - * for command submission. Commands can be queued via dmub_srv_cmd_queue() - * and executed via dmub_srv_cmd_execute(). 
+ * for command submission. Commands can be queued via dmub_srv_fb_cmd_queue() + * and executed via dmub_srv_fb_cmd_execute(). * * If the queue is full the dmub_srv_wait_for_idle() call can be used to * wait until the queue has been cleared. @@ -129,7 +129,9 @@ enum dmub_window_id { DMUB_WINDOW_5_TRACEBUFF, DMUB_WINDOW_6_FW_STATE, DMUB_WINDOW_7_SCRATCH_MEM, + DMUB_WINDOW_IB_MEM, DMUB_WINDOW_SHARED_STATE, + DMUB_WINDOW_LSDMA_BUFFER, DMUB_WINDOW_TOTAL, }; @@ -142,6 +144,7 @@ enum dmub_notification_type { DMUB_NOTIFICATION_SET_CONFIG_REPLY, DMUB_NOTIFICATION_DPIA_NOTIFICATION, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY, + DMUB_NOTIFICATION_FUSED_IO, DMUB_NOTIFICATION_MAX }; @@ -170,6 +173,13 @@ enum dmub_srv_power_state_type { DMUB_POWER_STATE_D3 = 8 }; +/* enum dmub_inbox_cmd_interface type - defines default interface for host->dmub commands */ +enum dmub_inbox_cmd_interface_type { + DMUB_CMD_INTERFACE_DEFAULT = 0, + DMUB_CMD_INTERFACE_FB = 1, + DMUB_CMD_INTERFACE_REG = 2, +}; + /** * struct dmub_region - dmub hw memory region * @base: base address for region, must be 256 byte aligned @@ -347,6 +357,22 @@ struct dmub_diagnostic_data { uint8_t is_traceport_en : 1; uint8_t is_cw0_enabled : 1; uint8_t is_cw6_enabled : 1; + uint8_t is_pwait : 1; +}; + +struct dmub_srv_inbox { + /* generic status */ + uint64_t num_submitted; + uint64_t num_reported; + union { + /* frame buffer mailbox status */ + struct dmub_rb rb; + /* register mailbox status */ + struct { + bool is_pending; + bool is_multi_pending; + }; + }; }; /** @@ -422,6 +448,8 @@ struct dmub_srv_hw_funcs { uint32_t (*emul_get_inbox1_rptr)(struct dmub_srv *dmub); + uint32_t (*emul_get_inbox1_wptr)(struct dmub_srv *dmub); + void (*emul_set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset); bool (*is_supported)(struct dmub_srv *dmub); @@ -462,18 +490,21 @@ struct dmub_srv_hw_funcs { void (*init_reg_offsets)(struct dmub_srv *dmub, struct dc_context *ctx); void (*subvp_save_surf_addr)(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index); + void (*send_reg_inbox0_cmd_msg)(struct dmub_srv *dmub, union dmub_rb_cmd *cmd); uint32_t (*read_reg_inbox0_rsp_int_status)(struct dmub_srv *dmub); void (*read_reg_inbox0_cmd_rsp)(struct dmub_srv *dmub, union dmub_rb_cmd *cmd); void (*write_reg_inbox0_rsp_int_ack)(struct dmub_srv *dmub); + void (*clear_reg_inbox0_rsp_int_ack)(struct dmub_srv *dmub); + void (*enable_reg_inbox0_rsp_int)(struct dmub_srv *dmub, bool enable); + uint32_t (*read_reg_outbox0_rdy_int_status)(struct dmub_srv *dmub); void (*write_reg_outbox0_rdy_int_ack)(struct dmub_srv *dmub); void (*read_reg_outbox0_msg)(struct dmub_srv *dmub, uint32_t *msg); void (*write_reg_outbox0_rsp)(struct dmub_srv *dmub, uint32_t *rsp); uint32_t (*read_reg_outbox0_rsp_int_status)(struct dmub_srv *dmub); - void (*enable_reg_inbox0_rsp_int)(struct dmub_srv *dmub, bool enable); void (*enable_reg_outbox0_rdy_int)(struct dmub_srv *dmub, bool enable); }; @@ -493,6 +524,7 @@ struct dmub_srv_create_params { enum dmub_asic asic; uint32_t fw_version; bool is_virtual; + enum dmub_inbox_cmd_interface_type inbox_type; }; /** @@ -510,6 +542,7 @@ struct dmub_srv { uint32_t fw_version; bool is_virtual; struct dmub_fb scratch_mem_fb; + struct dmub_fb ib_mem_gart; volatile struct dmub_shared_state_feature_block *shared_state; volatile const struct dmub_fw_state *fw_state; @@ -521,8 +554,9 @@ struct dmub_srv { const struct dmub_srv_dcn401_regs *regs_dcn401; struct dmub_srv_base_funcs funcs; struct dmub_srv_hw_funcs hw_funcs; - struct dmub_rb inbox1_rb; 
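With the split into frame-buffer and register mailboxes, the default host-to-DMUB command interface is now chosen at creation time via the inbox_type field added above. A minimal sketch, assuming the existing dmub_srv_create() entry point; hardware init via dmub_srv_hw_init() and error paths are trimmed:

/*
 * Illustrative only: create the service with an explicit default command
 * interface, then submit one command over the frame-buffer mailbox.
 */
static enum dmub_status create_and_submit(struct dmub_srv *dmub,
		const struct dmub_srv_create_params *base_params,
		union dmub_rb_cmd *cmd)
{
	struct dmub_srv_create_params params = *base_params;
	enum dmub_status status;

	/* DMUB_CMD_INTERFACE_DEFAULT would let the service pick per-ASIC */
	params.inbox_type = DMUB_CMD_INTERFACE_FB;

	status = dmub_srv_create(dmub, &params);
	if (status != DMUB_STATUS_OK)
		return status;

	/* frame-buffer mailbox: queue, then kick the ring */
	status = dmub_srv_fb_cmd_queue(dmub, cmd);
	if (status == DMUB_STATUS_OK)
		status = dmub_srv_fb_cmd_execute(dmub);

	/* with DMUB_CMD_INTERFACE_REG this would instead be the single
	 * blocking call dmub_srv_reg_cmd_execute(dmub, cmd)
	 */
	return status;
}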
+ struct dmub_srv_inbox inbox1; uint32_t inbox1_last_wptr; + struct dmub_srv_inbox reg_inbox0; /** * outbox1_rb is accessed without locks (dal & dc) * and to be used only in dmub_srv_stat_get_notification() @@ -542,9 +576,11 @@ struct dmub_srv { struct dmub_fw_meta_info meta_info; struct dmub_feature_caps feature_caps; struct dmub_visual_confirm_color visual_confirm_color; + enum dmub_inbox_cmd_interface_type inbox_type; enum dmub_srv_power_state_type power_state; struct dmub_diagnostic_data debug; + struct dmub_fb lsdma_rb_fb; }; /** @@ -566,22 +602,11 @@ struct dmub_notification { struct aux_reply_data aux_reply; enum dp_hpd_status hpd_status; enum set_config_status sc_status; - /** - * DPIA notification command. - */ - struct dmub_rb_cmd_dpia_notification dpia_notification; struct dmub_rb_cmd_hpd_sense_notify_data hpd_sense_notify; + struct dmub_cmd_fused_request fused_request; }; }; -/* enum dmub_ips_mode - IPS mode identifier */ -enum dmub_ips_mode { - DMUB_IPS_MODE_IPS1_MAX = 0, - DMUB_IPS_MODE_IPS2, - DMUB_IPS_MODE_IPS1_RCG, - DMUB_IPS_MODE_IPS1_ONO2_ON -}; - /** * DMUB firmware version helper macro - useful for checking if the version * of a firmware to know if feature or functionality is supported or present. @@ -699,19 +724,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub); /** - * dmub_srv_sync_inbox1() - sync sw state with hw state - * @dmub: the dmub service - * - * Sync sw state with hw state when resume from S0i3 - * - * Return: - * DMUB_STATUS_OK - success - * DMUB_STATUS_INVALID - unspecified error - */ -enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub); - -/** - * dmub_srv_cmd_queue() - queues a command to the DMUB + * dmub_srv_fb_cmd_queue() - queues a command to the DMUB * @dmub: the dmub service * @cmd: the command to queue * @@ -723,11 +736,11 @@ enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub); * DMUB_STATUS_QUEUE_FULL - no remaining room in queue * DMUB_STATUS_INVALID - unspecified error */ -enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, +enum dmub_status dmub_srv_fb_cmd_queue(struct dmub_srv *dmub, const union dmub_rb_cmd *cmd); /** - * dmub_srv_cmd_execute() - Executes a queued sequence to the dmub + * dmub_srv_fb_cmd_execute() - Executes a queued sequence to the dmub * @dmub: the dmub service * * Begins execution of queued commands on the dmub. @@ -736,7 +749,7 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, * DMUB_STATUS_OK - success * DMUB_STATUS_INVALID - unspecified error */ -enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub); +enum dmub_status dmub_srv_fb_cmd_execute(struct dmub_srv *dmub); /** * dmub_srv_wait_for_hw_pwr_up() - Waits for firmware hardware power up is completed @@ -795,6 +808,23 @@ enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub, uint32_t timeout_us); /** + * dmub_srv_wait_for_pending() - Re-entrant wait for messages currently pending + * @dmub: the dmub service + * @timeout_us: the maximum number of microseconds to wait + * + * Waits until the commands queued prior to this call are complete. + * If interfaces remain busy due to additional work being submitted + * concurrently, this function will not continue to wait. 
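The counters in struct dmub_srv_inbox are what make this wait re-entrant: a caller snapshots num_submitted and waits only until num_reported catches up, so work submitted concurrently by other threads does not extend the wait. An illustrative sketch of those semantics; the in-tree implementation lives in dmub_srv.c and may differ:

/*
 * Illustrative only: wait for everything submitted on inbox1 up to this
 * point, ignoring commands that others queue afterwards.
 */
static enum dmub_status wait_for_prior_submissions(struct dmub_srv *dmub,
		uint32_t timeout_us)
{
	uint64_t watermark = dmub->inbox1.num_submitted;
	uint32_t elapsed_us;

	for (elapsed_us = 0; elapsed_us < timeout_us; elapsed_us++) {
		if (dmub_srv_update_inbox_status(dmub) != DMUB_STATUS_OK)
			return DMUB_STATUS_INVALID;
		if (dmub->inbox1.num_reported >= watermark)
			return DMUB_STATUS_OK;
		/* a ~1 us delay between polls is elided here */
	}

	return DMUB_STATUS_TIMEOUT;
}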
+ * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_wait_for_pending(struct dmub_srv *dmub, + uint32_t timeout_us); + +/** * dmub_srv_wait_for_idle() - Waits for the DMUB to be idle * @dmub: the dmub service * @timeout_us: the maximum number of microseconds to wait @@ -892,9 +922,6 @@ enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub, enum dmub_status dmub_srv_get_fw_boot_option(struct dmub_srv *dmub, union dmub_fw_boot_options *option); -enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub, - union dmub_rb_cmd *cmd); - enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub, bool skip); @@ -959,26 +986,6 @@ enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub); void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index); /** - * dmub_srv_send_reg_inbox0_cmd() - send a dmub command and wait for the command - * being processed by DMUB. - * @dmub: The dmub service - * @cmd: The dmub command being sent. If with_replay is true, the function will - * update cmd with replied data. - * @with_reply: true if DMUB reply needs to be copied back to cmd. false if the - * cmd doesn't need to be replied. - * @timeout_us: timeout in microseconds. - * - * Return: - * DMUB_STATUS_OK - success - * DMUB_STATUS_TIMEOUT - DMUB fails to process the command within the timeout - * interval. - */ -enum dmub_status dmub_srv_send_reg_inbox0_cmd( - struct dmub_srv *dmub, - union dmub_rb_cmd *cmd, - bool with_reply, uint32_t timeout_us); - -/** * dmub_srv_set_power_state() - Track DC power state in dmub_srv * @dmub: The dmub service * @power_state: DC power state setting @@ -990,4 +997,71 @@ enum dmub_status dmub_srv_send_reg_inbox0_cmd( */ void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state); +/** + * dmub_srv_reg_cmd_execute() - Executes provided command to the dmub + * @dmub: the dmub service + * @cmd: the command packet to be executed + * + * Executes a single command for the dmub. + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_reg_cmd_execute(struct dmub_srv *dmub, union dmub_rb_cmd *cmd); + + +/** + * dmub_srv_cmd_get_response() - Copies return data for command into buffer + * @dmub: the dmub service + * @cmd_rsp: response buffer + * + * Copies return data for command into buffer + */ +void dmub_srv_cmd_get_response(struct dmub_srv *dmub, + union dmub_rb_cmd *cmd_rsp); + +/** + * dmub_srv_sync_inboxes() - Sync inbox state + * @dmub: the dmub service + * + * Sync inbox state + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_sync_inboxes(struct dmub_srv *dmub); + +/** + * dmub_srv_wait_for_inbox_free() - Waits for space in the DMUB inbox to free up + * @dmub: the dmub service + * @timeout_us: the maximum number of microseconds to wait + * @num_free_required: number of free entries required + * + * Waits until the DMUB buffer is freed to the specified number. + * The maximum wait time is given in microseconds to prevent spinning + * forever. 
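Taken together with the queue call, a typical burst submission first reserves space, then queues and kicks the ring. A sketch using only the interfaces declared in this header; the timeout value is arbitrary:

/*
 * Sketch: reserve ring space for a burst, queue it, then kick the ring.
 */
static enum dmub_status queue_cmd_burst(struct dmub_srv *dmub,
		union dmub_rb_cmd *cmds, uint32_t count)
{
	enum dmub_status status;
	uint32_t i;

	status = dmub_srv_wait_for_inbox_free(dmub, 100000 /* us */, count);
	if (status != DMUB_STATUS_OK)
		return status;

	for (i = 0; i < count; i++) {
		status = dmub_srv_fb_cmd_queue(dmub, &cmds[i]);
		if (status != DMUB_STATUS_OK)
			return status;
	}

	return dmub_srv_fb_cmd_execute(dmub);
}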
+ * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub, + uint32_t timeout_us, + uint32_t num_free_required); + +/** + * dmub_srv_update_inbox_status() - Updates pending status for inbox & reg inbox0 + * @dmub: the dmub service + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out + * DMUB_STATUS_HW_FAILURE - issue with HW programming + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub); + #endif /* _DMUB_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 1f5f4e3e49d4..c587b3441e07 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -36,6 +36,9 @@ //<DMUB_TYPES>================================================================== /* Basic type definitions. */ +#ifdef __forceinline +#undef __forceinline +#endif #define __forceinline inline /** @@ -101,6 +104,14 @@ */ #define DMUB_MAX_FPO_STREAMS 4 +/* Define to ensure that the "common" members always appear in the same + * order in different structs for back compat purposes + */ +#define COMMON_STREAM_STATIC_SUB_STATE \ + struct dmub_fams2_cmd_legacy_stream_static_state legacy; \ + struct dmub_fams2_cmd_subvp_stream_static_state subvp; \ + struct dmub_fams2_cmd_drr_stream_static_state drr; + /* Maximum number of streams on any ASIC. */ #define DMUB_MAX_STREAMS 6 @@ -288,6 +299,31 @@ union dmub_addr { } u; /*<< Low/high bit access */ uint64_t quad_part; /*<< 64 bit address */ }; + +/* Flattened structure containing SOC BB parameters stored in the VBIOS + * It is not practical to store the entire bounding box in VBIOS since the bounding box struct can gain new parameters. + * This also prevents alignment issues when new parameters are added to the SoC BB. 
+ * The following parameters should be added since these values can't be obtained elsewhere: + * -dml2_soc_power_management_parameters + * -dml2_soc_vmin_clock_limits + */ +struct dmub_soc_bb_params { + uint32_t dram_clk_change_blackout_ns; + uint32_t dram_clk_change_read_only_ns; + uint32_t dram_clk_change_write_only_ns; + uint32_t fclk_change_blackout_ns; + uint32_t g7_ppt_blackout_ns; + uint32_t stutter_enter_plus_exit_latency_ns; + uint32_t stutter_exit_latency_ns; + uint32_t z8_stutter_enter_plus_exit_latency_ns; + uint32_t z8_stutter_exit_latency_ns; + uint32_t z8_min_idle_time_ns; + uint32_t type_b_dram_clk_change_blackout_ns; + uint32_t type_b_ppt_blackout_ns; + uint32_t vmin_limit_dispclk_khz; + uint32_t vmin_limit_dcfclk_khz; + uint32_t g7_temperature_read_blackout_ns; +}; #pragma pack(pop) /** @@ -547,6 +583,11 @@ union replay_hw_flags { * @is_alpm_initialized: Indicates whether ALPM is initialized */ uint32_t is_alpm_initialized : 1; + + /** + * @alpm_mode: Indicates ALPM mode selected + */ + uint32_t alpm_mode : 2; } bitfields; uint32_t u32All; @@ -739,6 +780,21 @@ enum dmub_ips_disable_type { DMUB_IPS_DISABLE_IPS2_Z10 = 4, DMUB_IPS_DISABLE_DYNAMIC = 5, DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF = 6, + DMUB_IPS_DISABLE_Z8_RETENTION = 7, +}; + +enum dmub_ips_rcg_disable_type { + DMUB_IPS_RCG_ENABLE = 0, + DMUB_IPS0_RCG_DISABLE = 1, + DMUB_IPS1_RCG_DISABLE = 2, + DMUB_IPS_RCG_DISABLE = 3 +}; + +enum dmub_ips_in_vpb_disable_type { + DMUB_IPS_VPB_RCG_ONLY = 0, // Legacy behaviour + DMUB_IPS_VPB_DISABLE_ALL = 1, + DMUB_IPS_VPB_ENABLE_IPS1_AND_RCG = 2, + DMUB_IPS_VPB_ENABLE_ALL = 3 // Enable IPS1 Z8, IPS1 and RCG }; #define DMUB_IPS1_ALLOW_MASK 0x00000001 @@ -746,6 +802,17 @@ enum dmub_ips_disable_type { #define DMUB_IPS1_COMMIT_MASK 0x00000004 #define DMUB_IPS2_COMMIT_MASK 0x00000008 +enum dmub_ips_comand_type { + /** + * Start/stop IPS residency measurements for a given IPS mode + */ + DMUB_CMD__IPS_RESIDENCY_CNTL = 0, + /** + * Query IPS residency information for a given IPS mode + */ + DMUB_CMD__IPS_QUERY_RESIDENCY_INFO = 1, +}; + /** * union dmub_fw_boot_options - Boot option definitions for SCRATCH14 */ @@ -815,13 +882,14 @@ enum dmub_shared_state_feature_id { /** * struct dmub_shared_state_ips_fw - Firmware signals for IPS. */ -union dmub_shared_state_ips_fw_signals { + union dmub_shared_state_ips_fw_signals { struct { - uint32_t ips1_commit : 1; /**< 1 if in IPS1 */ + uint32_t ips1_commit : 1; /**< 1 if in IPS1 or IPS0 RCG */ uint32_t ips2_commit : 1; /**< 1 if in IPS2 */ uint32_t in_idle : 1; /**< 1 if DMCUB is in idle */ uint32_t detection_required : 1; /**< 1 if detection is required */ - uint32_t reserved_bits : 28; /**< Reversed */ + uint32_t ips1z8_commit: 1; /**< 1 if in IPS1 Z8 Retention */ + uint32_t reserved_bits : 27; /**< Reserved */ } bits; uint32_t all; }; @@ -829,14 +897,19 @@ union dmub_shared_state_ips_fw_signals { /** * struct dmub_shared_state_ips_signals - Firmware signals for IPS. 
*/ -union dmub_shared_state_ips_driver_signals { + union dmub_shared_state_ips_driver_signals { struct { uint32_t allow_pg : 1; /**< 1 if PG is allowed */ uint32_t allow_ips1 : 1; /**< 1 is IPS1 is allowed */ uint32_t allow_ips2 : 1; /**< 1 is IPS1 is allowed */ uint32_t allow_z10 : 1; /**< 1 if Z10 is allowed */ uint32_t allow_idle: 1; /**< 1 if driver is allowing idle */ - uint32_t reserved_bits : 27; /**< Reversed bits */ + uint32_t allow_ips0_rcg : 1; /**< 1 if IPS0 RCG is allowed */ + uint32_t allow_ips1_rcg : 1; /**< 1 if IPS1 RCG is allowed */ + uint32_t allow_ips1z8 : 1; /**< 1 if IPS1 Z8 Retention is allowed */ + uint32_t allow_dynamic_ips1 : 1; /**< 1 if IPS1 is allowed in dynamic use cases such as VPB */ + uint32_t allow_dynamic_ips1_z8: 1; /**< 1 if IPS1 z8 ret is allowed in dynamic use cases such as VPB */ + uint32_t reserved_bits : 22; /**< Reserved bits */ } bits; uint32_t all; }; @@ -865,7 +938,9 @@ struct dmub_shared_state_ips_fw { uint32_t ips1_exit_count; /**< Exit counter for IPS1 */ uint32_t ips2_entry_count; /**< Entry counter for IPS2 */ uint32_t ips2_exit_count; /**< Exit counter for IPS2 */ - uint32_t reserved[55]; /**< Reversed, to be updated when adding new fields. */ + uint32_t ips1_z8ret_entry_count; /**< Entry counter for IPS1 Z8 Retention */ + uint32_t ips1_z8ret_exit_count; /**< Exit counter for IPS1 Z8 Retention */ + uint32_t reserved[53]; /**< Reserved, to be updated when adding new fields. */ }; /* 248-bytes, fixed */ /** @@ -1253,6 +1328,10 @@ enum dmub_gpint_command { * DESC: Setup debug configs. */ DMUB_GPINT__SETUP_DEBUG_MODE = 136, + /** + * DESC: Initiates IPS wake sequence. + */ + DMUB_GPINT__IPS_DEBUG_WAKE = 137, }; /** @@ -1482,6 +1561,16 @@ enum dmub_cmd_type { */ DMUB_CMD__FUSED_IO = 89, + /** + * Command type used for all LSDMA commands. + */ + DMUB_CMD__LSDMA = 90, + + /** + * Command type used for all IPS commands. 
+ */ + DMUB_CMD__IPS = 91, + DMUB_CMD__VBIOS = 128, }; @@ -1892,6 +1981,121 @@ struct dmub_rb_cmd_fams2_flip { struct dmub_fams2_flip_info flip_info; }; +struct dmub_cmd_lsdma_data { + union { + struct lsdma_init_data { + union dmub_addr gpu_addr_base; + uint32_t ring_size; + } init_data; + struct lsdma_tiled_copy_data { + uint32_t src_addr_lo; + uint32_t src_addr_hi; + uint32_t dst_addr_lo; + uint32_t dst_addr_hi; + + uint32_t src_x : 16; + uint32_t src_y : 16; + + uint32_t src_width : 16; + uint32_t src_height : 16; + + uint32_t dst_x : 16; + uint32_t dst_y : 16; + + uint32_t dst_width : 16; + uint32_t dst_height : 16; + + uint32_t rect_x : 16; + uint32_t rect_y : 16; + + uint32_t src_swizzle_mode : 5; + uint32_t src_mip_max : 5; + uint32_t src_mip_id : 5; + uint32_t dst_mip_max : 5; + uint32_t dst_swizzle_mode : 5; + uint32_t dst_mip_id : 5; + uint32_t tmz : 1; + uint32_t dcc : 1; + + uint32_t data_format : 6; + uint32_t padding1 : 4; + uint32_t dst_element_size : 3; + uint32_t num_type : 3; + uint32_t src_element_size : 3; + uint32_t write_compress : 2; + uint32_t cache_policy_dst : 2; + uint32_t cache_policy_src : 2; + uint32_t read_compress : 2; + uint32_t src_dim : 2; + uint32_t dst_dim : 2; + uint32_t max_uncom : 1; + + uint32_t max_com : 2; + uint32_t padding : 30; + } tiled_copy_data; + struct lsdma_linear_copy_data { + uint32_t count : 30; + uint32_t cache_policy_dst : 2; + + uint32_t tmz : 1; + uint32_t cache_policy_src : 2; + uint32_t padding : 29; + + uint32_t src_lo; + uint32_t src_hi; + uint32_t dst_lo; + uint32_t dst_hi; + } linear_copy_data; + struct lsdma_reg_write_data { + uint32_t reg_addr; + uint32_t reg_data; + } reg_write_data; + struct lsdma_pio_copy_data { + union { + struct { + uint32_t byte_count : 26; + uint32_t src_loc : 1; + uint32_t dst_loc : 1; + uint32_t src_addr_inc : 1; + uint32_t dst_addr_inc : 1; + uint32_t overlap_disable : 1; + uint32_t constant_fill : 1; + } fields; + uint32_t raw; + } packet; + uint32_t src_lo; + uint32_t src_hi; + uint32_t dst_lo; + uint32_t dst_hi; + } pio_copy_data; + struct lsdma_pio_constfill_data { + union { + struct { + uint32_t byte_count : 26; + uint32_t src_loc : 1; + uint32_t dst_loc : 1; + uint32_t src_addr_inc : 1; + uint32_t dst_addr_inc : 1; + uint32_t overlap_disable : 1; + uint32_t constant_fill : 1; + } fields; + uint32_t raw; + } packet; + uint32_t dst_lo; + uint32_t dst_hi; + uint32_t data; + } pio_constfill_data; + + uint32_t all[14]; + } u; + +}; + +struct dmub_rb_cmd_lsdma { + struct dmub_cmd_header header; + struct dmub_cmd_lsdma_data lsdma_data; +}; + struct dmub_optc_state_v2 { uint32_t v_total_min; uint32_t v_total_max; @@ -1923,6 +2127,28 @@ enum fams2_stream_type { FAMS2_STREAM_TYPE_SUBVP = 4, }; +struct dmub_rect16 { + /** + * Dirty rect x offset. + */ + uint16_t x; + + /** + * Dirty rect y offset. + */ + uint16_t y; + + /** + * Dirty rect width. + */ + uint16_t width; + + /** + * Dirty rect height. 
+ */ + uint16_t height; +}; + /* static stream state */ struct dmub_fams2_legacy_stream_static_state { uint8_t vactive_det_fill_delay_otg_vlines; @@ -1995,11 +2221,13 @@ union dmub_fams2_stream_static_sub_state { }; //v0 union dmub_fams2_cmd_stream_static_sub_state { - struct dmub_fams2_cmd_legacy_stream_static_state legacy; - struct dmub_fams2_cmd_subvp_stream_static_state subvp; - struct dmub_fams2_cmd_drr_stream_static_state drr; + COMMON_STREAM_STATIC_SUB_STATE }; //v1 +union dmub_fams2_stream_static_sub_state_v2 { + COMMON_STREAM_STATIC_SUB_STATE +}; //v2 + struct dmub_fams2_stream_static_state { enum fams2_stream_type type; uint32_t otg_vline_time_ns; @@ -2065,7 +2293,7 @@ struct dmub_fams2_cmd_stream_static_base_state { struct dmub_fams2_stream_static_state_v1 { struct dmub_fams2_cmd_stream_static_base_state base; - union dmub_fams2_cmd_stream_static_sub_state sub_state; + union dmub_fams2_stream_static_sub_state_v2 sub_state; }; //v1 /** @@ -2113,6 +2341,11 @@ union dmub_cmd_fams2_config { } stream_v1; //v1 }; +struct dmub_fams2_config_v2 { + struct dmub_cmd_fams2_global_config global; + struct dmub_fams2_stream_static_state_v1 stream_v1[DMUB_MAX_STREAMS]; //v1 +}; + /** * DMUB rb command definition for FAMS2 (merged SubVP, FPO, Legacy) */ @@ -2122,6 +2355,22 @@ struct dmub_rb_cmd_fams2 { }; /** + * Indirect buffer descriptor + */ +struct dmub_ib_data { + union dmub_addr src; // location of indirect buffer in memory + uint16_t size; // indirect buffer size in bytes +}; + +/** + * DMUB rb command definition for commands passed over indirect buffer + */ +struct dmub_rb_cmd_ib { + struct dmub_cmd_header header; + struct dmub_ib_data ib_data; +}; + +/** * enum dmub_cmd_idle_opt_type - Idle optimization command type. */ enum dmub_cmd_idle_opt_type { @@ -2144,6 +2393,11 @@ enum dmub_cmd_idle_opt_type { * DCN hardware notify power state. */ DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE = 3, + + /** + * DCN notify to release HW. + */ + DMUB_CMD__IDLE_OPT_RELEASE_HW = 4, }; /** @@ -2289,7 +2543,8 @@ struct dmub_dig_transmitter_control_data_v1_7 { uint8_t connobj_id; /**< Connector Object Id defined in ObjectId.h */ uint8_t HPO_instance; /**< HPO instance (0: inst0, 1: inst1) */ uint8_t reserved1; /**< For future use */ - uint8_t reserved2[3]; /**< For future use */ + uint8_t skip_phy_ssc_reduction; + uint8_t reserved2[2]; /**< For future use */ uint32_t reserved3[11]; /**< For future use */ }; @@ -2636,7 +2891,11 @@ enum dp_hpd_type { /** * DP HPD short pulse */ - DP_IRQ + DP_IRQ = 1, + /** + * Failure to acquire DP HPD state + */ + DP_NONE_HPD = 2 }; /** @@ -2903,6 +3162,7 @@ enum dmub_cmd_fams_type { DMUB_CMD__FAMS2_CONFIG = 4, DMUB_CMD__FAMS2_DRR_UPDATE = 5, DMUB_CMD__FAMS2_FLIP = 6, + DMUB_CMD__FAMS2_IB_CONFIG = 7, }; /** @@ -3609,6 +3869,12 @@ struct dmub_rb_cmd_psr_set_power_opt { struct dmub_cmd_psr_set_power_opt_data psr_set_power_opt_data; }; +enum dmub_alpm_mode { + ALPM_AUXWAKE = 0, + ALPM_AUXLESS = 1, + ALPM_UNSUPPORTED = 2, +}; + /** * Definition of Replay Residency GPINT command. * Bit[0] - Residency mode for Revision 0 @@ -3742,6 +4008,15 @@ enum dmub_cmd_replay_general_subtype { REPLAY_GENERAL_CMD_SET_LOW_RR_ACTIVATE, }; +struct dmub_alpm_auxless_data { + uint16_t lfps_setup_ns; + uint16_t lfps_period_ns; + uint16_t lfps_silence_ns; + uint16_t lfps_t1_t2_override_us; + short lfps_t1_t2_offset_us; + uint8_t lttpr_count; +}; + /** * Data passed from driver to FW in a DMUB_CMD__REPLAY_COPY_SETTINGS command. 
*/ @@ -3773,6 +4048,14 @@ struct dmub_cmd_replay_copy_settings_data { */ uint8_t digbe_inst; /** + * @hpo_stream_enc_inst: HPO stream encoder instance + */ + uint8_t hpo_stream_enc_inst; + /** + * @hpo_link_enc_inst: HPO link encoder instance + */ + uint8_t hpo_link_enc_inst; + /** * AUX HW instance. */ uint8_t aux_inst; @@ -3812,6 +4095,15 @@ struct dmub_cmd_replay_copy_settings_data { * Use FSM state for Replay power up/down */ uint8_t use_phy_fsm; + /** + * Use for AUX-less ALPM LFPS wake operation + */ + struct dmub_alpm_auxless_data auxless_alpm_data; + + /** + * @pad: Align structure to 4 byte boundary. + */ + uint8_t pad[2]; }; /** @@ -3867,6 +4159,18 @@ struct dmub_rb_cmd_replay_enable_data { * This does not support HDMI/DP2 for now. */ uint8_t phy_rate; + /** + * @hpo_stream_enc_inst: HPO stream encoder instance + */ + uint8_t hpo_stream_enc_inst; + /** + * @hpo_link_enc_inst: HPO link encoder instance + */ + uint8_t hpo_link_enc_inst; + /** + * @pad: Align structure to 4 byte boundary. + */ + uint8_t pad[2]; }; /** @@ -4360,6 +4664,42 @@ enum dmub_cmd_abm_type { * Get the current ACE curve. */ DMUB_CMD__ABM_GET_ACE_CURVE = 10, + + /** + * Get current histogram data + */ + DMUB_CMD__ABM_GET_HISTOGRAM_DATA = 11, +}; + +/** + * LSDMA command sub-types. + */ +enum dmub_cmd_lsdma_type { + /** + * Initialize parameters for LSDMA. + * Ring buffer is mapped to the ring buffer + */ + DMUB_CMD__LSDMA_INIT_CONFIG = 0, + /** + * LSDMA copies data from source to destination linearly + */ + DMUB_CMD__LSDMA_LINEAR_COPY = 1, + /** + * Send the tiled-to-tiled copy command + */ + DMUB_CMD__LSDMA_TILED_TO_TILED_COPY = 2, + /** + * Send the poll reg write command + */ + DMUB_CMD__LSDMA_POLL_REG_WRITE = 3, + /** + * Send the pio copy command + */ + DMUB_CMD__LSDMA_PIO_COPY = 4, + /** + * Send the pio constfill command + */ + DMUB_CMD__LSDMA_PIO_CONSTFILL = 5, }; struct abm_ace_curve { @@ -4954,6 +5294,20 @@ enum dmub_abm_ace_curve_type { }; /** + * enum dmub_abm_histogram_type - Histogram type. + */ +enum dmub_abm_histogram_type { + /** + * ACE curve as defined by the SW layer. + */ + ABM_HISTOGRAM_TYPE__SW = 0, + /** + * ACE curve as defined by the SW to HW translation interface layer. + */ + ABM_HISTOGRAM_TYPE__SW_IF = 1, +}; + +/** * Definition of a DMUB_CMD__ABM_GET_ACE_CURVE command. */ struct dmub_rb_cmd_abm_get_ace_curve { @@ -4989,6 +5343,41 @@ struct dmub_rb_cmd_abm_get_ace_curve { }; /** + * Definition of a DMUB_CMD__ABM_GET_HISTOGRAM command. + */ +struct dmub_rb_cmd_abm_get_histogram { + /** + * Command header. + */ + struct dmub_cmd_header header; + + /** + * Address where Histogram should be copied. + */ + union dmub_addr dest; + + /** + * Type of Histogram being queried. + */ + enum dmub_abm_histogram_type histogram_type; + + /** + * Indirect buffer length. + */ + uint16_t bytes; + + /** + * eDP panel instance. + */ + uint8_t panel_inst; + + /** + * Explicit padding to 4 byte boundary. + */ + uint8_t pad; +}; + +/** * Definition of a DMUB_CMD__ABM_SAVE_RESTORE command. 
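Mirroring the existing ACE-curve query, the new histogram command above carries a destination address, an indirect-buffer length, and a panel instance. A sketch of filling it in; DMUB_CMD__ABM is the existing ABM command class from enum dmub_cmd_type (not shown in this hunk):

/*
 * Sketch: request the ABM histogram into a GART-visible buffer.
 */
static void build_abm_histogram_cmd(union dmub_rb_cmd *cmd,
		union dmub_addr dest, uint16_t bytes, uint8_t panel_inst)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->abm_get_histogram.header.type = DMUB_CMD__ABM;
	cmd->abm_get_histogram.header.sub_type = DMUB_CMD__ABM_GET_HISTOGRAM_DATA;
	cmd->abm_get_histogram.header.payload_bytes =
		sizeof(struct dmub_rb_cmd_abm_get_histogram) -
		sizeof(struct dmub_cmd_header);
	cmd->abm_get_histogram.dest = dest;
	cmd->abm_get_histogram.histogram_type = ABM_HISTOGRAM_TYPE__SW;
	cmd->abm_get_histogram.bytes = bytes;
	cmd->abm_get_histogram.panel_inst = panel_inst;
}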
 */ struct dmub_rb_cmd_abm_save_restore { @@ -5389,7 +5778,8 @@ struct dmub_cmd_fused_request { struct dmub_cmd_fused_request_location_i2c { uint8_t is_aux : 1; // False uint8_t ddc_line : 3; - uint8_t _reserved0 : 4; + uint8_t over_aux : 1; + uint8_t _reserved0 : 3; uint8_t address; uint8_t offset; uint8_t length; @@ -5517,6 +5907,59 @@ struct dmub_rb_cmd_assr_enable { }; /** + * Current definition of "ips_mode" from driver + */ +enum ips_residency_mode { + IPS_RESIDENCY__IPS1_MAX, + IPS_RESIDENCY__IPS2, + IPS_RESIDENCY__IPS1_RCG, + IPS_RESIDENCY__IPS1_ONO2_ON, +}; + +#define NUM_IPS_HISTOGRAM_BUCKETS 16 + +/** + * IPS residency statistics to be sent to driver - subset of struct dmub_ips_residency_stats + */ +struct dmub_ips_residency_info { + uint32_t residency_millipercent; + uint32_t entry_counter; + uint32_t histogram[NUM_IPS_HISTOGRAM_BUCKETS]; + uint64_t total_time_us; + uint64_t total_inactive_time_us; +}; + +/** + * Data passed from driver to FW in a DMUB_CMD__IPS_RESIDENCY_CNTL command. + */ +struct dmub_cmd_ips_residency_cntl_data { + uint8_t panel_inst; + uint8_t start_measurement; + uint8_t padding[2]; // align to 4-byte boundary +}; + +struct dmub_rb_cmd_ips_residency_cntl { + struct dmub_cmd_header header; + struct dmub_cmd_ips_residency_cntl_data cntl_data; +}; + +/** + * Data passed from driver to FW in a DMUB_CMD__IPS_QUERY_RESIDENCY_INFO command. + */ +struct dmub_cmd_ips_query_residency_info_data { + union dmub_addr dest; + uint32_t size; + uint32_t ips_mode; + uint8_t panel_inst; + uint8_t padding[3]; // align to 4-byte boundary +}; + +struct dmub_rb_cmd_ips_query_residency_info { + struct dmub_cmd_header header; + struct dmub_cmd_ips_query_residency_info_data info_data; +}; + +/** * union dmub_rb_cmd - DMUB inbox command. */ union dmub_rb_cmd { @@ -5687,6 +6130,11 @@ union dmub_rb_cmd { struct dmub_rb_cmd_abm_get_ace_curve abm_get_ace_curve; /** + * Definition of a DMUB_CMD__ABM_GET_HISTOGRAM_DATA command. + */ + struct dmub_rb_cmd_abm_get_histogram abm_get_histogram; + + /** * Definition of a DMUB_CMD__ABM_SET_EVENT command. */ struct dmub_rb_cmd_abm_set_event abm_set_event; @@ -5817,13 +6265,25 @@ union dmub_rb_cmd { * Definition of a DMUB_CMD__PSP_ASSR_ENABLE command. */ struct dmub_rb_cmd_assr_enable assr_enable; + struct dmub_rb_cmd_fams2 fams2_config; + struct dmub_rb_cmd_ib ib_fams2_config; + struct dmub_rb_cmd_fams2_drr_update fams2_drr_update; struct dmub_rb_cmd_fams2_flip fams2_flip; struct dmub_rb_cmd_fused_io fused_io; + + /** + * Definition of a DMUB_CMD__LSDMA command. 
+ */ + struct dmub_rb_cmd_lsdma lsdma; + + struct dmub_rb_cmd_ips_residency_cntl ips_residency_cntl; + + struct dmub_rb_cmd_ips_query_residency_info ips_query_residency_info; }; /** @@ -5934,6 +6394,9 @@ static inline uint32_t dmub_rb_num_free(struct dmub_rb *rb) else data_count = rb->capacity - (rb->rptr - rb->wrpt); + /* +1 because 1 entry is always unusable */ + data_count += DMUB_RB_CMD_SIZE; + return (rb->capacity - data_count) / DMUB_RB_CMD_SIZE; } @@ -5953,6 +6416,7 @@ static inline bool dmub_rb_full(struct dmub_rb *rb) else data_count = rb->capacity - (rb->rptr - rb->wrpt); + /* -1 because 1 entry is always unusable */ return (data_count == (rb->capacity - DMUB_RB_CMD_SIZE)); } diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index a308bd604677..3f38db752b84 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -416,7 +416,7 @@ uint32_t dmub_dcn31_get_current_time(struct dmub_srv *dmub) void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub) { - uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset; + uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset, is_pwait; uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled; struct dmub_timeout_info timeout = {0}; @@ -466,6 +466,9 @@ void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub) REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); dmub->debug.is_dmcub_enabled = is_dmub_enabled; + REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait); + dmub->debug.is_pwait = is_pwait; + REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); dmub->debug.is_dmcub_soft_reset = is_soft_reset; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c index 72a0f078cd1a..2228d62adc7e 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c @@ -92,19 +92,15 @@ void dmub_dcn35_reset(struct dmub_srv *dmub) uint32_t in_reset, is_enabled, scratch, i, pwait_mode; REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset); + REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled); - if (in_reset == 0) { + if (in_reset == 0 && is_enabled != 0) { cmd.bits.status = 1; cmd.bits.command_code = DMUB_GPINT__STOP_FW; cmd.bits.param = 0; dmub->hw_funcs.set_gpint(dmub, cmd); - /** - * Timeout covers both the ACK and the wait - * for remaining work to finish. - */ - for (i = 0; i < timeout; ++i) { if (dmub->hw_funcs.is_gpint_acked(dmub, cmd)) break; @@ -130,11 +126,9 @@ void dmub_dcn35_reset(struct dmub_srv *dmub) /* Force reset in case we timed out, DMCUB is likely hung. 
*/ } - REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled); - if (is_enabled) { REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1); - REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); + udelay(1); REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); } @@ -160,11 +154,7 @@ void dmub_dcn35_reset_release(struct dmub_srv *dmub) LONO_SOCCLK_GATE_DISABLE, 1, LONO_DMCUBCLK_GATE_DISABLE, 1); - REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); - udelay(1); REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1); - REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1); - udelay(1); REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0); REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 0); } @@ -464,7 +454,7 @@ uint32_t dmub_dcn35_get_current_time(struct dmub_srv *dmub) void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub) { - uint32_t is_dmub_enabled, is_soft_reset; + uint32_t is_dmub_enabled, is_soft_reset, is_pwait; uint32_t is_traceport_enabled, is_cw6_enabled; struct dmub_timeout_info timeout = {0}; @@ -515,6 +505,9 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub) REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); dmub->debug.is_dmcub_enabled = is_dmub_enabled; + REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait); + dmub->debug.is_pwait = is_pwait; + REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); dmub->debug.is_dmcub_soft_reset = is_soft_reset; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c index e67f7c4784eb..b31adbd0d685 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c @@ -66,24 +66,20 @@ void dmub_dcn401_reset(struct dmub_srv *dmub) const uint32_t timeout_us = 1 * 1000 * 1000; //1s const uint32_t poll_delay_us = 1; //1us uint32_t i = 0; - uint32_t in_reset, scratch, pwait_mode; + uint32_t enabled, in_reset, scratch, pwait_mode; - REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset); + REG_GET(DMCUB_CNTL, + DMCUB_ENABLE, &enabled); + REG_GET(DMCUB_CNTL2, + DMCUB_SOFT_RESET, &in_reset); - if (in_reset == 0) { + if (enabled && in_reset == 0) { cmd.bits.status = 1; cmd.bits.command_code = DMUB_GPINT__STOP_FW; cmd.bits.param = 0; dmub->hw_funcs.set_gpint(dmub, cmd); - for (i = 0; i < timeout_us; i++) { - if (dmub->hw_funcs.is_gpint_acked(dmub, cmd)) - break; - - udelay(poll_delay_us); - } - for (; i < timeout_us; i++) { scratch = dmub->hw_funcs.get_gpint_response(dmub); if (scratch == DMUB_GPINT__STOP_FW_RESPONSE) @@ -417,7 +413,7 @@ uint32_t dmub_dcn401_get_current_time(struct dmub_srv *dmub) void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub) { - uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset; + uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset, is_pwait; uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled; struct dmub_timeout_info timeout = {0}; @@ -468,6 +464,9 @@ void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub) REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); dmub->debug.is_dmcub_enabled = is_dmub_enabled; + REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait); + dmub->debug.is_pwait = is_pwait; + REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); dmub->debug.is_dmcub_soft_reset = is_soft_reset; @@ -517,28 +516,69 @@ void dmub_dcn401_send_reg_inbox0_cmd_msg(struct dmub_srv *dmub, union dmub_rb_cmd *cmd) { uint32_t *dwords = (uint32_t *)cmd; - + int32_t payload_size_bytes = cmd->cmd_common.header.payload_bytes; + uint32_t msg_index; static_assert(sizeof(*cmd) == 64, "DMUB command size mismatch"); 
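The dmub_rb_num_free()/dmub_rb_full() hunks above lean on the classic ring-buffer convention that one command slot is kept permanently unusable, so rptr == wrpt can only ever mean "empty" rather than being ambiguous with "full". A minimal standalone model of that accounting, assuming the same 64-byte command size as DMUB_RB_CMD_SIZE; this is an illustration of the arithmetic, not the driver code:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CMD_SIZE 64u /* stands in for DMUB_RB_CMD_SIZE */

struct rb { uint32_t rptr, wrpt, capacity; };

static uint32_t used_bytes(const struct rb *rb)
{
	return (rb->wrpt >= rb->rptr) ? rb->wrpt - rb->rptr
				      : rb->capacity - (rb->rptr - rb->wrpt);
}

static uint32_t rb_num_free(const struct rb *rb)
{
	/* +1 entry: charge the permanently reserved slot as used */
	uint32_t data_count = used_bytes(rb) + CMD_SIZE;

	return (rb->capacity - data_count) / CMD_SIZE;
}

static bool rb_full(const struct rb *rb)
{
	/* -1 entry: full once only the reserved slot remains */
	return used_bytes(rb) == rb->capacity - CMD_SIZE;
}

int main(void)
{
	struct rb rb = { .rptr = 0, .wrpt = 0, .capacity = 16 * CMD_SIZE };

	assert(rb_num_free(&rb) == 15); /* 16 slots, one reserved */
	rb.wrpt = 15 * CMD_SIZE;        /* queue 15 commands */
	assert(rb_num_free(&rb) == 0 && rb_full(&rb));
	printf("ring accounting OK\n");
	return 0;
}

Without the reserved slot, a fully written ring would wrap wrpt back onto rptr and become indistinguishable from an empty one.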
- REG_WRITE(DMCUB_REG_INBOX0_MSG0, dwords[0]); - REG_WRITE(DMCUB_REG_INBOX0_MSG1, dwords[1]); - REG_WRITE(DMCUB_REG_INBOX0_MSG2, dwords[2]); - REG_WRITE(DMCUB_REG_INBOX0_MSG3, dwords[3]); - REG_WRITE(DMCUB_REG_INBOX0_MSG4, dwords[4]); - REG_WRITE(DMCUB_REG_INBOX0_MSG5, dwords[5]); - REG_WRITE(DMCUB_REG_INBOX0_MSG6, dwords[6]); - REG_WRITE(DMCUB_REG_INBOX0_MSG7, dwords[7]); - REG_WRITE(DMCUB_REG_INBOX0_MSG8, dwords[8]); - REG_WRITE(DMCUB_REG_INBOX0_MSG9, dwords[9]); - REG_WRITE(DMCUB_REG_INBOX0_MSG10, dwords[10]); - REG_WRITE(DMCUB_REG_INBOX0_MSG11, dwords[11]); - REG_WRITE(DMCUB_REG_INBOX0_MSG12, dwords[12]); - REG_WRITE(DMCUB_REG_INBOX0_MSG13, dwords[13]); - REG_WRITE(DMCUB_REG_INBOX0_MSG14, dwords[14]); + /* write remaining data based on payload size */ + for (msg_index = 0; msg_index < 15; msg_index++) { + if (payload_size_bytes <= msg_index * 4) { + break; + } + + switch (msg_index) { + case 0: + REG_WRITE(DMCUB_REG_INBOX0_MSG0, dwords[msg_index + 1]); + break; + case 1: + REG_WRITE(DMCUB_REG_INBOX0_MSG1, dwords[msg_index + 1]); + break; + case 2: + REG_WRITE(DMCUB_REG_INBOX0_MSG2, dwords[msg_index + 1]); + break; + case 3: + REG_WRITE(DMCUB_REG_INBOX0_MSG3, dwords[msg_index + 1]); + break; + case 4: + REG_WRITE(DMCUB_REG_INBOX0_MSG4, dwords[msg_index + 1]); + break; + case 5: + REG_WRITE(DMCUB_REG_INBOX0_MSG5, dwords[msg_index + 1]); + break; + case 6: + REG_WRITE(DMCUB_REG_INBOX0_MSG6, dwords[msg_index + 1]); + break; + case 7: + REG_WRITE(DMCUB_REG_INBOX0_MSG7, dwords[msg_index + 1]); + break; + case 8: + REG_WRITE(DMCUB_REG_INBOX0_MSG8, dwords[msg_index + 1]); + break; + case 9: + REG_WRITE(DMCUB_REG_INBOX0_MSG9, dwords[msg_index + 1]); + break; + case 10: + REG_WRITE(DMCUB_REG_INBOX0_MSG10, dwords[msg_index + 1]); + break; + case 11: + REG_WRITE(DMCUB_REG_INBOX0_MSG11, dwords[msg_index + 1]); + break; + case 12: + REG_WRITE(DMCUB_REG_INBOX0_MSG12, dwords[msg_index + 1]); + break; + case 13: + REG_WRITE(DMCUB_REG_INBOX0_MSG13, dwords[msg_index + 1]); + break; + case 14: + REG_WRITE(DMCUB_REG_INBOX0_MSG14, dwords[msg_index + 1]); + break; + } + } + /* writing to INBOX RDY register will trigger DMUB REG INBOX0 RDY * interrupt. 
*/ - REG_WRITE(DMCUB_REG_INBOX0_RDY, dwords[15]); + REG_WRITE(DMCUB_REG_INBOX0_RDY, dwords[0]); } uint32_t dmub_dcn401_read_reg_inbox0_rsp_int_status(struct dmub_srv *dmub) @@ -556,30 +596,39 @@ void dmub_dcn401_read_reg_inbox0_cmd_rsp(struct dmub_srv *dmub, static_assert(sizeof(*cmd) == 64, "DMUB command size mismatch"); - dwords[0] = REG_READ(DMCUB_REG_INBOX0_MSG0); - dwords[1] = REG_READ(DMCUB_REG_INBOX0_MSG1); - dwords[2] = REG_READ(DMCUB_REG_INBOX0_MSG2); - dwords[3] = REG_READ(DMCUB_REG_INBOX0_MSG3); - dwords[4] = REG_READ(DMCUB_REG_INBOX0_MSG4); - dwords[5] = REG_READ(DMCUB_REG_INBOX0_MSG5); - dwords[6] = REG_READ(DMCUB_REG_INBOX0_MSG6); - dwords[7] = REG_READ(DMCUB_REG_INBOX0_MSG7); - dwords[8] = REG_READ(DMCUB_REG_INBOX0_MSG8); - dwords[9] = REG_READ(DMCUB_REG_INBOX0_MSG9); - dwords[10] = REG_READ(DMCUB_REG_INBOX0_MSG10); - dwords[11] = REG_READ(DMCUB_REG_INBOX0_MSG11); - dwords[12] = REG_READ(DMCUB_REG_INBOX0_MSG12); - dwords[13] = REG_READ(DMCUB_REG_INBOX0_MSG13); - dwords[14] = REG_READ(DMCUB_REG_INBOX0_MSG14); - dwords[15] = REG_READ(DMCUB_REG_INBOX0_RSP); + dwords[0] = REG_READ(DMCUB_REG_INBOX0_RSP); + dwords[1] = REG_READ(DMCUB_REG_INBOX0_MSG0); + dwords[2] = REG_READ(DMCUB_REG_INBOX0_MSG1); + dwords[3] = REG_READ(DMCUB_REG_INBOX0_MSG2); + dwords[4] = REG_READ(DMCUB_REG_INBOX0_MSG3); + dwords[5] = REG_READ(DMCUB_REG_INBOX0_MSG4); + dwords[6] = REG_READ(DMCUB_REG_INBOX0_MSG5); + dwords[7] = REG_READ(DMCUB_REG_INBOX0_MSG6); + dwords[8] = REG_READ(DMCUB_REG_INBOX0_MSG7); + dwords[9] = REG_READ(DMCUB_REG_INBOX0_MSG8); + dwords[10] = REG_READ(DMCUB_REG_INBOX0_MSG9); + dwords[11] = REG_READ(DMCUB_REG_INBOX0_MSG10); + dwords[12] = REG_READ(DMCUB_REG_INBOX0_MSG11); + dwords[13] = REG_READ(DMCUB_REG_INBOX0_MSG12); + dwords[14] = REG_READ(DMCUB_REG_INBOX0_MSG13); + dwords[15] = REG_READ(DMCUB_REG_INBOX0_MSG14); } void dmub_dcn401_write_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub) { REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_ACK, 1); +} + +void dmub_dcn401_clear_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub) +{ REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_ACK, 0); } +void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable) +{ + REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_EN, enable ? 1:0); +} + void dmub_dcn401_write_reg_outbox0_rdy_int_ack(struct dmub_srv *dmub) { REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_ACK, 1); @@ -604,11 +653,6 @@ uint32_t dmub_dcn401_read_reg_outbox0_rsp_int_status(struct dmub_srv *dmub) return status; } -void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable) -{ - REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_EN, enable ? 1:0); -} - void dmub_dcn401_enable_reg_outbox0_rdy_int(struct dmub_srv *dmub, bool enable) { REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_EN, enable ? 
1:0); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h index c35be52676f6..88c3a44d67d9 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h @@ -277,11 +277,13 @@ uint32_t dmub_dcn401_read_reg_inbox0_rsp_int_status(struct dmub_srv *dmub); void dmub_dcn401_read_reg_inbox0_cmd_rsp(struct dmub_srv *dmub, union dmub_rb_cmd *cmd); void dmub_dcn401_write_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub); +void dmub_dcn401_clear_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub); +void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable); + void dmub_dcn401_write_reg_outbox0_rdy_int_ack(struct dmub_srv *dmub); void dmub_dcn401_read_reg_outbox0_msg(struct dmub_srv *dmub, uint32_t *msg); void dmub_dcn401_write_reg_outbox0_rsp(struct dmub_srv *dmub, uint32_t *msg); uint32_t dmub_dcn401_read_reg_outbox0_rsp_int_status(struct dmub_srv *dmub); -void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable); void dmub_dcn401_enable_reg_outbox0_rdy_int(struct dmub_srv *dmub, bool enable); uint32_t dmub_dcn401_read_reg_outbox0_rdy_int_status(struct dmub_srv *dmub); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index ae8133816b43..b17a19400c06 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -65,6 +65,12 @@ /* Default scratch mem size. */ #define DMUB_SCRATCH_MEM_SIZE (1024) +/* Default indirect buffer size. */ +#define DMUB_IB_MEM_SIZE (1280) + +/* Default LSDMA ring buffer size. */ +#define DMUB_LSDMA_RB_SIZE (64 * 1024) + /* Number of windows in use. */ #define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL) /* Base addresses. 
 */ @@ -157,6 +163,9 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) { struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs; + /* default to specifying no inbox type */ + enum dmub_inbox_cmd_interface_type default_inbox_type = DMUB_CMD_INTERFACE_DEFAULT; + switch (asic) { case DMUB_ASIC_DCN20: case DMUB_ASIC_DCN21: @@ -395,10 +404,15 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) funcs->get_current_time = dmub_dcn401_get_current_time; funcs->get_diagnostic_data = dmub_dcn401_get_diagnostic_data; + funcs->send_reg_inbox0_cmd_msg = dmub_dcn401_send_reg_inbox0_cmd_msg; funcs->read_reg_inbox0_rsp_int_status = dmub_dcn401_read_reg_inbox0_rsp_int_status; funcs->read_reg_inbox0_cmd_rsp = dmub_dcn401_read_reg_inbox0_cmd_rsp; funcs->write_reg_inbox0_rsp_int_ack = dmub_dcn401_write_reg_inbox0_rsp_int_ack; + funcs->clear_reg_inbox0_rsp_int_ack = dmub_dcn401_clear_reg_inbox0_rsp_int_ack; + funcs->enable_reg_inbox0_rsp_int = dmub_dcn401_enable_reg_inbox0_rsp_int; + default_inbox_type = DMUB_CMD_INTERFACE_FB; // still default to FB for now + funcs->write_reg_outbox0_rdy_int_ack = dmub_dcn401_write_reg_outbox0_rdy_int_ack; funcs->read_reg_outbox0_msg = dmub_dcn401_read_reg_outbox0_msg; funcs->write_reg_outbox0_rsp = dmub_dcn401_write_reg_outbox0_rsp; @@ -411,6 +425,20 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) return false; } + /* set default inbox type if not overridden */ + if (dmub->inbox_type == DMUB_CMD_INTERFACE_DEFAULT) { + if (default_inbox_type != DMUB_CMD_INTERFACE_DEFAULT) { + /* use default inbox type as specified by DCN rev */ + dmub->inbox_type = default_inbox_type; + } else if (funcs->send_reg_inbox0_cmd_msg) { + /* prefer reg as default inbox type if present */ + dmub->inbox_type = DMUB_CMD_INTERFACE_REG; + } else { + /* use fb as fallback */ + dmub->inbox_type = DMUB_CMD_INTERFACE_FB; + } + } + return true; } @@ -426,6 +454,7 @@ enum dmub_status dmub_srv_create(struct dmub_srv *dmub, dmub->asic = params->asic; dmub->fw_version = params->fw_version; dmub->is_virtual = params->is_virtual; + dmub->inbox_type = params->inbox_type; /* Setup asic dependent hardware funcs. 
*/ if (!dmub_srv_hw_setup(dmub, params->asic)) { @@ -536,7 +565,9 @@ enum dmub_status window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size; window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size; window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE; + window_sizes[DMUB_WINDOW_IB_MEM] = DMUB_IB_MEM_SIZE; window_sizes[DMUB_WINDOW_SHARED_STATE] = max(DMUB_FW_HEADER_SHARED_STATE_SIZE, shared_state_size); + window_sizes[DMUB_WINDOW_LSDMA_BUFFER] = DMUB_LSDMA_RB_SIZE; out->fb_size = dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB); @@ -622,6 +653,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF]; struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE]; struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM]; + struct dmub_fb *ib_mem_gart = params->fb[DMUB_WINDOW_IB_MEM]; struct dmub_fb *shared_state_fb = params->fb[DMUB_WINDOW_SHARED_STATE]; struct dmub_rb_init_params rb_params, outbox0_rb_params; @@ -632,7 +664,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, return DMUB_STATUS_INVALID; if (!inst_fb || !stack_fb || !data_fb || !bios_fb || !mail_fb || - !tracebuff_fb || !fw_state_fb || !scratch_mem_fb) { + !tracebuff_fb || !fw_state_fb || !scratch_mem_fb || !ib_mem_gart) { ASSERT(0); return DMUB_STATUS_INVALID; } @@ -695,7 +727,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, inbox1.base = cw4.region.base; inbox1.top = cw4.region.base + DMUB_RB_SIZE; outbox1.base = inbox1.top; - outbox1.top = cw4.region.top; + outbox1.top = inbox1.top + DMUB_RB_SIZE; cw5.offset.quad_part = tracebuff_fb->gpu_addr; cw5.region.base = DMUB_CW5_BASE; @@ -718,6 +750,8 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, dmub->scratch_mem_fb = *scratch_mem_fb; + dmub->ib_mem_gart = *ib_mem_gart; + if (dmub->hw_funcs.setup_windows) dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6, ®ion6); @@ -737,7 +771,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, rb_params.ctx = dmub; rb_params.base_address = mail_fb->cpu_addr; rb_params.capacity = DMUB_RB_SIZE; - dmub_rb_init(&dmub->inbox1_rb, &rb_params); + dmub_rb_init(&dmub->inbox1.rb, &rb_params); // Initialize outbox1 ring buffer rb_params.ctx = dmub; @@ -768,27 +802,6 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, return DMUB_STATUS_OK; } -enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub) -{ - if (!dmub->sw_init) - return DMUB_STATUS_INVALID; - - if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) { - uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); - uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub); - - if (rptr > dmub->inbox1_rb.capacity || wptr > dmub->inbox1_rb.capacity) { - return DMUB_STATUS_HW_FAILURE; - } else { - dmub->inbox1_rb.rptr = rptr; - dmub->inbox1_rb.wrpt = wptr; - dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt; - } - } - - return DMUB_STATUS_OK; -} - enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub) { if (!dmub->sw_init) @@ -799,8 +812,13 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub) /* mailboxes have been reset in hw, so reset the sw state as well */ dmub->inbox1_last_wptr = 0; - dmub->inbox1_rb.wrpt = 0; - dmub->inbox1_rb.rptr = 0; + dmub->inbox1.rb.wrpt = 0; + dmub->inbox1.rb.rptr = 0; + dmub->inbox1.num_reported = 0; + dmub->inbox1.num_submitted = 0; + dmub->reg_inbox0.num_reported = 0; + dmub->reg_inbox0.num_submitted = 0; + dmub->reg_inbox0.is_pending = 0; 
dmub->outbox0_rb.wrpt = 0; dmub->outbox0_rb.rptr = 0; dmub->outbox1_rb.wrpt = 0; @@ -811,7 +829,7 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub) return DMUB_STATUS_OK; } -enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, +enum dmub_status dmub_srv_fb_cmd_queue(struct dmub_srv *dmub, const union dmub_rb_cmd *cmd) { if (!dmub->hw_init) @@ -820,18 +838,20 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, if (dmub->power_state != DMUB_POWER_STATE_D0) return DMUB_STATUS_POWER_STATE_D3; - if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity || - dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) { + if (dmub->inbox1.rb.rptr > dmub->inbox1.rb.capacity || + dmub->inbox1.rb.wrpt > dmub->inbox1.rb.capacity) { return DMUB_STATUS_HW_FAILURE; } - if (dmub_rb_push_front(&dmub->inbox1_rb, cmd)) + if (dmub_rb_push_front(&dmub->inbox1.rb, cmd)) { + dmub->inbox1.num_submitted++; return DMUB_STATUS_OK; + } return DMUB_STATUS_QUEUE_FULL; } -enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub) +enum dmub_status dmub_srv_fb_cmd_execute(struct dmub_srv *dmub) { struct dmub_rb flush_rb; @@ -846,13 +866,13 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub) * been flushed to framebuffer memory. Otherwise DMCUB might * read back stale, fully invalid or partially invalid data. */ - flush_rb = dmub->inbox1_rb; + flush_rb = dmub->inbox1.rb; flush_rb.rptr = dmub->inbox1_last_wptr; dmub_rb_flush_pending(&flush_rb); - dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt); + dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1.rb.wrpt); - dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt; + dmub->inbox1_last_wptr = dmub->inbox1.rb.wrpt; return DMUB_STATUS_OK; } @@ -910,26 +930,84 @@ enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub, return DMUB_STATUS_TIMEOUT; } +static void dmub_srv_update_reg_inbox0_status(struct dmub_srv *dmub) +{ + if (dmub->reg_inbox0.is_pending) { + dmub->reg_inbox0.is_pending = dmub->hw_funcs.read_reg_inbox0_rsp_int_status && + !dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub); + + if (!dmub->reg_inbox0.is_pending) { + /* ack the rsp interrupt */ + if (dmub->hw_funcs.write_reg_inbox0_rsp_int_ack) + dmub->hw_funcs.write_reg_inbox0_rsp_int_ack(dmub); + + /* only update the reported count if commands aren't being batched */ + if (!dmub->reg_inbox0.is_pending && !dmub->reg_inbox0.is_multi_pending) { + dmub->reg_inbox0.num_reported = dmub->reg_inbox0.num_submitted; + } + } + } +} + +enum dmub_status dmub_srv_wait_for_pending(struct dmub_srv *dmub, + uint32_t timeout_us) +{ + uint32_t i; + const uint32_t polling_interval_us = 1; + struct dmub_srv_inbox scratch_reg_inbox0 = dmub->reg_inbox0; + struct dmub_srv_inbox scratch_inbox1 = dmub->inbox1; + const volatile struct dmub_srv_inbox *reg_inbox0 = &dmub->reg_inbox0; + const volatile struct dmub_srv_inbox *inbox1 = &dmub->inbox1; + + if (!dmub->hw_init || + !dmub->hw_funcs.get_inbox1_wptr) + return DMUB_STATUS_INVALID; + + for (i = 0; i <= timeout_us; i += polling_interval_us) { + scratch_inbox1.rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub); + scratch_inbox1.rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); + + scratch_reg_inbox0.is_pending = scratch_reg_inbox0.is_pending && + dmub->hw_funcs.read_reg_inbox0_rsp_int_status && + !dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub); + + if (scratch_inbox1.rb.rptr > dmub->inbox1.rb.capacity) + return DMUB_STATUS_HW_FAILURE; + + /* check current HW state first, but use command submission vs reported as a fallback */ + if 
((dmub_rb_empty(&scratch_inbox1.rb) || + inbox1->num_reported >= scratch_inbox1.num_submitted) && + (!scratch_reg_inbox0.is_pending || + reg_inbox0->num_reported >= scratch_reg_inbox0.num_submitted)) + return DMUB_STATUS_OK; + + udelay(polling_interval_us); + } + + return DMUB_STATUS_TIMEOUT; +} + enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub, uint32_t timeout_us) { - uint32_t i, rptr; + enum dmub_status status; + uint32_t i; + const uint32_t polling_interval_us = 1; if (!dmub->hw_init) return DMUB_STATUS_INVALID; - for (i = 0; i <= timeout_us; ++i) { - rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); - - if (rptr > dmub->inbox1_rb.capacity) - return DMUB_STATUS_HW_FAILURE; + for (i = 0; i < timeout_us; i += polling_interval_us) { + status = dmub_srv_update_inbox_status(dmub); - dmub->inbox1_rb.rptr = rptr; + if (status != DMUB_STATUS_OK) + return status; - if (dmub_rb_empty(&dmub->inbox1_rb)) + /* check for idle */ + if (dmub_rb_empty(&dmub->inbox1.rb) && !dmub->reg_inbox0.is_pending) return DMUB_STATUS_OK; - udelay(1); + udelay(polling_interval_us); } return DMUB_STATUS_TIMEOUT; @@ -1040,35 +1118,6 @@ enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub, return DMUB_STATUS_OK; } -enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub, - union dmub_rb_cmd *cmd) -{ - enum dmub_status status = DMUB_STATUS_OK; - - // Queue command - status = dmub_srv_cmd_queue(dmub, cmd); - - if (status != DMUB_STATUS_OK) - return status; - - // Execute command - status = dmub_srv_cmd_execute(dmub); - - if (status != DMUB_STATUS_OK) - return status; - - // Wait for DMUB to process command - status = dmub_srv_wait_for_idle(dmub, 100000); - - if (status != DMUB_STATUS_OK) - return status; - - // Copy data back from ring buffer into command - dmub_rb_get_return_data(&dmub->inbox1_rb, cmd); - - return status; -} - static inline bool dmub_rb_out_trace_buffer_front(struct dmub_rb *rb, void *entry) { @@ -1160,47 +1209,162 @@ void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_ } } +void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state) +{ + if (!dmub || !dmub->hw_init) + return; + + dmub->power_state = dmub_srv_power_state; +} -enum dmub_status dmub_srv_send_reg_inbox0_cmd( - struct dmub_srv *dmub, - union dmub_rb_cmd *cmd, - bool with_reply, uint32_t timeout_us) +enum dmub_status dmub_srv_reg_cmd_execute(struct dmub_srv *dmub, union dmub_rb_cmd *cmd) { - uint32_t rsp_ready = 0; - uint32_t i; + uint32_t num_pending = 0; + + if (!dmub->hw_init) + return DMUB_STATUS_INVALID; + + if (dmub->power_state != DMUB_POWER_STATE_D0) + return DMUB_STATUS_POWER_STATE_D3; + if (!dmub->hw_funcs.send_reg_inbox0_cmd_msg || + !dmub->hw_funcs.clear_reg_inbox0_rsp_int_ack) + return DMUB_STATUS_INVALID; + + if (dmub->reg_inbox0.num_submitted >= dmub->reg_inbox0.num_reported) + num_pending = dmub->reg_inbox0.num_submitted - dmub->reg_inbox0.num_reported; + else + /* num_submitted wrapped */ + num_pending = DMUB_REG_INBOX0_RB_MAX_ENTRY - + (dmub->reg_inbox0.num_reported - dmub->reg_inbox0.num_submitted); + + if (num_pending >= DMUB_REG_INBOX0_RB_MAX_ENTRY) + return DMUB_STATUS_QUEUE_FULL; + + /* clear last rsp ack and send message */ + dmub->hw_funcs.clear_reg_inbox0_rsp_int_ack(dmub); dmub->hw_funcs.send_reg_inbox0_cmd_msg(dmub, cmd); - for (i = 0; i < timeout_us; i++) { - rsp_ready = dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub); - if (rsp_ready) - break; - udelay(1); + dmub->reg_inbox0.num_submitted++; 
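The num_pending computation in dmub_srv_reg_cmd_execute() above reconstructs the submitted-but-not-yet-reported distance even after num_submitted wraps, using DMUB_REG_INBOX0_RB_MAX_ENTRY as the modulus. A toy model of that formula, under the assumption (implied by the else branch) that the two counters wrap at the ring depth; the 8-entry window here is purely illustrative:

#include <assert.h>
#include <stdint.h>

#define MAX_ENTRY 8u /* stands in for DMUB_REG_INBOX0_RB_MAX_ENTRY */

static uint32_t num_pending(uint32_t submitted, uint32_t reported)
{
	if (submitted >= reported)
		return submitted - reported;
	/* submitted wrapped past the ring depth */
	return MAX_ENTRY - (reported - submitted);
}

int main(void)
{
	assert(num_pending(5, 3) == 2);                 /* plain case */
	assert(num_pending(1, 6) == 3);                 /* wrapped: 1 + 8 - 6 */
	assert(num_pending(MAX_ENTRY, 0) >= MAX_ENTRY); /* window full: reject */
	return 0;
}

When the result reaches MAX_ENTRY the submit path above bails out with DMUB_STATUS_QUEUE_FULL instead of overwriting an unacknowledged command.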
+ dmub->reg_inbox0.is_pending = true; + dmub->reg_inbox0.is_multi_pending = cmd->cmd_common.header.multi_cmd_pending; + + return DMUB_STATUS_OK; +} + +void dmub_srv_cmd_get_response(struct dmub_srv *dmub, + union dmub_rb_cmd *cmd_rsp) +{ + if (dmub) { + if (dmub->inbox_type == DMUB_CMD_INTERFACE_REG && + dmub->hw_funcs.read_reg_inbox0_cmd_rsp) { + dmub->hw_funcs.read_reg_inbox0_cmd_rsp(dmub, cmd_rsp); + } else { + dmub_rb_get_return_data(&dmub->inbox1.rb, cmd_rsp); + } } - if (rsp_ready == 0) - return DMUB_STATUS_TIMEOUT; +} - if (with_reply) - dmub->hw_funcs.read_reg_inbox0_cmd_rsp(dmub, cmd); +static enum dmub_status dmub_srv_sync_reg_inbox0(struct dmub_srv *dmub) +{ + if (!dmub || !dmub->sw_init) + return DMUB_STATUS_INVALID; - dmub->hw_funcs.write_reg_inbox0_rsp_int_ack(dmub); + dmub->reg_inbox0.is_pending = 0; + dmub->reg_inbox0.is_multi_pending = 0; - /* wait for rsp int status is cleared to initial state before exit */ - for (; i <= timeout_us; i++) { - rsp_ready = dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub); - if (rsp_ready == 0) - break; - udelay(1); + return DMUB_STATUS_OK; +} + +static enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub) +{ + if (!dmub->sw_init) + return DMUB_STATUS_INVALID; + + if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) { + uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); + uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub); + + if (rptr > dmub->inbox1.rb.capacity || wptr > dmub->inbox1.rb.capacity) { + return DMUB_STATUS_HW_FAILURE; + } else { + dmub->inbox1.rb.rptr = rptr; + dmub->inbox1.rb.wrpt = wptr; + dmub->inbox1_last_wptr = dmub->inbox1.rb.wrpt; + } } - ASSERT(rsp_ready == 0); return DMUB_STATUS_OK; } -void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state) +enum dmub_status dmub_srv_sync_inboxes(struct dmub_srv *dmub) { - if (!dmub || !dmub->hw_init) - return; + enum dmub_status status; - dmub->power_state = dmub_srv_power_state; + status = dmub_srv_sync_reg_inbox0(dmub); + if (status != DMUB_STATUS_OK) + return status; + + status = dmub_srv_sync_inbox1(dmub); + if (status != DMUB_STATUS_OK) + return status; + + return DMUB_STATUS_OK; +} + +enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub, + uint32_t timeout_us, + uint32_t num_free_required) +{ + enum dmub_status status; + uint32_t i; + const uint32_t polling_interval_us = 1; + + if (!dmub->hw_init) + return DMUB_STATUS_INVALID; + + for (i = 0; i < timeout_us; i += polling_interval_us) { + status = dmub_srv_update_inbox_status(dmub); + + if (status != DMUB_STATUS_OK) + return status; + + /* check for space in inbox1 */ + if (dmub_rb_num_free(&dmub->inbox1.rb) >= num_free_required) + return DMUB_STATUS_OK; + + udelay(polling_interval_us); + } + + return DMUB_STATUS_TIMEOUT; +} + +enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub) +{ + uint32_t rptr; + + if (!dmub->hw_init) + return DMUB_STATUS_INVALID; + + if (dmub->power_state != DMUB_POWER_STATE_D0) + return DMUB_STATUS_POWER_STATE_D3; + + /* update inbox1 state */ + rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); + + if (rptr > dmub->inbox1.rb.capacity) + return DMUB_STATUS_HW_FAILURE; + + if (dmub->inbox1.rb.rptr > rptr) { + /* rb wrapped */ + dmub->inbox1.num_reported += (rptr + dmub->inbox1.rb.capacity - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE; + } else { + dmub->inbox1.num_reported += (rptr - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE; + } + dmub->inbox1.rb.rptr = rptr; + + /* update reg_inbox0 */ + 
dmub_srv_update_reg_inbox0_status(dmub); + + return DMUB_STATUS_OK; } diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c index cce887cefc01..567c5b1aeb7a 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c @@ -95,23 +95,6 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub, case DMUB_OUT_CMD__DPIA_NOTIFICATION: notify->type = DMUB_NOTIFICATION_DPIA_NOTIFICATION; notify->link_index = cmd.dpia_notification.payload.header.instance; - - if (cmd.dpia_notification.payload.header.type == DPIA_NOTIFY__BW_ALLOCATION) { - - notify->dpia_notification.payload.data.dpia_bw_alloc.estimated_bw = - cmd.dpia_notification.payload.data.dpia_bw_alloc.estimated_bw; - notify->dpia_notification.payload.data.dpia_bw_alloc.allocated_bw = - cmd.dpia_notification.payload.data.dpia_bw_alloc.allocated_bw; - - if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_request_failed) - notify->result = DPIA_BW_REQ_FAILED; - else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_request_succeeded) - notify->result = DPIA_BW_REQ_SUCCESS; - else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.est_bw_changed) - notify->result = DPIA_EST_BW_CHANGED; - else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_alloc_cap_changed) - notify->result = DPIA_BW_ALLOC_CAPS_CHANGED; - } break; case DMUB_OUT_CMD__HPD_SENSE_NOTIFY: notify->type = DMUB_NOTIFICATION_HPD_SENSE_NOTIFY; @@ -119,6 +102,10 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub, &cmd.hpd_sense_notify.data, sizeof(cmd.hpd_sense_notify.data)); break; + case DMUB_OUT_CMD__FUSED_IO: + notify->type = DMUB_NOTIFICATION_FUSED_IO; + dmub_memcpy(¬ify->fused_request, &cmd.fused_io.request, sizeof(cmd.fused_io.request)); + break; default: notify->type = DMUB_NOTIFICATION_NO_DATA; break; diff --git a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h index 7e3240e73c1f..63813009a3a6 100644 --- a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h +++ b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h @@ -86,6 +86,9 @@ enum dc_irq_source dal_irq_get_source( enum dc_irq_source dal_irq_get_rx_source( const struct gpio *irq); +enum dc_irq_source dal_irq_get_read_request( + const struct gpio *irq); + enum gpio_result dal_irq_setup_hpd_filter( struct gpio *irq, struct gpio_hpd_config *config); diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h index 813463ffe15c..cc467031651d 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h @@ -424,7 +424,7 @@ struct integrated_info { /* * DFS-bypass flag */ -/* Copy of SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS from atombios.h */ +/* Copy of SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS from atombios.h */ enum { DFS_BYPASS_ENABLE = 0x10 }; diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h index 1867aac57cf2..da74ed66c8f9 100644 --- a/drivers/gpu/drm/amd/display/include/link_service_types.h +++ b/drivers/gpu/drm/amd/display/include/link_service_types.h @@ -89,6 +89,8 @@ struct link_training_settings { bool enhanced_framing; enum lttpr_mode lttpr_mode; + bool lttpr_early_tps2; + /* disallow different lanes to 
have different lane settings */ bool disallow_per_lane_settings; /* dpcd lane settings will always use the same hw lane settings diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 3ba9b62ba70b..71efd2770c99 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -147,7 +147,7 @@ unsigned int mod_freesync_calc_v_total_from_refresh( ((unsigned int)(div64_u64((1000000000ULL * 1000000), refresh_in_uhz))); - if (MICRO_HZ_TO_HZ(refresh_in_uhz) <= stream->timing.min_refresh_in_uhz) { + if (refresh_in_uhz <= stream->timing.min_refresh_in_uhz) { /* When the target refresh rate is the minimum panel refresh rate, * round down the vtotal value to avoid stretching vblank over * panel's vtotal boundary. @@ -155,6 +155,14 @@ unsigned int mod_freesync_calc_v_total_from_refresh( v_total = div64_u64(div64_u64(((unsigned long long)( frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)), stream->timing.h_total), 1000000); + } else if (refresh_in_uhz >= stream->timing.max_refresh_in_uhz) { + /* When the target refresh rate is the maximum panel refresh rate, + * round up the vtotal value to prevent an off-by-one error causing + * v_total_min to be below the panel's lower bound + */ + v_total = div64_u64(div64_u64(((unsigned long long)( + frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)), + stream->timing.h_total) + (1000000 - 1), 1000000); } else { v_total = div64_u64(div64_u64(((unsigned long long)( frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)), diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h index 55c7d873175f..a37634942b07 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h @@ -386,6 +386,7 @@ enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp); enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp); enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp); enum mod_hdcp_status mod_hdcp_clear_cp_irq_status(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_poll_read_lc_fw(struct mod_hdcp *hdcp); /* hdcp version helpers */ static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp) diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c index 1d41dd58f6bc..bb8ae80b37f8 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c @@ -452,21 +452,12 @@ out: return status; } -static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp, +static enum mod_hdcp_status locality_check_sw(struct mod_hdcp *hdcp, struct mod_hdcp_event_context *event_ctx, struct mod_hdcp_transition_input_hdcp2 *input) { enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; - if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { - event_ctx->unexpected_event = 1; - goto out; - } - - if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_lc_init, - &input->lc_init_prepare, &status, - hdcp, "lc_init_prepare")) - goto out; if (!mod_hdcp_execute_and_set(mod_hdcp_write_lc_init, &input->lc_init_write, &status, hdcp, "lc_init_write")) @@ -482,6 +473,48 @@ static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp, &input->l_prime_read, &status, hdcp, "l_prime_read")) goto out; +out: + return status; +} + +static enum
mod_hdcp_status locality_check_fw(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (!mod_hdcp_execute_and_set(mod_hdcp_write_poll_read_lc_fw, + &input->l_prime_read, &status, + hdcp, "l_prime_read")) + goto out; + +out: + return status; +} + +static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_i2c + && hdcp->config.ddc.funcs.atomic_write_poll_read_aux + && !hdcp->connection.link.adjust.hdcp2.force_sw_locality_check; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_lc_init, + &input->lc_init_prepare, &status, + hdcp, "lc_init_prepare")) + goto out; + + status = (use_fw ? locality_check_fw : locality_check_sw)(hdcp, event_ctx, input); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_l_prime, &input->l_prime_validation, &status, hdcp, "l_prime_validation")) diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c index c5f6c11de7e5..89ffb89e1932 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c @@ -184,17 +184,28 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp, callback_in_ms(0, output); set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK); break; - case H2_A2_LOCALITY_CHECK: + case H2_A2_LOCALITY_CHECK: { + const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_i2c + && !adjust->hdcp2.force_sw_locality_check; + + /* + * 1A-05: consider disconnection after LC init a failure + * 1A-13-1: consider invalid l' a failure + * 1A-13-2: consider l' timeout a failure + */ if (hdcp->state.stay_count > 10 || input->lc_init_prepare != PASS || - input->lc_init_write != PASS || - input->l_prime_available_poll != PASS || - input->l_prime_read != PASS) { - /* - * 1A-05: consider disconnection after LC init a failure - * 1A-13-1: consider invalid l' a failure - * 1A-13-2: consider l' timeout a failure - */ + (!use_fw && input->lc_init_write != PASS) || + (!use_fw && input->l_prime_available_poll != PASS)) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->l_prime_read != PASS) { + if (use_fw && hdcp->config.debug.lc_enable_sw_fallback) { + adjust->hdcp2.force_sw_locality_check = true; + callback_in_ms(0, output); + break; + } + fail_and_restart_in_ms(0, &status, output); break; } else if (input->l_prime_validation != PASS) { @@ -205,6 +216,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp, callback_in_ms(0, output); set_state_id(hdcp, output, H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER); break; + } case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER: if (input->eks_prepare != PASS || input->eks_write != PASS) { @@ -498,14 +510,25 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp, callback_in_ms(0, output); set_state_id(hdcp, output, D2_A2_LOCALITY_CHECK); break; - case D2_A2_LOCALITY_CHECK: + case D2_A2_LOCALITY_CHECK: { + const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_aux + && 
!adjust->hdcp2.force_sw_locality_check; + if (hdcp->state.stay_count > 10 || input->lc_init_prepare != PASS || - input->lc_init_write != PASS || - input->l_prime_read != PASS) { + (!use_fw && input->lc_init_write != PASS)) { /* 1A-12: consider invalid l' a failure */ fail_and_restart_in_ms(0, &status, output); break; + } else if (input->l_prime_read != PASS) { + if (use_fw && hdcp->config.debug.lc_enable_sw_fallback) { + adjust->hdcp2.force_sw_locality_check = true; + callback_in_ms(0, output); + break; + } + + fail_and_restart_in_ms(0, &status, output); + break; } else if (input->l_prime_validation != PASS) { callback_in_ms(0, output); increment_stay_counter(hdcp); @@ -514,6 +537,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp, callback_in_ms(0, output); set_state_id(hdcp, output, D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER); break; + } case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER: if (input->eks_prepare != PASS || input->eks_write != PASS) { diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c index 6e064e6ae949..2e6408579194 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c @@ -688,3 +688,76 @@ enum mod_hdcp_status mod_hdcp_clear_cp_irq_status(struct mod_hdcp *hdcp) return MOD_HDCP_STATUS_INVALID_OPERATION; } + +static bool write_stall_read_lc_fw_aux(struct mod_hdcp *hdcp) +{ + struct mod_hdcp_message_hdcp2 *hdcp2 = &hdcp->auth.msg.hdcp2; + + struct mod_hdcp_atomic_op_aux write = { + hdcp_dpcd_addrs[MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT], + hdcp2->lc_init + 1, + sizeof(hdcp2->lc_init) - 1, + }; + struct mod_hdcp_atomic_op_aux stall = { 0, NULL, 0, }; + struct mod_hdcp_atomic_op_aux read = { + hdcp_dpcd_addrs[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME], + hdcp2->lc_l_prime + 1, + sizeof(hdcp2->lc_l_prime) - 1, + }; + + hdcp2->lc_l_prime[0] = HDCP_2_2_LC_SEND_LPRIME; + + return hdcp->config.ddc.funcs.atomic_write_poll_read_aux( + hdcp->config.ddc.handle, + &write, + &stall, + &read, + 16 * 1000, + 0 + ); +} + +static bool write_poll_read_lc_fw_i2c(struct mod_hdcp *hdcp) +{ + struct mod_hdcp_message_hdcp2 *hdcp2 = &hdcp->auth.msg.hdcp2; + uint8_t expected_rxstatus[2] = { sizeof(hdcp2->lc_l_prime) }; + + hdcp->buf[0] = hdcp_i2c_offsets[MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT]; + memmove(&hdcp->buf[1], hdcp2->lc_init, sizeof(hdcp2->lc_init)); + + struct mod_hdcp_atomic_op_i2c write = { + HDCP_I2C_ADDR, + 0, + hdcp->buf, + sizeof(hdcp2->lc_init) + 1, + }; + struct mod_hdcp_atomic_op_i2c poll = { + HDCP_I2C_ADDR, + hdcp_i2c_offsets[MOD_HDCP_MESSAGE_ID_READ_RXSTATUS], + expected_rxstatus, + sizeof(expected_rxstatus), + }; + struct mod_hdcp_atomic_op_i2c read = { + HDCP_I2C_ADDR, + hdcp_i2c_offsets[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME], + hdcp2->lc_l_prime, + sizeof(hdcp2->lc_l_prime), + }; + + return hdcp->config.ddc.funcs.atomic_write_poll_read_i2c( + hdcp->config.ddc.handle, + &write, + &poll, + &read, + 20 * 1000, + 6 + ); +} + +enum mod_hdcp_status mod_hdcp_write_poll_read_lc_fw(struct mod_hdcp *hdcp) +{ + const bool success = (is_dp_hdcp(hdcp) ? write_stall_read_lc_fw_aux : write_poll_read_lc_fw_i2c)(hdcp); + + return success ? 
MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE; +} + diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index 8c137d7c032e..e58e7b93810b 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -368,6 +368,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp) struct mod_hdcp_display *display = get_first_active_display(hdcp); enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + if (!display) + return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; + mutex_lock(&psp->hdcp_context.mutex); hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h index a4d344a4db9e..c42468bb70ac 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h @@ -133,9 +133,22 @@ enum mod_hdcp_display_disable_option { MOD_HDCP_DISPLAY_DISABLE_ENCRYPTION, }; +struct mod_hdcp_atomic_op_i2c { + uint8_t address; + uint8_t offset; + uint8_t *data; + uint32_t size; +}; + +struct mod_hdcp_atomic_op_aux { + uint32_t address; + uint8_t *data; + uint32_t size; +}; + struct mod_hdcp_ddc { void *handle; - struct { + struct mod_hdcp_ddc_funcs { bool (*read_i2c)(void *handle, uint32_t address, uint8_t offset, @@ -153,6 +166,22 @@ struct mod_hdcp_ddc { uint32_t address, const uint8_t *data, uint32_t size); + bool (*atomic_write_poll_read_i2c)( + void *handle, + const struct mod_hdcp_atomic_op_i2c *write, + const struct mod_hdcp_atomic_op_i2c *poll, + struct mod_hdcp_atomic_op_i2c *read, + uint32_t poll_timeout_us, + uint8_t poll_mask_msb + ); + bool (*atomic_write_poll_read_aux)( + void *handle, + const struct mod_hdcp_atomic_op_aux *write, + const struct mod_hdcp_atomic_op_aux *poll, + struct mod_hdcp_atomic_op_aux *read, + uint32_t poll_timeout_us, + uint8_t poll_mask_msb + ); } funcs; }; @@ -185,7 +214,8 @@ struct mod_hdcp_link_adjustment_hdcp2 { uint8_t force_type : 2; uint8_t force_no_stored_km : 1; uint8_t increase_h_prime_timeout: 1; - uint8_t reserved : 3; + uint8_t force_sw_locality_check : 1; + uint8_t reserved : 2; }; struct mod_hdcp_link_adjustment { @@ -272,6 +302,10 @@ struct mod_hdcp_display_query { struct mod_hdcp_config { struct mod_hdcp_psp psp; struct mod_hdcp_ddc ddc; + struct { + uint8_t lc_enable_sw_fallback : 1; + uint8_t reserved : 7; + } debug; uint8_t index; }; diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h index 758a8aa31fbe..391209a3bf29 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h @@ -79,4 +79,6 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link, bool fill_custom_backlight_caps(unsigned int config_no, struct dm_acpi_atif_backlight_caps *caps); void reset_replay_dsync_error_count(struct dc_link *link); +void change_replay_to_psr(struct dc_link *link); +void change_psr_to_replay(struct dc_link *link); #endif /* MODULES_POWER_POWER_HELPERS_H_ */ diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 4c95b885d1d0..bfb446736ca8 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -351,9 
+351,10 @@ enum DC_DEBUG_MASK { DC_DISABLE_HDMI_CEC = 0x10000, /** - * @DC_DISABLE_SUBVP: If set, disable DCN Sub-Viewport feature in amdgpu driver. + * @DC_DISABLE_SUBVP_FAMS: If set, disable DCN Sub-Viewport & Firmware Assisted + * Memory Clock Switching (FAMS) feature in amdgpu driver. */ - DC_DISABLE_SUBVP = 0x20000, + DC_DISABLE_SUBVP_FAMS = 0x20000, /** * @DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE: If set, disable support for custom brightness curves */ @@ -366,10 +367,15 @@ enum DC_DEBUG_MASK { DC_HDCP_LC_FORCE_FW_ENABLE = 0x80000, /** - * @DC_HDCP_LC_ENABLE_SW_FALLBACK If set, upon HDCP Locality Check FW + * @DC_HDCP_LC_ENABLE_SW_FALLBACK: If set, upon HDCP Locality Check FW * path failure, retry using legacy SW path. */ DC_HDCP_LC_ENABLE_SW_FALLBACK = 0x100000, + + /** + * @DC_SKIP_DETECTION_LT: If set, skip detection link training + */ + DC_SKIP_DETECTION_LT = 0x200000, }; enum amd_dpm_forced_level; @@ -390,6 +396,7 @@ enum amd_dpm_forced_level; * (such as allocating any required memory) * @suspend: handles IP specific hw/sw changes for suspend * @resume: handles IP specific hw/sw changes for resume + * @complete: handles IP specific changes after resume * @is_idle: returns current IP block idle status * @wait_for_idle: poll for idle * @check_soft_reset: check soft reset the IP block @@ -421,6 +428,7 @@ struct amd_ip_funcs { int (*prepare_suspend)(struct amdgpu_ip_block *ip_block); int (*suspend)(struct amdgpu_ip_block *ip_block); int (*resume)(struct amdgpu_ip_block *ip_block); + void (*complete)(struct amdgpu_ip_block *ip_block); bool (*is_idle)(struct amdgpu_ip_block *ip_block); int (*wait_for_idle)(struct amdgpu_ip_block *ip_block); bool (*check_soft_reset)(struct amdgpu_ip_block *ip_block); diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h index bd8085ec54ed..2d6a598a6c25 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h @@ -5242,6 +5242,8 @@ #define DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT 0x0000000c #define DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE_MASK 0x00000003L #define DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT 0x00000000 +#define DEGAMMA_CONTROL__ICON_DEGAMMA_MODE_MASK 0x00000300L +#define DEGAMMA_CONTROL__ICON_DEGAMMA_MODE__SHIFT 0x00000008 #define DEGAMMA_CONTROL__OVL_DEGAMMA_MODE_MASK 0x00000030L #define DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT 0x00000004 #define DENORM_CONTROL__DENORM_MODE_MASK 0x00000007L diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h index 15e5a65cf492..70ee6be94a9b 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h @@ -9776,6 +9776,14 @@ #define regDIG0_DIG_BE_CNTL_BASE_IDX 2 #define regDIG0_DIG_BE_EN_CNTL 0x20bd #define regDIG0_DIG_BE_EN_CNTL_BASE_IDX 2 +#define regDIG0_HDCP_INT_CONTROL 0x20c0 +#define regDIG0_HDCP_INT_CONTROL_BASE_IDX 2 +#define regDIG0_HDCP_LINK0_STATUS 0x20c1 +#define regDIG0_HDCP_LINK0_STATUS_BASE_IDX 2 +#define regDIG0_HDCP_I2C_CONTROL_0 0x20c2 +#define regDIG0_HDCP_I2C_CONTROL_0_BASE_IDX 2 +#define regDIG0_HDCP_I2C_CONTROL_1 0x20c3 +#define regDIG0_HDCP_I2C_CONTROL_1_BASE_IDX 2 #define regDIG0_TMDS_CNTL 0x20e4 #define regDIG0_TMDS_CNTL_BASE_IDX 2 #define regDIG0_TMDS_CONTROL_CHAR 0x20e5 @@ -10081,6 +10089,12 @@ #define regDIG1_DIG_BE_CNTL_BASE_IDX 2 #define regDIG1_DIG_BE_EN_CNTL 0x21e1 
#define regDIG1_DIG_BE_EN_CNTL_BASE_IDX 2 +#define regDIG1_HDCP_INT_CONTROL 0x21e4 +#define regDIG1_HDCP_INT_CONTROL_BASE_IDX 2 +#define regDIG1_HDCP_I2C_CONTROL_0 0x21e6 +#define regDIG1_HDCP_I2C_CONTROL_0_BASE_IDX 2 +#define regDIG1_HDCP_I2C_CONTROL_1 0x21e7 +#define regDIG1_HDCP_I2C_CONTROL_1_BASE_IDX 2 #define regDIG1_TMDS_CNTL 0x2208 #define regDIG1_TMDS_CNTL_BASE_IDX 2 #define regDIG1_TMDS_CONTROL_CHAR 0x2209 @@ -10386,6 +10400,12 @@ #define regDIG2_DIG_BE_CNTL_BASE_IDX 2 #define regDIG2_DIG_BE_EN_CNTL 0x2305 #define regDIG2_DIG_BE_EN_CNTL_BASE_IDX 2 +#define regDIG2_HDCP_INT_CONTROL 0x2308 +#define regDIG2_HDCP_INT_CONTROL_BASE_IDX 2 +#define regDIG2_HDCP_I2C_CONTROL_0 0x230a +#define regDIG2_HDCP_I2C_CONTROL_0_BASE_IDX 2 +#define regDIG2_HDCP_I2C_CONTROL_1 0x230b +#define regDIG2_HDCP_I2C_CONTROL_1_BASE_IDX 2 #define regDIG2_TMDS_CNTL 0x232c #define regDIG2_TMDS_CNTL_BASE_IDX 2 #define regDIG2_TMDS_CONTROL_CHAR 0x232d @@ -10691,6 +10711,12 @@ #define regDIG3_DIG_BE_CNTL_BASE_IDX 2 #define regDIG3_DIG_BE_EN_CNTL 0x2429 #define regDIG3_DIG_BE_EN_CNTL_BASE_IDX 2 +#define regDIG3_HDCP_INT_CONTROL 0x242c +#define regDIG3_HDCP_INT_CONTROL_BASE_IDX 2 +#define regDIG3_HDCP_I2C_CONTROL_0 0x242e +#define regDIG3_HDCP_I2C_CONTROL_0_BASE_IDX 2 +#define regDIG3_HDCP_I2C_CONTROL_1 0x242f +#define regDIG3_HDCP_I2C_CONTROL_1_BASE_IDX 2 #define regDIG3_TMDS_CNTL 0x2450 #define regDIG3_TMDS_CNTL_BASE_IDX 2 #define regDIG3_TMDS_CONTROL_CHAR 0x2451 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h index 5d9d5fea6e06..e3d841b2e9af 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h @@ -2847,6 +2847,14 @@ #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_AUTH_FAIL_INTERRUPT_DEST__SHIFT 0x1 #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_I2C_XFER_REQ_INTERRUPT_DEST__SHIFT 0x2 #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_I2C_XFER_DONE_INTERRUPT_DEST__SHIFT 0x3 +#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_AUTH_SUCCESS_INTERRUPT_DEST__SHIFT 0x4 +#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_AUTH_FAIL_INTERRUPT_DEST__SHIFT 0x5 +#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_I2C_XFER_REQ_INTERRUPT_DEST__SHIFT 0x6 +#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_I2C_XFER_DONE_INTERRUPT_DEST__SHIFT 0x7 +#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_AUTH_SUCCESS_INTERRUPT_DEST__SHIFT 0x8 +#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_AUTH_FAIL_INTERRUPT_DEST__SHIFT 0x9 +#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_I2C_XFER_REQ_INTERRUPT_DEST__SHIFT 0xa +#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_I2C_XFER_DONE_INTERRUPT_DEST__SHIFT 0xb #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_AUTH_SUCCESS_INTERRUPT_DEST__SHIFT 0xc #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_AUTH_FAIL_INTERRUPT_DEST__SHIFT 0xd #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_I2C_XFER_REQ_INTERRUPT_DEST__SHIFT 0xe @@ -2871,6 +2879,14 @@ #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_AUTH_FAIL_INTERRUPT_DEST_MASK 0x00000002L #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_I2C_XFER_REQ_INTERRUPT_DEST_MASK 0x00000004L #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_I2C_XFER_DONE_INTERRUPT_DEST_MASK 0x00000008L +#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_AUTH_SUCCESS_INTERRUPT_DEST_MASK 0x00000010L +#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_AUTH_FAIL_INTERRUPT_DEST_MASK 0x00000020L +#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_I2C_XFER_REQ_INTERRUPT_DEST_MASK 0x00000040L +#define 
HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_I2C_XFER_DONE_INTERRUPT_DEST_MASK 0x00000080L +#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_AUTH_SUCCESS_INTERRUPT_DEST_MASK 0x00000100L +#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_AUTH_FAIL_INTERRUPT_DEST_MASK 0x00000200L +#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_I2C_XFER_REQ_INTERRUPT_DEST_MASK 0x00000400L +#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_I2C_XFER_DONE_INTERRUPT_DEST_MASK 0x00000800L #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_AUTH_SUCCESS_INTERRUPT_DEST_MASK 0x00001000L #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_AUTH_FAIL_INTERRUPT_DEST_MASK 0x00002000L #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_I2C_XFER_REQ_INTERRUPT_DEST_MASK 0x00004000L diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h index c75aee25619e..6f44345277af 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h @@ -1779,6 +1779,8 @@ #define mmRLC_TTOP_D 0x3105 #define mmRLC_CLEAR_STATE_RESTORE_BASE 0x30C8 #define mmRLC_PG_AO_CU_MASK 0x310B +#define mmSPI_STATIC_THREAD_MGMT_1 0x2438 +#define mmSPI_STATIC_THREAD_MGMT_2 0x2439 #define mmSPI_STATIC_THREAD_MGMT_3 0x243A #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h index edc8a793a95d..4dd386b98748 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h @@ -234,6 +234,26 @@ #define mmIH_RB_WPTR_ADDR_HI 0x0F84 #define mmIH_RB_WPTR_ADDR_LO 0x0F85 #define mmIH_STATUS 0x0F88 + +#define mmDMA_GFX_RB_CNTL 0x3400 +#define mmDMA_GFX_RB_BASE 0x3401 +#define mmDMA_GFX_RB_RPTR 0x3402 +#define mmDMA_GFX_RB_WPTR 0x3403 +#define mmDMA_GFX_RB_RPTR_ADDR_HI 0x3407 +#define mmDMA_GFX_RB_RPTR_ADDR_LO 0x3408 +#define mmDMA_GFX_IB_CNTL 0x3409 +#define mmDMA_GFX_IB_RPTR 0x340a +#define mmDMA_CNTL 0x340b +#define mmDMA_STATUS_REG 0x340D +#define mmDMA_TILING_CONFIG 0x342E +#define mmDMA_SEM_INCOMPLETE_TIMER_CNTL 0x3411 +#define mmDMA_SEM_WAIT_FAIL_TIMER_CNTL 0x3412 +#define mmDMA_POWER_CNTL 0x342F +#define mmDMA_CLK_CTRL 0x3430 +#define mmDMA_PG 0x3435 +#define mmDMA_PGFSM_CONFIG 0x3436 +#define mmDMA_PGFSM_WRITE 0x3437 + #define mmSEM_MAILBOX 0x0F9B #define mmSEM_MAILBOX_CLIENTCONFIG 0x0F9A #define mmSEM_MAILBOX_CONTROL 0x0F9C @@ -269,7 +289,4 @@ #define mmVCE_CONFIG 0x0F94 #define mmXDMA_MSTR_MEM_OVERFLOW_CNTL 0x03F8 -/* from the old sid.h */ -#define mmDMA_TILING_CONFIG 0x342E - #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h index 1c540fe136cb..9f7fc2428b69 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h @@ -823,6 +823,43 @@ #define LX3__RESERVED__SHIFT 0x00000000 #define RINGOSC_MASK__MASK_MASK 0x0000ffffL #define RINGOSC_MASK__MASK__SHIFT 0x00000000 + +#define DMA_CNTL__TRAP_ENABLE_MASK 0x00000001L +#define DMA_CNTL__TRAP_ENABLE__SHIFT 0x00000000 +#define DMA_CNTL__SEM_INCOMPLETE_INT_ENABLE_MASK 0x00000002L +#define DMA_CNTL__SEM_INCOMPLETE_INT_ENABLE__SHIFT 0x00000001 +#define DMA_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L +#define DMA_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x00000002 +#define DMA_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L +#define DMA_CNTL__DATA_SWAP_ENABLE__SHIFT 0x00000003 +#define DMA_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L +#define 
DMA_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x00000004 +#define DMA_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L +#define DMA_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x0000001C +#define DMA_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define DMA_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x00000000 +#define DMA_GFX_RB_CNTL__RB_SIZE__SHIFT 0x00000001 +#define DMA_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define DMA_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x00000009 +#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0x0000000C +#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0x0000000D +#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x00000010 +#define DMA_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define DMA_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x00000000 +#define DMA_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define DMA_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x00000004 +#define DMA_GFX_IB_CNTL__CMD_VMID_FORCE_MASK 0x80000000L +#define DMA_GFX_IB_CNTL__CMD_VMID_FORCE__SHIFT 0x0000001F + +#define DMA_STATUS_REG__IDLE_MASK 0x00000001L +#define DMA_STATUS_REG__IDLE__SHIFT 0x00000000 +#define DMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L +#define DMA_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x00000008 +#define DMA_PG__PG_CNTL_ENABLE_MASK 0x00000001L +#define DMA_PG__PG_CNTL_ENABLE__SHIFT 0x00000000 + #define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0_MASK 0x00000007L #define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0__SHIFT 0x00000000 #define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT1_MASK 0x00000038L @@ -1015,6 +1052,10 @@ #define SRBM_STATUS2__VCE_BUSY__SHIFT 0x00000007 #define SRBM_STATUS2__VCE_RQ_PENDING_MASK 0x00000008L #define SRBM_STATUS2__VCE_RQ_PENDING__SHIFT 0x00000003 +#define SRBM_STATUS2__DMA_BUSY_MASK 0x00000020L +#define SRBM_STATUS2__DMA_BUSY__SHIFT 0x00000005 +#define SRBM_STATUS2__DMA1_BUSY_MASK 0x00000040L +#define SRBM_STATUS2__DMA1_BUSY__SHIFT 0x00000006 #define SRBM_STATUS2__XDMA_BUSY_MASK 0x00000100L #define SRBM_STATUS2__XDMA_BUSY__SHIFT 0x00000008 #define SRBM_STATUS2__XSP_BUSY_MASK 0x00000010L diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h index 6b10be61efc3..bdef1f743df7 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h @@ -41,7 +41,49 @@ #define ixLCAC_MC5_CNTL 0x012B #define ixLCAC_MC5_OVR_SEL 0x012C #define ixLCAC_MC5_OVR_VAL 0x012D + +#define mmCG_SPLL_FUNC_CNTL 0x0180 +#define mmCG_SPLL_FUNC_CNTL_2 0x0181 +#define mmCG_SPLL_FUNC_CNTL_3 0x0182 +#define mmCG_SPLL_FUNC_CNTL_4 0x0183 +#define mmCG_SPLL_STATUS 0x0185 +#define mmSPLL_CNTL_MODE 0x0186 +#define mmCG_SPLL_SPREAD_SPECTRUM 0x0188 +#define mmCG_SPLL_SPREAD_SPECTRUM_2 0x0189 +#define mmCG_SPLL_AUTOSCALE_CNTL 0x018B +#define mmMPLL_BYPASSCLK_SEL 0x0197 +#define mmCG_CLKPIN_CNTL 0x0198 +#define mmCG_CLKPIN_CNTL_2 0x0199 +#define mmTHM_CLK_CNTL 0x019B +#define mmMISC_CLK_CNTL 0x019C +#define mmCG_THERMAL_CTRL 0x01C0 +#define mmCG_THERMAL_STATUS 0x01C1 +#define mmCG_THERMAL_INT 0x01C2 +#define mmCG_MULT_THERMAL_CTRL 0x01C4 +#define mmCG_MULT_THERMAL_STATUS 0x01C5 +#define mmCG_FDO_CTRL0 0x01D5 +#define mmCG_FDO_CTRL1 0x01D6 +#define mmCG_FDO_CTRL2 0x01D7 +#define mmCG_TACH_CTRL 0x01DC +#define mmCG_TACH_STATUS 0x01DD +#define mmGENERAL_PWRMGT 0x1E0 +#define mmCG_TPC 0x1E1 +#define mmSCLK_PWRMGT_CNTL 0x1E2 +#define mmTARGET_AND_CURRENT_PROFILE_INDEX 0x01E6 +#define 
mmCG_FTV 0x01EF +#define mmCG_FFCT_0 0x01F0 +#define mmCG_BSP 0x01FF +#define mmCG_AT 0x0200 +#define mmCG_GIT 0x0201 +#define mmCG_SSP 0x0203 +#define mmCG_DISPLAY_GAP_CNTL 0x020A +#define mmCG_ULV_CONTROL 0x021E +#define mmCG_ULV_PARAMETER 0x021F +#define mmSMC_SCRATCH0 0x0221 +#define mmCG_CAC_CTRL 0x022E + #define ixSMC_PC_C 0x80000370 + #define ixTHM_TMON0_DEBUG 0x03F0 #define ixTHM_TMON0_INT_DATA 0x0380 #define ixTHM_TMON0_RDIL0_DATA 0x0300 @@ -110,6 +152,7 @@ #define ixTHM_TMON1_RDIR7_DATA 0x0337 #define ixTHM_TMON1_RDIR8_DATA 0x0338 #define ixTHM_TMON1_RDIR9_DATA 0x0339 + #define mmGPIOPAD_A 0x05E7 #define mmGPIOPAD_EN 0x05E8 #define mmGPIOPAD_EXTERN_TRIG_CNTL 0x05F1 @@ -127,6 +170,7 @@ #define mmGPIOPAD_STRENGTH 0x05E5 #define mmGPIOPAD_SW_INT_STAT 0x05E4 #define mmGPIOPAD_Y 0x05E9 + #define mmSMC_IND_ACCESS_CNTL 0x008A #define mmSMC_IND_DATA_0 0x0081 #define mmSMC_IND_DATA 0x0081 diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h index 7d3925b7266e..67d3c7e13a48 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h @@ -23,10 +23,142 @@ #ifndef SMU_6_0_SH_MASK_H #define SMU_6_0_SH_MASK_H -#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK 0x03ffffffL -#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT 0x00000000 -#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK 0x000003f0L +#define CG_AT__CG_R_MASK 0x0000FFFFL +#define CG_AT__CG_R__SHIFT 0x00000000 +#define CG_AT__CG_L_MASK 0xFFFF0000L +#define CG_AT__CG_L__SHIFT 0x00000010 + +#define CG_BSP__BSP_MASK 0x0000FFFFL +#define CG_BSP__BSP__SHIFT 0x00000000 +#define CG_BSP__BSU_MASK 0x000F0000L +#define CG_BSP__BSU__SHIFT 0x00000010 + +#define CG_CAC_CTRL__CAC_WINDOW_MASK 0x00FFFFFFL +#define CG_CAC_CTRL__CAC_WINDOW__SHIFT 0x00000000 + +#define CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK 0x00000002L +#define CG_CLKPIN_CNTL__XTALIN_DIVIDE__SHIFT 0x00000001 +#define CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK 0x00000004L +#define CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT 0x00000002 +#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK 0x00000008L +#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT 0x00000003 +#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK_MASK 0x00000100L +#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK__SHIFT 0x00000008 + +#define CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK 0x00000003L +#define CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT 0x00000000 +#define CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK 0x0000000CL +#define CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT 0x00000002 +#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT_MASK 0x0003FFF0L +#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT__SHIFT 0x00000004 +#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT_MASK 0x00700000 +#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT__SHIFT 0x00000014 +#define CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG_MASK 0x03000000L +#define CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG__SHIFT 0x00000018 +#define CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG_MASK 0x0C000000L +#define CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG__SHIFT 0x0000001A + +#define CG_FFCT_0__UTC_0_MASK 0x000003FFL +#define CG_FFCT_0__UTC_0__SHIFT 0x00000000 +#define CG_FFCT_0__DTC_0_MASK 0x000FFC00L +#define CG_FFCT_0__DTC_0__SHIFT 0x0000000A + +#define CG_GIT__CG_GICST_MASK 0x0000FFFFL +#define CG_GIT__CG_GICST__SHIFT 0x00000000 +#define CG_GIT__CG_GIPOT_MASK 0xFFFF0000L +#define CG_GIT__CG_GIPOT__SHIFT 0x00000010 + +#define CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK 0x00000001L +#define CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT 0x00000000 
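These SMU 6.0 additions follow the standard asic_reg <REG>__<FIELD>_MASK / <REG>__<FIELD>__SHIFT naming, so they compose directly with amdgpu's generic register-field helpers. A minimal consumer-side sketch, assuming the REG_SET_FIELD()/RREG32()/WREG32() helpers from amdgpu.h rather than code taken from this commit:

	u32 tmp = RREG32(mmCG_SPLL_FUNC_CNTL);

	/* expands to the CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK/__SHIFT tokens above */
	tmp = REG_SET_FIELD(tmp, CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
	WREG32(mmCG_SPLL_FUNC_CNTL, tmp);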
+#define CG_SPLL_FUNC_CNTL__SPLL_SLEEP_MASK 0x00000002L +#define CG_SPLL_FUNC_CNTL__SPLL_SLEEP__SHIFT 0x00000001 +#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK 0x00000008L +#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT 0x00000003 +#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK 0x000003F0L #define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT 0x00000004 +#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK 0x007F00000 +#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT 0x00000014 +#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK 0x0000001FF +#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT 0x00000000 +#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK 0x00800000 +#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT 0x00000017 +#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE_MASK 0x04000000 +#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE__SHIFT 0x0000001A +#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK 0x03FFFFFFL +#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT 0x00000000 +#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK 0x10000000L +#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN__SHIFT 0x0000001C +#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x00000002L +#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x00000001 +#define CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK 0x00000001L +#define CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT 0x00000000 +#define CG_SPLL_SPREAD_SPECTRUM__CLK_S_MASK 0x0000FFF0L +#define CG_SPLL_SPREAD_SPECTRUM__CLK_S__SHIFT 0x00000004 +#define CG_SPLL_SPREAD_SPECTRUM_2__CLK_V_MASK 0x00000200L +#define CG_SPLL_SPREAD_SPECTRUM_2__CLK_V__SHIFT 0x00000000 +#define CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR_MASK 0x03FFFFFFL +#define CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR__SHIFT 0x00000009 + +#define CG_SSP__SST_MASK 0x0000FFFFL +#define CG_SSP__SST__SHIFT 0x00000000 +#define CG_SSP__SSTU_MASK 0x000F0000L +#define CG_SSP__SSTU__SHIFT 0x00000010 + +#define CG_THERMAL_CTRL__DPM_EVENT_SRC_MASK 0x00000007L +#define CG_THERMAL_CTRL__DPM_EVENT_SRC__SHIFT 0x00000000 +#define CG_THERMAL_CTRL__DIG_THERM_DPM_MASK 0x003FC000 +#define CG_THERMAL_CTRL__DIG_THERM_DPM__SHIFT 0x0000000E +#define CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK 0x0001FE00L +#define CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT 0x00000009 +#define CG_THERMAL_INT__DIG_THERM_INTH_MASK 0x0000FF00L +#define CG_THERMAL_INT__DIG_THERM_INTH__SHIFT 0x00000008 +#define CG_THERMAL_INT__DIG_THERM_INTL_MASK 0x00FF0000L +#define CG_THERMAL_INT__DIG_THERM_INTL__SHIFT 0x00000010 +#define CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK 0x01000000L +#define CG_THERMAL_INT__THERM_INT_MASK_HIGH__SHIFT 0x00000018 +#define CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK 0x02000000 +#define CG_THERMAL_INT__THERM_INT_MASK_LOW__SHIFT 0x00000019 + +#define CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK 0x0FF00000L +#define CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT 0x00000014 +#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK 0x000001FFL +#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP__SHIFT 0x00000000 +#define CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK 0x0003fe00L +#define CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT 0x00000009 + +#define CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK 0x000000FFL +#define CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT 0x00000000 +#define CG_FDO_CTRL1__FMAX_DUTY100_MASK 0x000000FFL +#define CG_FDO_CTRL1__FMAX_DUTY100__SHIFT 0x00000000 +#define CG_FDO_CTRL2__TMIN_MASK 0x000000FFL +#define CG_FDO_CTRL2__TMIN__SHIFT 0x00000000 +#define CG_FDO_CTRL2__FDO_PWM_MODE_MASK 0x00003800L +#define CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT 0x0000000B +#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK 0xFE000000L +#define 
CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT 0x00000019 + +#define CG_TACH_CTRL__EDGE_PER_REV_MASK 0x00000007L +#define CG_TACH_CTRL__EDGE_PER_REV__SHIFT 0x00000000 +#define CG_TACH_CTRL__TARGET_PERIOD_MASK 0xFFFFFFF8L +#define CG_TACH_CTRL__TARGET_PERIOD__SHIFT 0x00000003 +#define CG_TACH_STATUS__TACH_PERIOD_MASK 0xFFFFFFFFL +#define CG_TACH_STATUS__TACH_PERIOD__SHIFT 0x00000000 + +#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK 0x00000001L +#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN__SHIFT 0x00000000 +#define GENERAL_PWRMGT__STATIC_PM_EN_MASK 0x00000002L +#define GENERAL_PWRMGT__STATIC_PM_EN__SHIFT 0x00000001 +#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK 0x00000004L +#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS__SHIFT 0x00000002 +#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE_MASK 0x00000008L +#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE__SHIFT 0x00000003 +#define GENERAL_PWRMGT__SW_SMIO_INDEX_MASK 0x00000040L +#define GENERAL_PWRMGT__SW_SMIO_INDEX__SHIFT 0x00000006 +#define GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK 0x00000400L +#define GENERAL_PWRMGT__VOLT_PWRMGT_EN__SHIFT 0x0000000A +#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK 0x00800000L +#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN__SHIFT 0x00000017 + #define GPIOPAD_A__GPIO_A_MASK 0x7fffffffL #define GPIOPAD_A__GPIO_A__SHIFT 0x00000000 #define GPIOPAD_EN__GPIO_EN_MASK 0x7fffffffL @@ -195,6 +327,7 @@ #define GPIOPAD_SW_INT_STAT__SW_INT_STAT__SHIFT 0x00000000 #define GPIOPAD_Y__GPIO_Y_MASK 0x7fffffffL #define GPIOPAD_Y__GPIO_Y__SHIFT 0x00000000 + #define LCAC_MC0_CNTL__MC0_ENABLE_MASK 0x00000001L #define LCAC_MC0_CNTL__MC0_ENABLE__SHIFT 0x00000000 #define LCAC_MC0_CNTL__MC0_THRESHOLD_MASK 0x0001fffeL @@ -243,6 +376,37 @@ #define LCAC_MC5_OVR_SEL__MC5_OVR_SEL__SHIFT 0x00000000 #define LCAC_MC5_OVR_VAL__MC5_OVR_VAL_MASK 0xffffffffL #define LCAC_MC5_OVR_VAL__MC5_OVR_VAL__SHIFT 0x00000000 + +#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK 0x0000FF00L +#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT 0x00000008 + +#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK 0x00000001L +#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF__SHIFT 0x00000000 +#define SCLK_PWRMGT_CNTL__SCLK_LOW_D1_MASK 0x00000002L +#define SCLK_PWRMGT_CNTL__SCLK_LOW_D1__SHIFT 0x00000001 +#define SCLK_PWRMGT_CNTL__FIR_RESET_MASK 0x00000010L +#define SCLK_PWRMGT_CNTL__FIR_RESET__SHIFT 0x00000004 +#define SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK 0x00000020L +#define SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL__SHIFT 0x00000005 +#define SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK 0x00000040L +#define SCLK_PWRMGT_CNTL__FIR_TREND_MODE__SHIFT 0x00000006 +#define SCLK_PWRMGT_CNTL__DYN_GFX_CLK_OFF_EN_MASK 0x00000080L +#define SCLK_PWRMGT_CNTL__DYN_GFX_CLK_OFF_EN__SHIFT 0x00000007 +#define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_ON_MASK 0x00000100L +#define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_ON__SHIFT 0x00000008 +#define SCLK_PWRMGT_CNTL__GFX_CLK_REQUEST_OFF_MASK 0x00000200L +#define SCLK_PWRMGT_CNTL__GFX_CLK_REQUEST_OFF__SHIFT 0x00000009 +#define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_OFF_MASK 0x00000400L +#define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_OFF__SHIFT 0x0000000A +#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D1_MASK 0x00000800L +#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D1__SHIFT 0x0000000B +#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D2_MASK 0x00001000L +#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D2__SHIFT 0x0000000C +#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D3_MASK 0x00002000L +#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D3__SHIFT 0x0000000D +#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN_MASK 0x00004000L 
+#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN__SHIFT 0x0000000E + #define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK 0x00000001L #define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0__SHIFT 0x00000000 #define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1_MASK 0x00000100L @@ -285,6 +449,7 @@ #define SMC_RESP_1__SMC_RESP__SHIFT 0x00000000 #define SMC_RESP_2__SMC_RESP_MASK 0xffffffffL #define SMC_RESP_2__SMC_RESP__SHIFT 0x00000000 + #define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT_MASK 0x000ff000L #define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT__SHIFT 0x0000000c #define SPLL_CNTL_MODE__SPLL_ENSAT_MASK 0x00000010L @@ -293,6 +458,8 @@ #define SPLL_CNTL_MODE__SPLL_FASTEN__SHIFT 0x00000003 #define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x00000002L #define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV__SHIFT 0x00000001 +#define SPLL_CNTL_MODE__SPLL_REFCLK_SEL_MASK 0x0C000000L +#define SPLL_CNTL_MODE__SPLL_REFCLK_SEL__SHIFT 0x0000001A #define SPLL_CNTL_MODE__SPLL_RESET_EN_MASK 0x10000000L #define SPLL_CNTL_MODE__SPLL_RESET_EN__SHIFT 0x0000001c #define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x00000001L @@ -303,10 +470,25 @@ #define SPLL_CNTL_MODE__SPLL_TEST__SHIFT 0x00000002 #define SPLL_CNTL_MODE__SPLL_VCO_MODE_MASK 0x60000000L #define SPLL_CNTL_MODE__SPLL_VCO_MODE__SHIFT 0x0000001d + #define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK 0x0f000000L #define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT 0x00000018 #define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX_MASK 0xf0000000L #define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX__SHIFT 0x0000001c + +#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX_MASK 0x000000F0L +#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX__SHIFT 0x00000004 + +#define THM_CLK_CNTL__CMON_CLK_SEL_MASK 0x000000FFL +#define THM_CLK_CNTL__CMON_CLK_SEL__SHIFT 0x00000000 +#define THM_CLK_CNTL__TMON_CLK_SEL_MASK 0x0000FF00L +#define THM_CLK_CNTL__TMON_CLK_SEL__SHIFT 0x00000008 + +#define MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL_MASK 0x000000FFL +#define MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL__SHIFT 0x00000000 +#define MISC_CLK_CNTL__ZCLK_SEL_MASK 0x0000FF00L +#define MISC_CLK_CNTL__ZCLK_SEL__SHIFT 0x00000008 + #define THM_TMON0_DEBUG__DEBUG_RDI_MASK 0x0000001fL #define THM_TMON0_DEBUG__DEBUG_RDI__SHIFT 0x00000000 #define THM_TMON0_DEBUG__DEBUG_Z_MASK 0x0000ffe0L diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h index 14574112c469..72a118b2af69 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h @@ -1067,7 +1067,13 @@ #define regVCN_FEATURES_BASE_IDX 1 #define regUVD_GPUIOV_STATUS 0x0055 #define regUVD_GPUIOV_STATUS_BASE_IDX 1 +#define regUVD_RAS_VCPU_VCODEC_STATUS 0x0057 +#define regUVD_RAS_VCPU_VCODEC_STATUS_BASE_IDX 1 #define regUVD_SCRATCH15 0x005c +#define regUVD_RAS_JPEG0_STATUS 0x0059 +#define regUVD_RAS_JPEG0_STATUS_BASE_IDX 1 +#define regUVD_RAS_JPEG1_STATUS 0x005a +#define regUVD_RAS_JPEG1_STATUS_BASE_IDX 1 #define regUVD_SCRATCH15_BASE_IDX 1 #define regUVD_VERSION 0x005d #define regUVD_VERSION_BASE_IDX 1 @@ -1147,6 +1153,22 @@ #define regUVD_DPG_LMA_CTL2_BASE_IDX 1 +// addressBlock: uvd_mmsch_dec +// base address: 0x20d2c +#define regMMSCH_VF_VMID 0x054b +#define regMMSCH_VF_VMID_BASE_IDX 1 +#define regMMSCH_VF_CTX_ADDR_LO 0x054c +#define regMMSCH_VF_CTX_ADDR_LO_BASE_IDX 1 +#define regMMSCH_VF_CTX_ADDR_HI 0x054d +#define regMMSCH_VF_CTX_ADDR_HI_BASE_IDX 1 +#define 
regMMSCH_VF_CTX_SIZE 0x054e +#define regMMSCH_VF_CTX_SIZE_BASE_IDX 1 +#define regMMSCH_VF_MAILBOX_HOST 0x0552 +#define regMMSCH_VF_MAILBOX_HOST_BASE_IDX 1 +#define regMMSCH_VF_MAILBOX_RESP 0x0553 +#define regMMSCH_VF_MAILBOX_RESP_BASE_IDX 1 + + // addressBlock: uvd_vcn_umsch_dec // base address: 0x21500 #define regVCN_UMSCH_MES_CNTL 0x0740 diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h index 5c119a6b87fb..c78b09d6fbae 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h @@ -5714,6 +5714,22 @@ //UVD_GPUIOV_STATUS #define UVD_GPUIOV_STATUS__UVD_GPUIOV_STATUS_VF_ENABLE__SHIFT 0x0 #define UVD_GPUIOV_STATUS__UVD_GPUIOV_STATUS_VF_ENABLE_MASK 0x00000001L +//UVD_RAS_VCPU_VCODEC_STATUS +#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_VF__SHIFT 0x0 +#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_PF__SHIFT 0x1f +#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_VF_MASK 0x7FFFFFFFL +#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_PF_MASK 0x80000000L + +//UVD_RAS_JPEG0_STATUS +#define UVD_RAS_JPEG0_STATUS__POISONED_VF__SHIFT 0x0 +#define UVD_RAS_JPEG0_STATUS__POISONED_PF__SHIFT 0x1f +#define UVD_RAS_JPEG0_STATUS__POISONED_VF_MASK 0x7FFFFFFFL +#define UVD_RAS_JPEG0_STATUS__POISONED_PF_MASK 0x80000000L +//UVD_RAS_JPEG1_STATUS +#define UVD_RAS_JPEG1_STATUS__POISONED_VF__SHIFT 0x0 +#define UVD_RAS_JPEG1_STATUS__POISONED_PF__SHIFT 0x1f +#define UVD_RAS_JPEG1_STATUS__POISONED_VF_MASK 0x7FFFFFFFL +#define UVD_RAS_JPEG1_STATUS__POISONED_PF_MASK 0x80000000L //UVD_SCRATCH15 #define UVD_SCRATCH15__SCRATCH15_DATA__SHIFT 0x0 #define UVD_SCRATCH15__SCRATCH15_DATA_MASK 0xFFFFFFFFL @@ -5929,6 +5945,29 @@ #define UVD_DPG_LMA_CTL2__JPEG_WRITE_PTR_MASK 0x0000FE00L +// addressBlock: uvd_mmsch_dec +//MMSCH_VF_VMID +#define MMSCH_VF_VMID__VF_CTX_VMID__SHIFT 0x0 +#define MMSCH_VF_VMID__VF_GPCOM_VMID__SHIFT 0x5 +#define MMSCH_VF_VMID__VF_CTX_VMID_MASK 0x0000001FL +#define MMSCH_VF_VMID__VF_GPCOM_VMID_MASK 0x000003E0L +//MMSCH_VF_CTX_ADDR_LO +#define MMSCH_VF_CTX_ADDR_LO__VF_CTX_ADDR_LO__SHIFT 0x6 +#define MMSCH_VF_CTX_ADDR_LO__VF_CTX_ADDR_LO_MASK 0xFFFFFFC0L +//MMSCH_VF_CTX_ADDR_HI +#define MMSCH_VF_CTX_ADDR_HI__VF_CTX_ADDR_HI__SHIFT 0x0 +#define MMSCH_VF_CTX_ADDR_HI__VF_CTX_ADDR_HI_MASK 0xFFFFFFFFL +//MMSCH_VF_CTX_SIZE +#define MMSCH_VF_CTX_SIZE__VF_CTX_SIZE__SHIFT 0x0 +#define MMSCH_VF_CTX_SIZE__VF_CTX_SIZE_MASK 0xFFFFFFFFL +//MMSCH_VF_MAILBOX_HOST +#define MMSCH_VF_MAILBOX_HOST__DATA__SHIFT 0x0 +#define MMSCH_VF_MAILBOX_HOST__DATA_MASK 0xFFFFFFFFL +//MMSCH_VF_MAILBOX_RESP +#define MMSCH_VF_MAILBOX_RESP__RESP__SHIFT 0x0 +#define MMSCH_VF_MAILBOX_RESP__RESP_MASK 0xFFFFFFFFL + + // addressBlock: uvd_vcn_umsch_dec //VCN_UMSCH_MES_CNTL #define VCN_UMSCH_MES_CNTL__PIPE_ID__SHIFT 0x0 diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h index b78360a71bc9..b344acefc606 100644 --- a/drivers/gpu/drm/amd/include/atombios.h +++ b/drivers/gpu/drm/amd/include/atombios.h @@ -4308,7 +4308,7 @@ typedef struct _ATOM_DPCD_INFO // note2: From RV770, the memory is more than 32bit addressable, so we will change // ucTableFormatRevision=1,ucTableContentRevision=4, the strcuture remains // exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware -// (in offset to start of memory address) is KB aligned instead of byte aligend. +// (in offset to start of memory address) is KB aligned instead of byte aligned. 
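The notes above describe KB-granularity bookkeeping, which puts the scaling burden on the reader side. A minimal sketch of the usual conversion, assuming the ATOM_VRAM_USAGE_BY_FIRMWARE layout and a fw_usage pointer purely for illustration (neither is quoted from this hunk):

	/* the VBIOS reports the firmware VRAM reservation in KB units */
	u32 usage_bytes =
		le16_to_cpu(fw_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;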
// Note3: /* If we change usReserved to "usFBUsedbyDrvInKB", then to VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged constant across VGA or non VGA adapter, @@ -6017,7 +6017,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 #define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01 #define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE 0x02 #define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT 0x08 -#define SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS 0x10 +#define SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS 0x10 //ulGPUCapInfo[16]=1 indicate SMC firmware is able to support GNB fast resume function, so that driver can call SMC to program most of GNB register during resuming, from ML #define SYS_INFO_GPUCAPS__GNB_FAST_RESUME_CAPABLE 0x00010000 @@ -6460,7 +6460,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 // ulGPUCapInfo #define SYS_INFO_V1_9_GPUCAPSINFO_DISABLE_AUX_MODE_DETECT 0x08 -#define SYS_INFO_V1_9_GPUCAPSINFO_ENABEL_DFS_BYPASS 0x10 +#define SYS_INFO_V1_9_GPUCAPSINFO_ENABLE_DFS_BYPASS 0x10 //ulGPUCapInfo[16]=1 indicate SMC firmware is able to support GNB fast resume function, so that driver can call SMC to program most of GNB register during resuming, from ML #define SYS_INFO_V1_9_GPUCAPSINFO_GNB_FAST_RESUME_CAPABLE 0x00010000 //ulGPUCapInfo[18]=1 indicate the IOMMU is not available diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 0160d65f3f5e..5c86423c2e92 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -183,6 +183,7 @@ enum atom_dgpu_vram_type { ATOM_DGPU_VRAM_TYPE_HBM2E = 0x61, ATOM_DGPU_VRAM_TYPE_GDDR6 = 0x70, ATOM_DGPU_VRAM_TYPE_HBM3 = 0x80, + ATOM_DGPU_VRAM_TYPE_HBM3E = 0x81, }; enum atom_dp_vs_preemph_def{ @@ -1713,7 +1714,7 @@ enum atom_system_vbiosmisc_def{ // gpucapinfo enum atom_system_gpucapinf_def{ - SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS = 0x10, + SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS = 0x10, }; //dpphy_override diff --git a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h index 3a4670bc4449..b98b7ae551b5 100644 --- a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h +++ b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h @@ -48,6 +48,7 @@ #define GFX_11_0_0__SRCID__SDMA_SRAM_ECC 64 // 0x40 SRAM ECC Error #define GFX_11_0_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT 65 // 0x41 GPF(Sem incomplete timeout) #define GFX_11_0_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 66 // 0x42 Semaphore wait fail timeout +#define GFX_11_0_0__SRCID__SDMA_FENCE 67 // 0x43 User fence #define GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT 128 // 0x80 FED Interrupt (for data poisoning) diff --git a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_12_0_0.h b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_12_0_0.h new file mode 100644 index 000000000000..467897ec2e65 --- /dev/null +++ b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_12_0_0.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2024 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __IRQSRCS_GFX_12_0_0_H__ +#define __IRQSRCS_GFX_12_0_0_H__ + +#define GFX_12_0_0__SRCID__UTCL2_FAULT 0 // UTCL2 has encountered a fault or retry scenario +#define GFX_12_0_0__SRCID__UTCL2_DATA_POISONING 1 // UTCL2 for data poisoning +#define GFX_12_0_0__SRCID__MEM_ACCES_MON 10 // 0x0A EA memory access monitor interrupt +#define GFX_12_0_0__SRCID__SDMA_ATOMIC_RTN_DONE 48 // 0x30 SDMA atomic*_rtn ops complete +#define GFX_12_0_0__SRCID__SDMA_TRAP 49 // 0x31 Trap +#define GFX_12_0_0__SRCID__SDMA_SRBMWRITE 50 // 0x32 SRBM write Protection +#define GFX_12_0_0__SRCID__SDMA_CTXEMPTY 51 // 0x33 Context Empty +#define GFX_12_0_0__SRCID__SDMA_PREEMPT 52 // 0x34 SDMA New Run List +#define GFX_12_0_0__SRCID__SDMA_IB_PREEMPT 53 // 0x35 sdma mid - command buffer preempt interrupt +#define GFX_12_0_0__SRCID__SDMA_DOORBELL_INVALID 54 // 0x36 Doorbell BE invalid +#define GFX_12_0_0__SRCID__SDMA_QUEUE_HANG 55 // 0x37 Queue hang or Command timeout +#define GFX_12_0_0__SRCID__SDMA_ATOMIC_TIMEOUT 56 // 0x38 SDMA atomic CMPSWAP loop timeout +#define GFX_12_0_0__SRCID__SDMA_POLL_TIMEOUT 57 // 0x39 SRBM read poll timeout +#define GFX_12_0_0__SRCID__SDMA_PAGE_TIMEOUT 58 // 0x3A Page retry timeout after UTCL2 return nack = 1 +#define GFX_12_0_0__SRCID__SDMA_PAGE_NULL 59 // 0x3B Page Null from UTCL2 when nack = 2 +#define GFX_12_0_0__SRCID__SDMA_PAGE_FAULT 60 // 0x3C Page Fault Error from UTCL2 when nack = 3 +#define GFX_12_0_0__SRCID__SDMA_VM_HOLE 61 // 0x3D MC or SEM address in VM hole +#define GFX_12_0_0__SRCID__SDMA_ECC 62 // 0x3E ECC Error +#define GFX_12_0_0__SRCID__SDMA_FROZEN 63 // 0x3F SDMA Frozen +#define GFX_12_0_0__SRCID__SDMA_SRAM_ECC 64 // 0x40 SRAM ECC Error +#define GFX_12_0_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT 65 // 0x41 GPF(Sem incomplete timeout) +#define GFX_12_0_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 66 // 0x42 Semaphore wait fail timeout +#define GFX_12_0_0__SRCID__SDMA_FENCE 70 // 0x46 User fence +#define GFX_12_0_0__SRCID__RLC_GC_FED_INTERRUPT 128 // 0x80 FED Interrupt (for data poisoning) +#define GFX_12_0_0__SRCID__CP_GENERIC_INT 177 // 0xB1 CP_GENERIC int +#define GFX_12_0_0__SRCID__CP_PM4_PKT_RSVD_BIT_ERROR 180 // 0xB4 PM4 Pkt Rsvd Bits Error +#define GFX_12_0_0__SRCID__CP_EOP_INTERRUPT 181 // 0xB5 End-of-Pipe Interrupt +#define GFX_12_0_0__SRCID__CP_BAD_OPCODE_ERROR 183 // 0xB7 Bad Opcode Error +#define GFX_12_0_0__SRCID__CP_PRIV_REG_FAULT 184 // 0xB8 Privileged Register Fault +#define 
GFX_12_0_0__SRCID__CP_PRIV_INSTR_FAULT 185 // 0xB9 Privileged Instr Fault +#define GFX_12_0_0__SRCID__CP_WAIT_MEM_SEM_FAULT 186 // 0xBA Wait Memory Semaphore Fault (Sync Object Fault) +#define GFX_12_0_0__SRCID__CP_CTX_EMPTY_INTERRUPT 187 // 0xBB Context Empty Interrupt +#define GFX_12_0_0__SRCID__CP_CTX_BUSY_INTERRUPT 188 // 0xBC Context Busy Interrupt +#define GFX_12_0_0__SRCID__CP_ME_WAIT_REG_MEM_POLL_TIMEOUT 192 // 0xC0 CP.ME Wait_Reg_Mem Poll Timeout +#define GFX_12_0_0__SRCID__CP_SIG_INCOMPLETE 193 // 0xC1 "Surface Probe Fault Signal Incomplete" +#define GFX_12_0_0__SRCID__CP_PREEMPT_ACK 194 // 0xC2 Preemption Acknowledge +#define GFX_12_0_0__SRCID__CP_GPF 195 // 0xC3 General Protection Fault (GPF) +#define GFX_12_0_0__SRCID__CP_GDS_ALLOC_ERROR 196 // 0xC4 GDS Alloc Error +#define GFX_12_0_0__SRCID__CP_ECC_ERROR 197 // 0xC5 ECC Error +#define GFX_12_0_0__SRCID__CP_COMPUTE_QUERY_STATUS 199 // 0xC7 Compute query status +#define GFX_12_0_0__SRCID__CP_VM_DOORBELL 200 // 0xC8 Unattached VM Doorbell Received +#define GFX_12_0_0__SRCID__CP_FUE_ERROR 201 // 0xC9 ECC FUE Error +#define GFX_12_0_0__SRCID__RLC_STRM_PERF_MONITOR_INTERRUPT 202 // 0xCA Streaming Perf Monitor Interrupt +#define GFX_12_0_0__SRCID__GRBM_RD_TIMEOUT_ERROR 232 // 0xE8 Read timeout error +#define GFX_12_0_0__SRCID__GRBM_REG_GUI_IDLE 233 // 0xE9 Register GUI Idle +#define GFX_12_0_0__SRCID__SQ_INTERRUPT_ID 239 // 0xEF SQ Interrupt (ttrace wrap, errors) + +#endif diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 9189dcb65188..e2b1ea7467b0 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -108,6 +108,8 @@ enum pp_clock_type { PP_VCLK1, PP_DCLK, PP_DCLK1, + PP_ISPICLK, + PP_ISPXCLK, OD_SCLK, OD_MCLK, OD_VDDC_CURVE, @@ -128,6 +130,7 @@ enum amd_pp_sensors { AMDGPU_PP_SENSOR_CPU_CLK, AMDGPU_PP_SENSOR_VDDNB, AMDGPU_PP_SENSOR_VDDGFX, + AMDGPU_PP_SENSOR_VDDBOARD, AMDGPU_PP_SENSOR_UVD_VCLK, AMDGPU_PP_SENSOR_UVD_DCLK, AMDGPU_PP_SENSOR_VCE_ECCLK, @@ -341,6 +344,7 @@ enum pp_policy_soc_pstate { #define MAX_CLKS 4 #define NUM_VCN 4 #define NUM_JPEG_ENG 32 +#define NUM_JPEG_ENG_V1 40 #define MAX_XCC 8 #define NUM_XCP 8 struct seq_file; @@ -376,6 +380,20 @@ struct amdgpu_xcp_metrics_v1_1 { uint64_t gfx_below_host_limit_acc[MAX_XCC]; }; +struct amdgpu_xcp_metrics_v1_2 { + /* Utilization Instantaneous (%) */ + uint32_t gfx_busy_inst[MAX_XCC]; + uint16_t jpeg_busy[NUM_JPEG_ENG_V1]; + uint16_t vcn_busy[NUM_VCN]; + /* Utilization Accumulated (%) */ + uint64_t gfx_busy_acc[MAX_XCC]; + /* Total App Clock Counter Accumulated */ + uint64_t gfx_below_host_limit_ppt_acc[MAX_XCC]; + uint64_t gfx_below_host_limit_thm_acc[MAX_XCC]; + uint64_t gfx_low_utilization_acc[MAX_XCC]; + uint64_t gfx_below_host_limit_total_acc[MAX_XCC]; +}; + struct amd_pm_funcs { /* export for dpm on ci and si */ int (*pre_set_power_state)(void *handle); @@ -414,6 +432,7 @@ struct amd_pm_funcs { int (*set_pp_table)(void *handle, const char *buf, size_t size); void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m); int (*switch_power_profile)(void *handle, enum PP_SMC_POWER_PROFILE type, bool en); + int (*pause_power_profile)(void *handle, bool pause); /* export to amdgpu */ struct amd_vce_state *(*get_vce_clock_state)(void *handle, u32 idx); int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id, @@ -477,6 +496,7 @@ struct amd_pm_funcs { int (*set_df_cstate)(void *handle, enum pp_df_cstate state); int
(*set_xgmi_pstate)(void *handle, uint32_t pstate); ssize_t (*get_gpu_metrics)(void *handle, void **table); + ssize_t (*get_xcp_metrics)(void *handle, int xcp_id, void *table); ssize_t (*get_pm_metrics)(void *handle, void *pmmetrics, size_t size); int (*set_watermarks_for_clock_ranges)(void *handle, struct pp_smu_wm_range_sets *ranges); @@ -1090,6 +1110,105 @@ struct gpu_metrics_v1_7 { uint32_t pcie_lc_perf_other_end_recovery; }; +struct gpu_metrics_v1_8 { + struct metrics_table_header common_header; + + /* Temperature (Celsius) */ + uint16_t temperature_hotspot; + uint16_t temperature_mem; + uint16_t temperature_vrsoc; + + /* Power (Watts) */ + uint16_t curr_socket_power; + + /* Utilization (%) */ + uint16_t average_gfx_activity; + uint16_t average_umc_activity; // memory controller + + /* VRAM max bandwidth (in GB/sec) at max memory clock */ + uint64_t mem_max_bandwidth; + + /* Energy (15.259uJ (2^-16) units) */ + uint64_t energy_accumulator; + + /* Driver attached timestamp (in ns) */ + uint64_t system_clock_counter; + + /* Accumulation cycle counter */ + uint32_t accumulation_counter; + + /* Accumulated throttler residencies */ + uint32_t prochot_residency_acc; + uint32_t ppt_residency_acc; + uint32_t socket_thm_residency_acc; + uint32_t vr_thm_residency_acc; + uint32_t hbm_thm_residency_acc; + + /* Clock Lock Status. Each bit corresponds to clock instance */ + uint32_t gfxclk_lock_status; + + /* Link width (number of lanes) and speed (in 0.1 GT/s) */ + uint16_t pcie_link_width; + uint16_t pcie_link_speed; + + /* XGMI bus width and bitrate (in Gbps) */ + uint16_t xgmi_link_width; + uint16_t xgmi_link_speed; + + /* Utilization Accumulated (%) */ + uint32_t gfx_activity_acc; + uint32_t mem_activity_acc; + + /* PCIE accumulated bandwidth (GB/sec) */ + uint64_t pcie_bandwidth_acc; + + /* PCIE instantaneous bandwidth (GB/sec) */ + uint64_t pcie_bandwidth_inst; + + /* PCIE L0 to recovery state transition accumulated count */ + uint64_t pcie_l0_to_recov_count_acc; + + /* PCIE replay accumulated count */ + uint64_t pcie_replay_count_acc; + + /* PCIE replay rollover accumulated count */ + uint64_t pcie_replay_rover_count_acc; + + /* PCIE NAK sent accumulated count */ + uint32_t pcie_nak_sent_count_acc; + + /* PCIE NAK received accumulated count */ + uint32_t pcie_nak_rcvd_count_acc; + + /* XGMI accumulated data transfer size (KiloBytes) */ + uint64_t xgmi_read_data_acc[NUM_XGMI_LINKS]; + uint64_t xgmi_write_data_acc[NUM_XGMI_LINKS]; + + /* XGMI link status (active/inactive) */ + uint16_t xgmi_link_status[NUM_XGMI_LINKS]; + + uint16_t padding; + + /* PMFW attached timestamp (10ns resolution) */ + uint64_t firmware_timestamp; + + /* Current clocks (Mhz) */ + uint16_t current_gfxclk[MAX_GFX_CLKS]; + uint16_t current_socclk[MAX_CLKS]; + uint16_t current_vclk0[MAX_CLKS]; + uint16_t current_dclk0[MAX_CLKS]; + uint16_t current_uclk; + + /* Number of current partition */ + uint16_t num_partition; + + /* XCP metrics stats */ + struct amdgpu_xcp_metrics_v1_2 xcp_stats[NUM_XCP]; + + /* PCIE other end recovery counter */ + uint32_t pcie_lc_perf_other_end_recovery; +}; + /* * gpu_metrics_v2_0 is not recommended as it's not naturally aligned. * Use gpu_metrics_v2_1 or later instead.
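Because several gpu_metrics revisions come back through the same query path, a consumer is expected to dispatch on the common header before casting to gpu_metrics_v1_8. A minimal sketch (the revision check and debug print are illustrative, not code from this commit):

	void *table;
	int size = amdgpu_dpm_get_gpu_metrics(adev, &table);

	if (size >= (int)sizeof(struct metrics_table_header)) {
		struct metrics_table_header *hdr = table;

		/* gpu_metrics_v1_8 identifies itself as format 1, content 8 */
		if (hdr->format_revision == 1 && hdr->content_revision == 8) {
			struct gpu_metrics_v1_8 *m = table;

			dev_dbg(adev->dev, "socket power: %u W\n",
				m->curr_socket_power);
		}
	}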
@@ -1476,4 +1595,27 @@ struct amdgpu_pm_metrics { uint8_t data[]; }; +struct amdgpu_partition_metrics_v1_0 { + struct metrics_table_header common_header; + /* Current clocks (Mhz) */ + uint16_t current_gfxclk[MAX_XCC]; + uint16_t current_socclk[MAX_CLKS]; + uint16_t current_vclk0[MAX_CLKS]; + uint16_t current_dclk0[MAX_CLKS]; + uint16_t current_uclk; + uint16_t padding; + + /* Utilization Instantaneous (%) */ + uint32_t gfx_busy_inst[MAX_XCC]; + uint16_t jpeg_busy[NUM_JPEG_ENG_V1]; + uint16_t vcn_busy[NUM_VCN]; + /* Utilization Accumulated (%) */ + uint64_t gfx_busy_acc[MAX_XCC]; + /* Total App Clock Counter Accumulated */ + uint64_t gfx_below_host_limit_ppt_acc[MAX_XCC]; + uint64_t gfx_below_host_limit_thm_acc[MAX_XCC]; + uint64_t gfx_low_utilization_acc[MAX_XCC]; + uint64_t gfx_below_host_limit_total_acc[MAX_XCC]; +}; + #endif diff --git a/drivers/gpu/drm/amd/include/v11_structs.h b/drivers/gpu/drm/amd/include/v11_structs.h index f8008270f813..3728389fc3be 100644 --- a/drivers/gpu/drm/amd/include/v11_structs.h +++ b/drivers/gpu/drm/amd/include/v11_structs.h @@ -535,8 +535,8 @@ struct v11_gfx_mqd { uint32_t reserved_507; // offset: 507 (0x1FB) uint32_t reserved_508; // offset: 508 (0x1FC) uint32_t reserved_509; // offset: 509 (0x1FD) - uint32_t reserved_510; // offset: 510 (0x1FE) - uint32_t reserved_511; // offset: 511 (0x1FF) + uint32_t fence_address_lo; // offset: 510 (0x1FE) + uint32_t fence_address_hi; // offset: 511 (0x1FF) }; struct v11_sdma_mqd { @@ -1118,8 +1118,8 @@ struct v11_compute_mqd { uint32_t reserved_443; // offset: 443 (0x1BB) uint32_t reserved_444; // offset: 444 (0x1BC) uint32_t reserved_445; // offset: 445 (0x1BD) - uint32_t reserved_446; // offset: 446 (0x1BE) - uint32_t reserved_447; // offset: 447 (0x1BF) + uint32_t fence_address_lo; // offset: 446 (0x1BE) + uint32_t fence_address_hi; // offset: 447 (0x1BF) uint32_t gws_0_val; // offset: 448 (0x1C0) uint32_t gws_1_val; // offset: 449 (0x1C1) uint32_t gws_2_val; // offset: 450 (0x1C2) diff --git a/drivers/gpu/drm/amd/include/v12_structs.h b/drivers/gpu/drm/amd/include/v12_structs.h index 5eabab611b02..03a35f8a65b0 100644 --- a/drivers/gpu/drm/amd/include/v12_structs.h +++ b/drivers/gpu/drm/amd/include/v12_structs.h @@ -535,8 +535,8 @@ struct v12_gfx_mqd { uint32_t reserved_507; // offset: 507 (0x1FB) uint32_t reserved_508; // offset: 508 (0x1FC) uint32_t reserved_509; // offset: 509 (0x1FD) - uint32_t reserved_510; // offset: 510 (0x1FE) - uint32_t reserved_511; // offset: 511 (0x1FF) + uint32_t fence_address_lo; // offset: 510 (0x1FE) + uint32_t fence_address_hi; // offset: 511 (0x1FF) }; struct v12_sdma_mqd { @@ -1118,8 +1118,8 @@ struct v12_compute_mqd { uint32_t reserved_443; // offset: 443 (0x1BB) uint32_t reserved_444; // offset: 444 (0x1BC) uint32_t reserved_445; // offset: 445 (0x1BD) - uint32_t reserved_446; // offset: 446 (0x1BE) - uint32_t reserved_447; // offset: 447 (0x1BF) + uint32_t fence_address_lo; // offset: 446 (0x1BE) + uint32_t fence_address_hi; // offset: 447 (0x1BF) uint32_t gws_0_val; // offset: 448 (0x1C0) uint32_t gws_1_val; // offset: 449 (0x1C1) uint32_t gws_2_val; // offset: 450 (0x1C2) diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 81e9b443ca0a..71d986dd7a6e 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -98,6 +98,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, case AMD_IP_BLOCK_TYPE_GMC: case AMD_IP_BLOCK_TYPE_ACP: case AMD_IP_BLOCK_TYPE_VPE: + case 
AMD_IP_BLOCK_TYPE_ISP: if (pp_funcs && pp_funcs->set_powergating_by_smu) ret = (pp_funcs->set_powergating_by_smu( (adev)->powerplay.pp_handle, block_type, gate, 0)); @@ -329,6 +330,34 @@ int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev) return ret; } +bool amdgpu_dpm_is_link_reset_supported(struct amdgpu_device *adev) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + bool support_link_reset = false; + + if (is_support_sw_smu(adev)) { + mutex_lock(&adev->pm.mutex); + support_link_reset = smu_link_reset_is_support(smu); + mutex_unlock(&adev->pm.mutex); + } + + return support_link_reset; +} + +int amdgpu_dpm_link_reset(struct amdgpu_device *adev) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + int ret = -EOPNOTSUPP; + + if (is_support_sw_smu(adev)) { + mutex_lock(&adev->pm.mutex); + ret = smu_link_reset(smu); + mutex_unlock(&adev->pm.mutex); + } + + return ret; +} + int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, enum PP_SMC_POWER_PROFILE type, bool en) @@ -349,6 +378,25 @@ int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, return ret; } +int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev, + bool pause) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; + + if (amdgpu_sriov_vf(adev)) + return 0; + + if (pp_funcs && pp_funcs->pause_power_profile) { + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->pause_power_profile( + adev->powerplay.pp_handle, pause); + mutex_unlock(&adev->pm.mutex); + } + + return ret; +} + int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev, uint32_t pstate) { @@ -761,6 +809,21 @@ int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask) return ret; } +int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + int ret; + + if (!is_support_sw_smu(adev)) + return -EOPNOTSUPP; + + mutex_lock(&adev->pm.mutex); + ret = smu_reset_vcn(smu, inst_mask); + mutex_unlock(&adev->pm.mutex); + + return ret; +} + int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t *min, @@ -790,22 +853,16 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, uint32_t max) { struct smu_context *smu = adev->powerplay.pp_handle; - int ret = 0; - - if (type != PP_SCLK) - return -EINVAL; if (!is_support_sw_smu(adev)) return -EOPNOTSUPP; - mutex_lock(&adev->pm.mutex); - ret = smu_set_soft_freq_range(smu, - SMU_SCLK, + guard(mutex)(&adev->pm.mutex); + + return smu_set_soft_freq_range(smu, + type, min, max); - mutex_unlock(&adev->pm.mutex); - - return ret; } int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev) @@ -1635,6 +1692,28 @@ int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev) } } +int amdgpu_dpm_is_overdrive_enabled(struct amdgpu_device *adev) +{ + if (is_support_sw_smu(adev)) { + struct smu_context *smu = adev->powerplay.pp_handle; + + return smu->od_enabled; + } else { + struct pp_hwmgr *hwmgr; + + /* + * dpm on some legacy asics don't carry od_enabled member + * as its pp_handle is casted directly from adev. 
+ */ + if (amdgpu_dpm_is_legacy_dpm(adev)) + return false; + + hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle; + + return hwmgr->od_enabled; + } +} + int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, const char *buf, size_t size) @@ -1957,3 +2036,35 @@ int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev, return ret; } + +/** + * amdgpu_dpm_get_xcp_metrics - Retrieve metrics for a specific compute + * partition + * @adev: Pointer to the device. + * @xcp_id: Identifier of the XCP for which metrics are to be retrieved. + * @table: Pointer to a buffer where the metrics will be stored. If NULL, the + * function returns the size of the metrics structure. + * + * This function retrieves metrics for a specific XCP, including details such as + * VCN/JPEG activity, clock frequencies, and other performance metrics. If the + * table parameter is NULL, the function returns the size of the metrics + * structure without populating it. + * + * Return: Size of the metrics structure on success, or a negative error code on failure. + */ +ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id, + void *table) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; + + if (!pp_funcs->get_xcp_metrics) + return 0; + + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_xcp_metrics(adev->powerplay.pp_handle, xcp_id, + table); + mutex_unlock(&adev->pm.mutex); + + return ret; +} diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 922def51685b..4b64851fdb42 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -1398,6 +1398,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, if (ret) return -EINVAL; parameter_size++; + if (!tmp_str) + break; while (isspace(*tmp_str)) tmp_str++; } @@ -1606,7 +1608,6 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); long throttling_logging_interval; - unsigned long flags; int ret = 0; ret = kstrtol(buf, 0, &throttling_logging_interval); @@ -1617,18 +1618,12 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev, return -EINVAL; if (throttling_logging_interval > 0) { - raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags); /* * Reset the ratelimit timer internals. * This can effectively restart the timer. 
*/ - adev->throttling_logging_rs.interval = - (throttling_logging_interval - 1) * HZ; - adev->throttling_logging_rs.begin = 0; - adev->throttling_logging_rs.printed = 0; - adev->throttling_logging_rs.missed = 0; - raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags); - + ratelimit_state_reset_interval(&adev->throttling_logging_rs, + (throttling_logging_interval - 1) * HZ); atomic_set(&adev->throttling_logging_enabled, 1); } else { atomic_set(&adev->throttling_logging_enabled, 0); @@ -1897,7 +1892,7 @@ out: static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, uint32_t mask, enum amdgpu_device_attr_states *states) { - if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev))) + if (!amdgpu_device_supports_smart_shift(adev)) *states = ATTR_STATE_UNSUPPORTED; return 0; @@ -1908,7 +1903,7 @@ static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ { uint32_t ss_power; - if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev))) + if (!amdgpu_device_supports_smart_shift(adev)) *states = ATTR_STATE_UNSUPPORTED; else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE, (void *)&ss_power)) @@ -2944,6 +2939,23 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev, return sysfs_emit(buf, "%d\n", vddgfx); } +static ssize_t amdgpu_hwmon_show_vddboard(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct amdgpu_device *adev = dev_get_drvdata(dev); + u32 vddboard; + int r; + + /* get the voltage */ + r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD, + (void *)&vddboard); + if (r) + return r; + + return sysfs_emit(buf, "%d\n", vddboard); +} + static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev, struct device_attribute *attr, char *buf) @@ -2951,6 +2963,12 @@ static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev, return sysfs_emit(buf, "vddgfx\n"); } +static ssize_t amdgpu_hwmon_show_vddboard_label(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "vddboard\n"); +} static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev, struct device_attribute *attr, char *buf) @@ -3294,6 +3312,8 @@ static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0) static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0); static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0); static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0); +static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, amdgpu_hwmon_show_vddboard, NULL, 0); +static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, amdgpu_hwmon_show_vddboard_label, NULL, 0); static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0); static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0); static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0); @@ -3341,6 +3361,8 @@ static struct attribute *hwmon_attributes[] = { &sensor_dev_attr_in0_label.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in1_label.dev_attr.attr, + &sensor_dev_attr_in2_input.dev_attr.attr, + &sensor_dev_attr_in2_label.dev_attr.attr, &sensor_dev_attr_power1_average.dev_attr.attr, &sensor_dev_attr_power1_input.dev_attr.attr, &sensor_dev_attr_power1_cap_max.dev_attr.attr, @@ -3492,6 +3514,13 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, attr == 
&sensor_dev_attr_in1_label.dev_attr.attr)) return 0; + /* only a few boards support vddboard */ + if ((attr == &sensor_dev_attr_in2_input.dev_attr.attr || + attr == &sensor_dev_attr_in2_label.dev_attr.attr) && + amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD, + (void *)&tmp) == -EOPNOTSUPP) + return 0; + /* no mclk on APUs other than gc 9,4,3*/ if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) && (attr == &sensor_dev_attr_freq2_input.dev_attr.attr || @@ -3618,6 +3647,9 @@ static int parse_input_od_command_lines(const char *buf, return -EINVAL; parameter_size++; + if (!tmp_str) + break; + while (isspace(*tmp_str)) tmp_str++; } diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index f93d287dbf13..768317ee1486 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -410,15 +410,19 @@ int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev, int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, enum PP_SMC_POWER_PROFILE type, bool en); +int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev, + bool pause); int amdgpu_dpm_baco_reset(struct amdgpu_device *adev); int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev); +int amdgpu_dpm_link_reset(struct amdgpu_device *adev); int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev); int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev); bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev); +bool amdgpu_dpm_is_link_reset_supported(struct amdgpu_device *adev); int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev); int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, @@ -520,6 +524,8 @@ int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev, int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev, long *input, uint32_t size); int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table); +ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id, + void *table); /** * @get_pm_metrics: Get one snapshot of power management metrics from PMFW.
The @@ -557,6 +563,7 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev, void **addr, size_t *size); int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev); +int amdgpu_dpm_is_overdrive_enabled(struct amdgpu_device *adev); int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, const char *buf, size_t size); @@ -605,5 +612,6 @@ ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev, enum pp_pm_policy p_type, char *buf); int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask); bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev); +int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask); #endif diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c index 59fae668dc3f..307ebf7e3226 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c @@ -1242,7 +1242,7 @@ static void kv_dpm_enable_bapm(void *handle, bool enable) if (pi->bapm_enable) { ret = amdgpu_kv_smc_bapm_enable(adev, enable); if (ret) - DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); + drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n"); } } @@ -1266,40 +1266,40 @@ static int kv_dpm_enable(struct amdgpu_device *adev) ret = kv_process_firmware_header(adev); if (ret) { - DRM_ERROR("kv_process_firmware_header failed\n"); + drm_err(adev_to_drm(adev), "kv_process_firmware_header failed\n"); return ret; } kv_init_fps_limits(adev); kv_init_graphics_levels(adev); ret = kv_program_bootup_state(adev); if (ret) { - DRM_ERROR("kv_program_bootup_state failed\n"); + drm_err(adev_to_drm(adev), "kv_program_bootup_state failed\n"); return ret; } kv_calculate_dfs_bypass_settings(adev); ret = kv_upload_dpm_settings(adev); if (ret) { - DRM_ERROR("kv_upload_dpm_settings failed\n"); + drm_err(adev_to_drm(adev), "kv_upload_dpm_settings failed\n"); return ret; } ret = kv_populate_uvd_table(adev); if (ret) { - DRM_ERROR("kv_populate_uvd_table failed\n"); + drm_err(adev_to_drm(adev), "kv_populate_uvd_table failed\n"); return ret; } ret = kv_populate_vce_table(adev); if (ret) { - DRM_ERROR("kv_populate_vce_table failed\n"); + drm_err(adev_to_drm(adev), "kv_populate_vce_table failed\n"); return ret; } ret = kv_populate_samu_table(adev); if (ret) { - DRM_ERROR("kv_populate_samu_table failed\n"); + drm_err(adev_to_drm(adev), "kv_populate_samu_table failed\n"); return ret; } ret = kv_populate_acp_table(adev); if (ret) { - DRM_ERROR("kv_populate_acp_table failed\n"); + drm_err(adev_to_drm(adev), "kv_populate_acp_table failed\n"); return ret; } kv_program_vc(adev); @@ -1310,39 +1310,39 @@ static int kv_dpm_enable(struct amdgpu_device *adev) if (pi->enable_auto_thermal_throttling) { ret = kv_enable_auto_thermal_throttling(adev); if (ret) { - DRM_ERROR("kv_enable_auto_thermal_throttling failed\n"); + drm_err(adev_to_drm(adev), "kv_enable_auto_thermal_throttling failed\n"); return ret; } } ret = kv_enable_dpm_voltage_scaling(adev); if (ret) { - DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n"); + drm_err(adev_to_drm(adev), "kv_enable_dpm_voltage_scaling failed\n"); return ret; } ret = kv_set_dpm_interval(adev); if (ret) { - DRM_ERROR("kv_set_dpm_interval failed\n"); + drm_err(adev_to_drm(adev), "kv_set_dpm_interval failed\n"); return ret; } ret = kv_set_dpm_boot_state(adev); if (ret) { - DRM_ERROR("kv_set_dpm_boot_state failed\n"); + drm_err(adev_to_drm(adev), "kv_set_dpm_boot_state failed\n"); return ret; } ret = kv_enable_ulv(adev, true); if (ret) { - 
DRM_ERROR("kv_enable_ulv failed\n"); + drm_err(adev_to_drm(adev), "kv_enable_ulv failed\n"); return ret; } kv_start_dpm(adev); ret = kv_enable_didt(adev, true); if (ret) { - DRM_ERROR("kv_enable_didt failed\n"); + drm_err(adev_to_drm(adev), "kv_enable_didt failed\n"); return ret; } ret = kv_enable_smc_cac(adev, true); if (ret) { - DRM_ERROR("kv_enable_smc_cac failed\n"); + drm_err(adev_to_drm(adev), "kv_enable_smc_cac failed\n"); return ret; } @@ -1350,7 +1350,7 @@ static int kv_dpm_enable(struct amdgpu_device *adev) ret = amdgpu_kv_smc_bapm_enable(adev, false); if (ret) { - DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); + drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n"); return ret; } @@ -1358,7 +1358,7 @@ static int kv_dpm_enable(struct amdgpu_device *adev) kv_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); if (ret) { - DRM_ERROR("kv_set_thermal_temperature_range failed\n"); + drm_err(adev_to_drm(adev), "kv_set_thermal_temperature_range failed\n"); return ret; } amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, @@ -1382,7 +1382,7 @@ static void kv_dpm_disable(struct amdgpu_device *adev) err = amdgpu_kv_smc_bapm_enable(adev, false); if (err) - DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); + drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n"); if (adev->asic_type == CHIP_MULLINS) kv_enable_nb_dpm(adev, false); @@ -1920,7 +1920,7 @@ static int kv_dpm_set_power_state(void *handle) if (pi->bapm_enable) { ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power); if (ret) { - DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); + drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n"); return ret; } } @@ -1931,7 +1931,7 @@ static int kv_dpm_set_power_state(void *handle) kv_update_dfs_bypass_settings(adev, new_ps); ret = kv_calculate_ds_divider(adev); if (ret) { - DRM_ERROR("kv_calculate_ds_divider failed\n"); + drm_err(adev_to_drm(adev), "kv_calculate_ds_divider failed\n"); return ret; } kv_calculate_nbps_level_settings(adev); @@ -1947,7 +1947,7 @@ static int kv_dpm_set_power_state(void *handle) ret = kv_update_vce_dpm(adev, new_ps, old_ps); if (ret) { - DRM_ERROR("kv_update_vce_dpm failed\n"); + drm_err(adev_to_drm(adev), "kv_update_vce_dpm failed\n"); return ret; } kv_update_sclk_t(adev); @@ -1960,7 +1960,7 @@ static int kv_dpm_set_power_state(void *handle) kv_update_dfs_bypass_settings(adev, new_ps); ret = kv_calculate_ds_divider(adev); if (ret) { - DRM_ERROR("kv_calculate_ds_divider failed\n"); + drm_err(adev_to_drm(adev), "kv_calculate_ds_divider failed\n"); return ret; } kv_calculate_nbps_level_settings(adev); @@ -1972,7 +1972,7 @@ static int kv_dpm_set_power_state(void *handle) kv_set_enabled_levels(adev); ret = kv_update_vce_dpm(adev, new_ps, old_ps); if (ret) { - DRM_ERROR("kv_update_vce_dpm failed\n"); + drm_err(adev_to_drm(adev), "kv_update_vce_dpm failed\n"); return ret; } kv_update_acp_boot_level(adev); @@ -2521,7 +2521,7 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, if (high_temp > max_temp) high_temp = max_temp; if (high_temp < low_temp) { - DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); + drm_err(adev_to_drm(adev), "invalid thermal range: %d - %d\n", low_temp, high_temp); return -EINVAL; } @@ -2563,7 +2563,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev) data_offset); if (crev != 8) { - DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); + drm_err(adev_to_drm(adev), "Unsupported 
IGP table: %d %d\n", frev, crev); return -EINVAL; } pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock); @@ -2579,7 +2579,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev) else pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt; if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) { - DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n"); + drm_err(adev_to_drm(adev), "The htcTmpLmt should be larger than htcHystLmt.\n"); } if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3)) @@ -2594,7 +2594,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev) le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]); } if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) & - SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) + SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS) pi->caps_enable_dfs_bypass = true; sumo_construct_sclk_voltage_mapping_table(adev, @@ -2886,16 +2886,18 @@ kv_dpm_print_power_state(void *handle, void *request_ps) struct kv_ps *ps = kv_get_ps(rps); struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_dpm_print_class_info(rps->class, rps->class2); - amdgpu_dpm_print_cap_info(rps->caps); - printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + amdgpu_dpm_dbg_print_class_info(adev, rps->class, rps->class2); + amdgpu_dpm_dbg_print_cap_info(adev, rps->caps); + drm_dbg(adev_to_drm(adev), "vclk: %d, dclk: %d\n", + rps->vclk, rps->dclk); for (i = 0; i < ps->num_levels; i++) { struct kv_pl *pl = &ps->levels[i]; - printk("\t\tpower level %d sclk: %u vddc: %u\n", - i, pl->sclk, - kv_convert_8bit_index_to_voltage(adev, pl->vddc_index)); + drm_dbg(adev_to_drm(adev), + "power level %d sclk: %u vddc: %u\n", + i, pl->sclk, + kv_convert_8bit_index_to_voltage(adev, pl->vddc_index)); } - amdgpu_dpm_print_ps_status(adev, rps); + amdgpu_dpm_dbg_print_ps_status(adev, rps); } static void kv_dpm_fini(struct amdgpu_device *adev) @@ -3013,13 +3015,13 @@ static int kv_dpm_sw_init(struct amdgpu_ip_block *ip_block) adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; if (amdgpu_dpm == 1) amdgpu_pm_print_power_states(adev); - DRM_INFO("amdgpu: dpm initialized\n"); + drm_info(adev_to_drm(adev), "dpm initialized\n"); return 0; dpm_failed: kv_dpm_fini(adev); - DRM_ERROR("amdgpu: dpm initialization failed\n"); + drm_err(adev_to_drm(adev), "dpm initialization failed: %d\n", ret); return ret; } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c index c7518b13e787..ea3ace882a10 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c @@ -47,7 +47,7 @@ #define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \ ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal))) -void amdgpu_dpm_print_class_info(u32 class, u32 class2) +void amdgpu_dpm_dbg_print_class_info(struct amdgpu_device *adev, u32 class, u32 class2) { const char *s; @@ -66,71 +66,45 @@ void amdgpu_dpm_print_class_info(u32 class, u32 class2) s = "performance"; break; } - printk("\tui class: %s\n", s); - printk("\tinternal class:"); + drm_dbg(adev_to_drm(adev), "\tui class: %s\n", s); if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) && (class2 == 0)) - pr_cont(" none"); - else { - if (class & ATOM_PPLIB_CLASSIFICATION_BOOT) - pr_cont(" boot"); - if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) - pr_cont(" thermal"); - if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) - pr_cont(" limited_pwr"); - if 
(class & ATOM_PPLIB_CLASSIFICATION_REST) - pr_cont(" rest"); - if (class & ATOM_PPLIB_CLASSIFICATION_FORCED) - pr_cont(" forced"); - if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) - pr_cont(" 3d_perf"); - if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) - pr_cont(" ovrdrv"); - if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) - pr_cont(" uvd"); - if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) - pr_cont(" 3d_low"); - if (class & ATOM_PPLIB_CLASSIFICATION_ACPI) - pr_cont(" acpi"); - if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) - pr_cont(" uvd_hd2"); - if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) - pr_cont(" uvd_hd"); - if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) - pr_cont(" uvd_sd"); - if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) - pr_cont(" limited_pwr2"); - if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) - pr_cont(" ulv"); - if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) - pr_cont(" uvd_mvc"); - } - pr_cont("\n"); + drm_dbg(adev_to_drm(adev), "\tinternal class: none\n"); + else + drm_dbg(adev_to_drm(adev), "\tinternal class: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", + (class & ATOM_PPLIB_CLASSIFICATION_BOOT) ? " boot" : "", + (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) ? " thermal" : "", + (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) ? " limited_pwr" : "", + (class & ATOM_PPLIB_CLASSIFICATION_REST) ? " rest" : "", + (class & ATOM_PPLIB_CLASSIFICATION_FORCED) ? " forced" : "", + (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) ? " 3d_perf" : "", + (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) ? " ovrdrv" : "", + (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) ? " uvd" : "", + (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) ? " 3d_low" : "", + (class & ATOM_PPLIB_CLASSIFICATION_ACPI) ? " acpi" : "", + (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) ? " uvd_hd2" : "", + (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) ? " uvd_hd" : "", + (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) ? " uvd_sd" : "", + (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) ? " limited_pwr2" : "", + (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) ? " ulv" : "", + (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) ? " uvd_mvc" : ""); } -void amdgpu_dpm_print_cap_info(u32 caps) +void amdgpu_dpm_dbg_print_cap_info(struct amdgpu_device *adev, u32 caps) { - printk("\tcaps:"); - if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) - pr_cont(" single_disp"); - if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) - pr_cont(" video"); - if (caps & ATOM_PPLIB_DISALLOW_ON_DC) - pr_cont(" no_dc"); - pr_cont("\n"); + drm_dbg(adev_to_drm(adev), "\tcaps: %s%s%s\n", + (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) ? " single_disp" : "", + (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) ? " video" : "", + (caps & ATOM_PPLIB_DISALLOW_ON_DC) ? " no_dc" : ""); } -void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, +void amdgpu_dpm_dbg_print_ps_status(struct amdgpu_device *adev, struct amdgpu_ps *rps) { - printk("\tstatus:"); - if (rps == adev->pm.dpm.current_ps) - pr_cont(" c"); - if (rps == adev->pm.dpm.requested_ps) - pr_cont(" r"); - if (rps == adev->pm.dpm.boot_ps) - pr_cont(" b"); - pr_cont("\n"); + drm_dbg(adev_to_drm(adev), "\tstatus:%s%s%s\n", + rps == adev->pm.dpm.current_ps ? " c" : "", + rps == adev->pm.dpm.requested_ps ? " r" : "", + rps == adev->pm.dpm.boot_ps ? 
" b" : ""); } void amdgpu_pm_print_power_states(struct amdgpu_device *adev) @@ -699,64 +673,64 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev) adev->pm.fan_max_rpm = controller->ucFanMaxRPM; } if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_RV770; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_SUMO; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_NI; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_SI; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_CI; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_KV; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) { - DRM_INFO("External GPIO thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "External GPIO thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? 
"without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) { - DRM_INFO("ADT7473 with internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "ADT7473 with internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) { - DRM_INFO("EMC2103 with internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "EMC2103 with internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL; } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) { - DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", + drm_info(adev_to_drm(adev), "Possible %s thermal controller at 0x%02x %s fan control\n", pp_lib_thermal_controller_names[controller->ucType], controller->ucI2cAddress >> 1, (controller->ucFanParameters & @@ -772,7 +746,7 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev) i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info); } } else { - DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n", + drm_info(adev_to_drm(adev), "Unknown thermal controller type %d at 0x%02x %s fan control\n", controller->ucType, controller->ucI2cAddress >> 1, (controller->ucFanParameters & @@ -943,9 +917,9 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) return -EINVAL; if (amdgpu_dpm == 1 && pp_funcs->print_power_state) { - printk("switching from power state:\n"); + drm_dbg(adev_to_drm(adev), "switching from power state\n"); amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); - printk("switching to power state:\n"); + drm_dbg(adev_to_drm(adev), "switching to power state\n"); amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h index 93bd3973330c..7120eef30509 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h @@ -23,10 +23,9 @@ #ifndef __LEGACY_DPM_H__ #define __LEGACY_DPM_H__ -void amdgpu_dpm_print_class_info(u32 class, u32 class2); -void amdgpu_dpm_print_cap_info(u32 caps); -void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, - struct amdgpu_ps *rps); +void amdgpu_dpm_dbg_print_class_info(struct amdgpu_device *adev, u32 class, u32 class2); +void amdgpu_dpm_dbg_print_cap_info(struct amdgpu_device *adev, u32 caps); +void amdgpu_dpm_dbg_print_ps_status(struct amdgpu_device *adev, struct amdgpu_ps *rps); int amdgpu_get_platform_caps(struct amdgpu_device *adev); int amdgpu_parse_extended_power_table(struct amdgpu_device *adev); void amdgpu_free_extended_power_table(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index 1c25f3023e93..52e732be59e3 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -30,16 +30,32 @@ #include "amdgpu_atombios.h" #include "amdgpu_dpm_internal.h" #include "amd_pcie.h" -#include "sid.h" +#include "atom.h" +#include "gfx_v6_0.h" #include "r600_dpm.h" +#include "sid.h" #include "si_dpm.h" 
-#include "atom.h" #include "../include/pptable.h" #include <linux/math64.h> #include <linux/seq_file.h> #include <linux/firmware.h> #include <legacy_dpm.h> +#include "bif/bif_3_0_d.h" +#include "bif/bif_3_0_sh_mask.h" + +#include "dce/dce_6_0_d.h" +#include "dce/dce_6_0_sh_mask.h" + +#include "gca/gfx_6_0_d.h" +#include "gca/gfx_6_0_sh_mask.h" + +#include"gmc/gmc_6_0_d.h" +#include"gmc/gmc_6_0_sh_mask.h" + +#include "smu/smu_6_0_d.h" +#include "smu/smu_6_0_sh_mask.h" + #define MC_CG_ARB_FREQ_F0 0x0a #define MC_CG_ARB_FREQ_F1 0x0b #define MC_CG_ARB_FREQ_F2 0x0c @@ -2193,7 +2209,7 @@ static u32 si_calculate_cac_wintime(struct amdgpu_device *adev) if (xclk == 0) return 0; - cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK; + cac_window = RREG32(mmCG_CAC_CTRL) & CG_CAC_CTRL__CAC_WINDOW_MASK; cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF); wintime = (cac_window_size * 100) / xclk; @@ -2489,19 +2505,19 @@ static int si_populate_sq_ramping_values(struct amdgpu_device *adev, if (adev->pm.dpm.sq_ramping_threshold == 0) return -EINVAL; - if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT)) + if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (SQ_POWER_THROTTLE__MAX_POWER_MASK >> SQ_POWER_THROTTLE__MAX_POWER__SHIFT)) enable_sq_ramping = false; - if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT)) + if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (SQ_POWER_THROTTLE__MIN_POWER_MASK >> SQ_POWER_THROTTLE__MIN_POWER__SHIFT)) enable_sq_ramping = false; - if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT)) + if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK >> SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT)) enable_sq_ramping = false; - if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) + if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK >> SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT)) enable_sq_ramping = false; - if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) + if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK >> SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT)) enable_sq_ramping = false; for (i = 0; i < state->performance_level_count; i++) { @@ -2510,14 +2526,17 @@ static int si_populate_sq_ramping_values(struct amdgpu_device *adev, if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) && enable_sq_ramping) { - sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER); - sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER); - sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA); - sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE); - sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO); + sq_power_throttle |= SISLANDS_DPM2_SQ_RAMP_MAX_POWER << SQ_POWER_THROTTLE__MAX_POWER__SHIFT; + sq_power_throttle |= SISLANDS_DPM2_SQ_RAMP_MIN_POWER << SQ_POWER_THROTTLE__MIN_POWER__SHIFT; + sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA << SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT; + sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_STI_SIZE << SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT; + sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_LTI_RATIO << SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT; } else { - sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK; - sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK; + 
sq_power_throttle |= SQ_POWER_THROTTLE__MAX_POWER_MASK | + SQ_POWER_THROTTLE__MIN_POWER_MASK; + sq_power_throttle2 |= SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK | + SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK | + SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK; } smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle); @@ -2761,9 +2780,9 @@ static int si_initialize_smc_cac_tables(struct amdgpu_device *adev) if (!cac_tables) return -ENOMEM; - reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK; - reg |= CAC_WINDOW(si_pi->powertune_data->cac_window); - WREG32(CG_CAC_CTRL, reg); + reg = RREG32(mmCG_CAC_CTRL) & ~CG_CAC_CTRL__CAC_WINDOW_MASK; + reg |= (si_pi->powertune_data->cac_window << CG_CAC_CTRL__CAC_WINDOW__SHIFT); + WREG32(mmCG_CAC_CTRL, reg); si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage; si_pi->dyn_powertune_data.dc_pwr_value = @@ -2962,10 +2981,10 @@ static int si_init_smc_spll_table(struct amdgpu_device *adev) ret = si_calculate_sclk_params(adev, sclk, &sclk_params); if (ret) break; - p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT; - fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT; - clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT; - clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT; + p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK) >> CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT; + fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK) >> CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT; + clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CG_SPLL_SPREAD_SPECTRUM__CLK_S_MASK) >> CG_SPLL_SPREAD_SPECTRUM__CLK_S__SHIFT; + clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CG_SPLL_SPREAD_SPECTRUM_2__CLK_V_MASK) >> CG_SPLL_SPREAD_SPECTRUM_2__CLK_V__SHIFT; fb_div &= ~0x00001FFF; fb_div >>= 1; @@ -3669,10 +3688,10 @@ static bool si_is_special_1gb_platform(struct amdgpu_device *adev) WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb); width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 
16 : 32; - tmp = RREG32(MC_ARB_RAMCFG); - row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10; - column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8; - bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2; + tmp = RREG32(mmMC_ARB_RAMCFG); + row = ((tmp & MC_ARB_RAMCFG__NOOFROWS_MASK) >> MC_ARB_RAMCFG__NOOFROWS__SHIFT) + 10; + column = ((tmp & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT) + 8; + bank = ((tmp & MC_ARB_RAMCFG__NOOFBANK_MASK) >> MC_ARB_RAMCFG__NOOFBANK__SHIFT) + 2; density = (1 << (row + column - 20 + bank)) * width; @@ -3756,11 +3775,11 @@ static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources) } if (want_thermal_protection) { - WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK); + WREG32_P(mmCG_THERMAL_CTRL, dpm_event_src << CG_THERMAL_CTRL__DPM_EVENT_SRC__SHIFT, ~CG_THERMAL_CTRL__DPM_EVENT_SRC_MASK); if (pi->thermal_protection) - WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS); + WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK); } else { - WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS); + WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK); } } @@ -3785,20 +3804,20 @@ static void si_enable_auto_throttle_source(struct amdgpu_device *adev, static void si_start_dpm(struct amdgpu_device *adev) { - WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN); + WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK, ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK); } static void si_stop_dpm(struct amdgpu_device *adev) { - WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); + WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK); } static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable) { if (enable) - WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF); + WREG32_P(mmSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK); else - WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF); + WREG32_P(mmSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK, ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK); } @@ -3838,7 +3857,7 @@ static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, PPSMC_Msg msg, u32 parameter) { - WREG32(SMC_SCRATCH0, parameter); + WREG32(mmSMC_SCRATCH0, parameter); return amdgpu_si_send_msg_to_smc(adev, msg); } @@ -4023,12 +4042,12 @@ static void si_read_clock_registers(struct amdgpu_device *adev) { struct si_power_info *si_pi = si_get_pi(adev); - si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL); - si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2); - si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3); - si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4); - si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM); - si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2); + si_pi->clock_registers.cg_spll_func_cntl = RREG32(mmCG_SPLL_FUNC_CNTL); + si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(mmCG_SPLL_FUNC_CNTL_2); + si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(mmCG_SPLL_FUNC_CNTL_3); + si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(mmCG_SPLL_FUNC_CNTL_4); + si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(mmCG_SPLL_SPREAD_SPECTRUM); + 
si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(mmCG_SPLL_SPREAD_SPECTRUM_2); si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL); si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL); si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL); @@ -4044,14 +4063,14 @@ static void si_enable_thermal_protection(struct amdgpu_device *adev, bool enable) { if (enable) - WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS); + WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK); else - WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS); + WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK); } static void si_enable_acpi_power_management(struct amdgpu_device *adev) { - WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN); + WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__STATIC_PM_EN_MASK, ~GENERAL_PWRMGT__STATIC_PM_EN_MASK); } #if 0 @@ -4132,9 +4151,9 @@ static void si_program_ds_registers(struct amdgpu_device *adev) tmp = 0x1; if (eg_pi->sclk_deep_sleep) { - WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK); - WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR, - ~AUTOSCALE_ON_SS_CLEAR); + WREG32_P(mmMISC_CLK_CNTL, (tmp << MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL__SHIFT), ~MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL_MASK); + WREG32_P(mmCG_SPLL_AUTOSCALE_CNTL, CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR_MASK, + ~CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR_MASK); } } @@ -4143,18 +4162,18 @@ static void si_program_display_gap(struct amdgpu_device *adev) u32 tmp, pipe; int i; - tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK); + tmp = RREG32(mmCG_DISPLAY_GAP_CNTL) & ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK); if (adev->pm.dpm.new_active_crtc_count > 0) - tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM); + tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT; else - tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE); + tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT; if (adev->pm.dpm.new_active_crtc_count > 1) - tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM); + tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT; else - tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE); + tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT; - WREG32(CG_DISPLAY_GAP_CNTL, tmp); + WREG32(mmCG_DISPLAY_GAP_CNTL, tmp); tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG); pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT; @@ -4189,10 +4208,10 @@ static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable) if (enable) { if (pi->sclk_ss) - WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN); + WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK, ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK); } else { - WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN); - WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN); + WREG32_P(mmCG_SPLL_SPREAD_SPECTRUM, 0, ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK); + WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK); } } @@ -4214,15 +4233,15 @@ static void si_setup_bsp(struct amdgpu_device *adev) &pi->pbsu); - pi->dsp = BSP(pi->bsp) | BSU(pi->bsu); - pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu); + pi->dsp = (pi->bsp << CG_BSP__BSP__SHIFT) | (pi->bsu << CG_BSP__BSU__SHIFT); + pi->psp = (pi->pbsp << 
CG_BSP__BSP__SHIFT) | (pi->pbsu << CG_BSP__BSU__SHIFT); - WREG32(CG_BSP, pi->dsp); + WREG32(mmCG_BSP, pi->dsp); } static void si_program_git(struct amdgpu_device *adev) { - WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK); + WREG32_P(mmCG_GIT, R600_GICST_DFLT << CG_GIT__CG_GICST__SHIFT, ~CG_GIT__CG_GICST_MASK); } static void si_program_tp(struct amdgpu_device *adev) @@ -4231,54 +4250,54 @@ static void si_program_tp(struct amdgpu_device *adev) enum r600_td td = R600_TD_DFLT; for (i = 0; i < R600_PM_NUMBER_OF_TC; i++) - WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i]))); + WREG32(mmCG_FFCT_0 + i, (r600_utc[i] << CG_FFCT_0__UTC_0__SHIFT | r600_dtc[i] << CG_FFCT_0__DTC_0__SHIFT)); if (td == R600_TD_AUTO) - WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL); + WREG32_P(mmSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK); else - WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL); + WREG32_P(mmSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK, ~SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK); if (td == R600_TD_UP) - WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE); + WREG32_P(mmSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK); if (td == R600_TD_DOWN) - WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE); + WREG32_P(mmSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK, ~SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK); } static void si_program_tpp(struct amdgpu_device *adev) { - WREG32(CG_TPC, R600_TPC_DFLT); + WREG32(mmCG_TPC, R600_TPC_DFLT); } static void si_program_sstp(struct amdgpu_device *adev) { - WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT))); + WREG32(mmCG_SSP, (R600_SSTU_DFLT << CG_SSP__SSTU__SHIFT| R600_SST_DFLT << CG_SSP__SST__SHIFT)); } static void si_enable_display_gap(struct amdgpu_device *adev) { - u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); + u32 tmp = RREG32(mmCG_DISPLAY_GAP_CNTL); - tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK); - tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) | - DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE)); + tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK); + tmp |= (R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT | + R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT); - tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); - tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) | - DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE)); - WREG32(CG_DISPLAY_GAP_CNTL, tmp); + tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG_MASK); + tmp |= (R600_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG__SHIFT | + R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG__SHIFT); + WREG32(mmCG_DISPLAY_GAP_CNTL, tmp); } static void si_program_vc(struct amdgpu_device *adev) { struct rv7xx_power_info *pi = rv770_get_pi(adev); - WREG32(CG_FTV, pi->vrc); + WREG32(mmCG_FTV, pi->vrc); } static void si_clear_vc(struct amdgpu_device *adev) { - WREG32(CG_FTV, 0); + WREG32(mmCG_FTV, 0); } static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock) @@ -4735,7 +4754,7 @@ static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev, u32 dram_rows; u32 dram_refresh_rate; u32 mc_arb_rfsh_rate; - u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT; + u32 tmp = (RREG32(mmMC_ARB_RAMCFG) & MC_ARB_RAMCFG__NOOFROWS_MASK) >> MC_ARB_RAMCFG__NOOFROWS__SHIFT; if (tmp >= 4) dram_rows = 16384; @@ -4909,7 +4928,7 @@ static int si_populate_smc_initial_state(struct amdgpu_device 
*adev, si_populate_initial_mvdd_value(adev, &table->initialState.level.mvdd); - reg = CG_R(0xffff) | CG_L(0); + reg = 0xffff << CG_AT__CG_R__SHIFT | 0 << CG_AT__CG_L__SHIFT; table->initialState.level.aT = cpu_to_be32(reg); table->initialState.level.bSP = cpu_to_be32(pi->dsp); table->initialState.level.gen2PCIE = (u8)si_pi->boot_pcie_gen; @@ -4935,10 +4954,13 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev, table->initialState.level.dpm2.BelowSafeInc = 0; table->initialState.level.dpm2.PwrEfficiencyRatio = 0; - reg = MIN_POWER_MASK | MAX_POWER_MASK; + reg = SQ_POWER_THROTTLE__MIN_POWER_MASK | + SQ_POWER_THROTTLE__MAX_POWER_MASK; table->initialState.level.SQPowerThrottle = cpu_to_be32(reg); - reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK; + reg = SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK | + SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK | + SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK; table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg); return 0; @@ -5057,8 +5079,8 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev, dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS); - spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; - spll_func_cntl_2 |= SCLK_MUX_SEL(4); + spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK; + spll_func_cntl_2 |= 4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT; table->ACPIState.level.mclk.vDLL_CNTL = cpu_to_be32(dll_cntl); @@ -5102,10 +5124,10 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev, table->ACPIState.level.dpm2.BelowSafeInc = 0; table->ACPIState.level.dpm2.PwrEfficiencyRatio = 0; - reg = MIN_POWER_MASK | MAX_POWER_MASK; + reg = SQ_POWER_THROTTLE__MIN_POWER_MASK | SQ_POWER_THROTTLE__MAX_POWER_MASK; table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg); - reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK; + reg = SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK | SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK | SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK; table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg); return 0; @@ -5250,8 +5272,8 @@ static int si_init_smc_table(struct amdgpu_device *adev) if (ret) return ret; - WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control); - WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter); + WREG32(mmCG_ULV_CONTROL, ulv->cg_ulv_control); + WREG32(mmCG_ULV_PARAMETER, ulv->cg_ulv_parameter); lane_width = amdgpu_get_pcie_lanes(adev); si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width); @@ -5294,16 +5316,16 @@ static int si_calculate_sclk_params(struct amdgpu_device *adev, do_div(tmp, reference_clock); fbdiv = (u32) tmp; - spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK); - spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div); - spll_func_cntl |= SPLL_PDIV_A(dividers.post_div); + spll_func_cntl &= ~(CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK | CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK); + spll_func_cntl |= dividers.ref_div << CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT; + spll_func_cntl |= dividers.post_div << CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT; - spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; - spll_func_cntl_2 |= SCLK_MUX_SEL(2); + spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK; + spll_func_cntl_2 |= 2 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT; - spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK; - spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv); - spll_func_cntl_3 |= SPLL_DITHEN; + spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK; + spll_func_cntl_3 |= fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT; + 
spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK; if (pi->sclk_ss) { struct amdgpu_atom_ss ss; @@ -5314,12 +5336,12 @@ static int si_calculate_sclk_params(struct amdgpu_device *adev, u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate); u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000); - cg_spll_spread_spectrum &= ~CLK_S_MASK; - cg_spll_spread_spectrum |= CLK_S(clk_s); - cg_spll_spread_spectrum |= SSEN; + cg_spll_spread_spectrum &= ~CG_SPLL_SPREAD_SPECTRUM__CLK_S_MASK; + cg_spll_spread_spectrum |= clk_s << CG_SPLL_SPREAD_SPECTRUM__CLK_S__SHIFT; + cg_spll_spread_spectrum |= CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK; - cg_spll_spread_spectrum_2 &= ~CLK_V_MASK; - cg_spll_spread_spectrum_2 |= CLK_V(clk_v); + cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLK_V_MASK; + cg_spll_spread_spectrum_2 |= clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLK_V__SHIFT; } } @@ -5485,7 +5507,7 @@ static int si_convert_power_level_to_smc(struct amdgpu_device *adev, if (pi->mclk_stutter_mode_threshold && (pl->mclk <= pi->mclk_stutter_mode_threshold) && !eg_pi->uvd_enabled && - (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) && + (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) && (adev->pm.dpm.new_active_crtc_count <= 2)) { level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN; } @@ -5579,7 +5601,7 @@ static int si_populate_smc_t(struct amdgpu_device *adev, return -EINVAL; if (state->performance_level_count < 2) { - a_t = CG_R(0xffff) | CG_L(0); + a_t = 0xffff << CG_AT__CG_R__SHIFT | 0 << CG_AT__CG_L__SHIFT; smc_state->levels[0].aT = cpu_to_be32(a_t); return 0; } @@ -5600,13 +5622,13 @@ static int si_populate_smc_t(struct amdgpu_device *adev, t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT; } - a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK; - a_t |= CG_R(t_l * pi->bsp / 20000); + a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_AT__CG_R_MASK; + a_t |= (t_l * pi->bsp / 20000) << CG_AT__CG_R__SHIFT; smc_state->levels[i].aT = cpu_to_be32(a_t); high_bsp = (i == state->performance_level_count - 2) ? 
pi->pbsp : pi->bsp; - a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000); + a_t = (0xffff) << CG_AT__CG_R__SHIFT | (t_h * high_bsp / 20000) << CG_AT__CG_L__SHIFT; smc_state->levels[i + 1].aT = cpu_to_be32(a_t); } @@ -6180,9 +6202,9 @@ static int si_upload_mc_reg_table(struct amdgpu_device *adev, static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable) { if (enable) - WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN); + WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK, ~GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK); else - WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN); + WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK); } static enum si_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev, @@ -6204,8 +6226,8 @@ static u16 si_get_current_pcie_speed(struct amdgpu_device *adev) { u32 speed_cntl; - speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK; - speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT; + speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL) & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK; + speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; return (u16)speed_cntl; } @@ -6412,21 +6434,21 @@ static void si_dpm_setup_asic(struct amdgpu_device *adev) static int si_thermal_enable_alert(struct amdgpu_device *adev, bool enable) { - u32 thermal_int = RREG32(CG_THERMAL_INT); + u32 thermal_int = RREG32(mmCG_THERMAL_INT); if (enable) { PPSMC_Result result; - thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); - WREG32(CG_THERMAL_INT, thermal_int); + thermal_int &= ~(CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK | CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK); + WREG32(mmCG_THERMAL_INT, thermal_int); result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt); if (result != PPSMC_Result_OK) { DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); return -EINVAL; } } else { - thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW; - WREG32(CG_THERMAL_INT, thermal_int); + thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK | CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK; + WREG32(mmCG_THERMAL_INT, thermal_int); } return 0; @@ -6447,9 +6469,9 @@ static int si_thermal_set_temperature_range(struct amdgpu_device *adev, return -EINVAL; } - WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK); - WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK); - WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK); + WREG32_P(mmCG_THERMAL_INT, (high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT, ~CG_THERMAL_INT__DIG_THERM_INTH_MASK); + WREG32_P(mmCG_THERMAL_INT, (low_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT, ~CG_THERMAL_INT__DIG_THERM_INTL_MASK); + WREG32_P(mmCG_THERMAL_CTRL, (high_temp / 1000) << CG_THERMAL_CTRL__DIG_THERM_DPM__SHIFT, ~CG_THERMAL_CTRL__DIG_THERM_DPM_MASK); adev->pm.dpm.thermal.min_temp = low_temp; adev->pm.dpm.thermal.max_temp = high_temp; @@ -6463,20 +6485,20 @@ static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode) u32 tmp; if (si_pi->fan_ctrl_is_in_default_mode) { - tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT; + tmp = (RREG32(mmCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK) >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT; si_pi->fan_ctrl_default_mode = tmp; - tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT; + tmp = (RREG32(mmCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK) >> CG_FDO_CTRL2__TMIN__SHIFT; si_pi->t_min = tmp; 
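The fan-control hunks here and in the hwmgr files further down share two conversions: a 0-255 PWM request is rescaled against the firmware's 100%-duty reference read from CG_FDO_CTRL1, and an RPM request becomes a tachometer period derived from the crystal clock. A condensed sketch of both, as the patched code computes them (the two helper names are illustrative only, not kernel API):

static u32 example_pwm_to_duty(u32 speed, u32 duty100)
{
	u64 tmp64 = (u64)speed * duty100;

	do_div(tmp64, 255);	/* duty = speed / 255 * duty100 */
	return (u32)tmp64;
}

static u32 example_rpm_to_tach_period(u32 xclk, u32 rpm)
{
	/*
	 * 60 s/min with xclk in 10 kHz units and 8 tach pulses per
	 * revolution; rpm must be non-zero and, as the smu7/vega guards
	 * below enforce, no larger than UINT_MAX / 8.
	 */
	return 60 * xclk * 10000 / (8 * rpm);
}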
si_pi->fan_ctrl_is_in_default_mode = false; } - tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK; - tmp |= TMIN(0); - WREG32(CG_FDO_CTRL2, tmp); + tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK; + tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT; + WREG32(mmCG_FDO_CTRL2, tmp); - tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; - tmp |= FDO_PWM_MODE(mode); - WREG32(CG_FDO_CTRL2, tmp); + tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK; + tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT; + WREG32(mmCG_FDO_CTRL2, tmp); } static int si_thermal_setup_fan_table(struct amdgpu_device *adev) @@ -6495,7 +6517,7 @@ static int si_thermal_setup_fan_table(struct amdgpu_device *adev) return 0; } - duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; + duty100 = (RREG32(mmCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT; if (duty100 == 0) { adev->pm.dpm.fan.ucode_fan_control = false; @@ -6531,7 +6553,7 @@ static int si_thermal_setup_fan_table(struct amdgpu_device *adev) reference_clock) / 1600); fan_table.fdo_max = cpu_to_be16((u16)duty100); - tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT; + tmp = (RREG32(mmCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK) >> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT; fan_table.temp_src = (uint8_t)tmp; ret = amdgpu_si_copy_bytes_to_smc(adev, @@ -6590,8 +6612,8 @@ static int si_dpm_get_fan_speed_pwm(void *handle, if (adev->pm.no_fan) return -ENOENT; - duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; - duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT; + duty100 = (RREG32(mmCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT; + duty = (RREG32(mmCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK) >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT; if (duty100 == 0) return -EINVAL; @@ -6621,7 +6643,7 @@ static int si_dpm_set_fan_speed_pwm(void *handle, if (speed > 255) return -EINVAL; - duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; + duty100 = (RREG32(mmCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT; if (duty100 == 0) return -EINVAL; @@ -6630,9 +6652,9 @@ static int si_dpm_set_fan_speed_pwm(void *handle, do_div(tmp64, 255); duty = (u32)tmp64; - tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK; - tmp |= FDO_STATIC_DUTY(duty); - WREG32(CG_FDO_CTRL0, tmp); + tmp = RREG32(mmCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK; + tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT; + WREG32(mmCG_FDO_CTRL0, tmp); return 0; } @@ -6672,8 +6694,8 @@ static int si_dpm_get_fan_control_mode(void *handle, u32 *fan_mode) if (si_pi->fan_is_controlled_by_smc) return 0; - tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK; - *fan_mode = (tmp >> FDO_PWM_MODE_SHIFT); + tmp = RREG32(mmCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK; + *fan_mode = (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT); return 0; } @@ -6691,7 +6713,7 @@ static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev, if (adev->pm.fan_pulses_per_revolution == 0) return -ENOENT; - tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT; + tach_period = (RREG32(mmCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK) >> CG_TACH_STATUS__TACH_PERIOD__SHIFT; if (tach_period == 0) return -ENOENT; @@ -6720,9 +6742,9 @@ static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev, si_fan_ctrl_stop_smc_fan_control(adev); tach_period = 60 * xclk * 10000 / (8 * 
speed); - tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK; - tmp |= TARGET_PERIOD(tach_period); - WREG32(CG_TACH_CTRL, tmp); + tmp = RREG32(mmCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK; + tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT; + WREG32(mmCG_TACH_CTRL, tmp); si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM); @@ -6736,13 +6758,13 @@ static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev) u32 tmp; if (!si_pi->fan_ctrl_is_in_default_mode) { - tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; - tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode); - WREG32(CG_FDO_CTRL2, tmp); + tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK; + tmp |= si_pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT; + WREG32(mmCG_FDO_CTRL2, tmp); - tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK; - tmp |= TMIN(si_pi->t_min); - WREG32(CG_FDO_CTRL2, tmp); + tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK; + tmp |= si_pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT; + WREG32(mmCG_FDO_CTRL2, tmp); si_pi->fan_ctrl_is_in_default_mode = true; } } @@ -6760,14 +6782,14 @@ static void si_thermal_initialize(struct amdgpu_device *adev) u32 tmp; if (adev->pm.fan_pulses_per_revolution) { - tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK; - tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution -1); - WREG32(CG_TACH_CTRL, tmp); + tmp = RREG32(mmCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK; + tmp |= (adev->pm.fan_pulses_per_revolution -1) << CG_TACH_CTRL__EDGE_PER_REV__SHIFT; + WREG32(mmCG_TACH_CTRL, tmp); } - tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK; - tmp |= TACH_PWM_RESP_RATE(0x28); - WREG32(CG_FDO_CTRL2, tmp); + tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK; + tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT; + WREG32(mmCG_FDO_CTRL2, tmp); } static int si_thermal_start_thermal_controller(struct amdgpu_device *adev) @@ -7530,8 +7552,8 @@ static void si_dpm_debugfs_print_current_performance_level(void *handle, struct si_ps *ps = si_get_ps(rps); struct rv7xx_pl *pl; u32 current_index = - (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >> - CURRENT_STATE_INDEX_SHIFT; + (RREG32(mmTARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX_MASK) >> + TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX__SHIFT; if (current_index >= ps->performance_level_count) { seq_printf(m, "invalid dpm profile %d\n", current_index); @@ -7554,14 +7576,14 @@ static int si_dpm_set_interrupt_state(struct amdgpu_device *adev, case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH: switch (state) { case AMDGPU_IRQ_STATE_DISABLE: - cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); - cg_thermal_int |= THERM_INT_MASK_HIGH; - WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); + cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT); + cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK; + WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int); break; case AMDGPU_IRQ_STATE_ENABLE: - cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); - cg_thermal_int &= ~THERM_INT_MASK_HIGH; - WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); + cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT); + cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK; + WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int); break; default: break; @@ -7571,14 +7593,14 @@ static int si_dpm_set_interrupt_state(struct amdgpu_device *adev, case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW: switch (state) { case AMDGPU_IRQ_STATE_DISABLE: - cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); - cg_thermal_int |= 
THERM_INT_MASK_LOW; - WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); + cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT); + cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK; + WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int); break; case AMDGPU_IRQ_STATE_ENABLE: - cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); - cg_thermal_int &= ~THERM_INT_MASK_LOW; - WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); + cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT); + cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK; + WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int); break; default: break; @@ -7883,8 +7905,8 @@ static int si_dpm_get_temp(void *handle) int actual_temp = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >> - CTF_TEMP_SHIFT; + temp = (RREG32(mmCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >> + CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT; if (temp & 0x200) actual_temp = 255; @@ -7929,15 +7951,15 @@ static void si_dpm_print_power_state(void *handle, struct rv7xx_pl *pl; int i; - amdgpu_dpm_print_class_info(rps->class, rps->class2); - amdgpu_dpm_print_cap_info(rps->caps); - DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + amdgpu_dpm_dbg_print_class_info(adev, rps->class, rps->class2); + amdgpu_dpm_dbg_print_cap_info(adev, rps->caps); + drm_dbg(adev_to_drm(adev), "\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); for (i = 0; i < ps->performance_level_count; i++) { pl = &ps->performance_levels[i]; - DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n", + drm_dbg(adev_to_drm(adev), "\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n", i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1); } - amdgpu_dpm_print_ps_status(adev, rps); + amdgpu_dpm_dbg_print_ps_status(adev, rps); } static int si_dpm_early_init(struct amdgpu_ip_block *ip_block) @@ -8014,8 +8036,8 @@ static int si_dpm_read_sensor(void *handle, int idx, struct si_ps *ps = si_get_ps(rps); uint32_t sclk, mclk; u32 pl_index = - (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >> - CURRENT_STATE_INDEX_SHIFT; + (RREG32(mmTARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX_MASK) >> + TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX__SHIFT; /* size must be at least 4 bytes for all sensors */ if (*size < 4) diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c index 8f994ffa9cd1..4e65ab9e931c 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c @@ -30,6 +30,12 @@ #include "amdgpu_ucode.h" #include "sislands_smc.h" +#include "smu/smu_6_0_d.h" +#include "smu/smu_6_0_sh_mask.h" + +#include "gca/gfx_6_0_d.h" +#include "gca/gfx_6_0_sh_mask.h" + static int si_set_smc_sram_address(struct amdgpu_device *adev, u32 smc_address, u32 limit) { @@ -38,8 +44,8 @@ static int si_set_smc_sram_address(struct amdgpu_device *adev, if ((smc_address + 3) > limit) return -EINVAL; - WREG32(SMC_IND_INDEX_0, smc_address); - WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); + WREG32(mmSMC_IND_INDEX_0, smc_address); + WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK); return 0; } @@ -68,7 +74,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev, if (ret) goto done; - WREG32(SMC_IND_DATA_0, data); + WREG32(mmSMC_IND_DATA_0, data); src += 4; byte_count -= 4; @@ -83,7 +89,7 @@ int amdgpu_si_copy_bytes_to_smc(struct 
amdgpu_device *adev, if (ret) goto done; - original_data = RREG32(SMC_IND_DATA_0); + original_data = RREG32(mmSMC_IND_DATA_0); extra_shift = 8 * (4 - byte_count); while (byte_count > 0) { @@ -99,7 +105,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev, if (ret) goto done; - WREG32(SMC_IND_DATA_0, data); + WREG32(mmSMC_IND_DATA_0, data); } done: @@ -121,10 +127,10 @@ void amdgpu_si_reset_smc(struct amdgpu_device *adev) { u32 tmp; - RREG32(CB_CGTT_SCLK_CTRL); - RREG32(CB_CGTT_SCLK_CTRL); - RREG32(CB_CGTT_SCLK_CTRL); - RREG32(CB_CGTT_SCLK_CTRL); + RREG32(mmCB_CGTT_SCLK_CTRL); + RREG32(mmCB_CGTT_SCLK_CTRL); + RREG32(mmCB_CGTT_SCLK_CTRL); + RREG32(mmCB_CGTT_SCLK_CTRL); tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) | RST_REG; @@ -170,16 +176,16 @@ PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev, if (!amdgpu_si_is_smc_running(adev)) return PPSMC_Result_Failed; - WREG32(SMC_MESSAGE_0, msg); + WREG32(mmSMC_MESSAGE_0, msg); for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(SMC_RESP_0); + tmp = RREG32(mmSMC_RESP_0); if (tmp != 0) break; udelay(1); } - return (PPSMC_Result)RREG32(SMC_RESP_0); + return (PPSMC_Result)RREG32(mmSMC_RESP_0); } PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev) @@ -225,18 +231,18 @@ int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit) return -EINVAL; spin_lock_irqsave(&adev->smc_idx_lock, flags); - WREG32(SMC_IND_INDEX_0, ucode_start_address); - WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); + WREG32(mmSMC_IND_INDEX_0, ucode_start_address); + WREG32_P(mmSMC_IND_ACCESS_CNTL, SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK); while (ucode_size >= 4) { /* SMC address space is BE */ data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; - WREG32(SMC_IND_DATA_0, data); + WREG32(mmSMC_IND_DATA_0, data); src += 4; ucode_size -= 4; } - WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); + WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK); spin_unlock_irqrestore(&adev->smc_idx_lock, flags); return 0; @@ -251,7 +257,7 @@ int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, spin_lock_irqsave(&adev->smc_idx_lock, flags); ret = si_set_smc_sram_address(adev, smc_address, limit); if (ret == 0) - *value = RREG32(SMC_IND_DATA_0); + *value = RREG32(mmSMC_IND_DATA_0); spin_unlock_irqrestore(&adev->smc_idx_lock, flags); return ret; @@ -266,7 +272,7 @@ int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, spin_lock_irqsave(&adev->smc_idx_lock, flags); ret = si_set_smc_sram_address(adev, smc_address, limit); if (ret == 0) - WREG32(SMC_IND_DATA_0, value); + WREG32(mmSMC_IND_DATA_0, value); spin_unlock_irqrestore(&adev->smc_idx_lock, flags); return ret; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c index 4bd92fd782be..8d40ed0f0e83 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c @@ -143,6 +143,10 @@ int atomctrl_initialize_mc_reg_table( vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *) smu_atom_get_data_table(hwmgr->adev, GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev); + if (!vram_info) { + pr_err("Could not retrieve the VramInfo table!"); + return -EINVAL; + } if (module_index >= vram_info->ucNumOfVRAMModule) { pr_err("Invalid VramInfo table."); @@ -180,6 +184,10 @@ int 
atomctrl_initialize_mc_reg_table_v2_2( vram_info = (ATOM_VRAM_INFO_HEADER_V2_2 *) smu_atom_get_data_table(hwmgr->adev, GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev); + if (!vram_info) { + pr_err("Could not retrieve the VramInfo table!"); + return -EINVAL; + } if (module_index >= vram_info->ucNumOfVRAMModule) { pr_err("Invalid VramInfo table."); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c index a8fc0fa44db6..ba5c1237fcfe 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c @@ -267,10 +267,10 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) if (hwmgr->thermal_controller.fanInfo.bNoFan || (hwmgr->thermal_controller.fanInfo. ucTachometerPulsesPerRevolution == 0) || - speed == 0 || + (!speed || speed > UINT_MAX/8) || (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) - return 0; + return -EINVAL; if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) smu7_fan_ctrl_stop_smc_fan_control(hwmgr); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c index 9d3b33446adc..9b20076e26c0 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c @@ -394,7 +394,7 @@ static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr) } if (le32_to_cpu(info->ulGPUCapInfo) & - SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) { + SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS) { phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EnableDFSBypass); } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c index 79a566f3564a..c305ea4ec17d 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c @@ -149,7 +149,7 @@ int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, } cgs_write_register(hwmgr->device, indirect_port, index); - return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value); + return phm_wait_on_register(hwmgr, indirect_port + 1, value, mask); } int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c index 379012494da5..56423aedf3fa 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c @@ -307,10 +307,10 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) int result = 0; if (hwmgr->thermal_controller.fanInfo.bNoFan || - speed == 0 || + (!speed || speed > UINT_MAX/8) || (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) - return -1; + return -EINVAL; if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c index a3331ffb2daf..1b1c88590156 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c @@ -191,7 +191,7 @@ int vega20_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) uint32_t tach_period, crystal_clock_freq; int result = 0; - if (!speed) + if 
(!speed) + if 
(!speed || speed > UINT_MAX/8) return -EINVAL; if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) { diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c index 5a010cd38303..baf51cd82a35 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c @@ -46,42 +46,6 @@ static int smu7_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_addr, } -int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit) -{ - uint32_t data; - uint32_t addr; - uint8_t *dest_byte; - uint8_t i, data_byte[4] = {0}; - uint32_t *pdata = (uint32_t *)&data_byte; - - PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL); - PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL); - - addr = smc_start_address; - - while (byte_count >= 4) { - smu7_read_smc_sram_dword(hwmgr, addr, &data, limit); - - *dest = PP_SMC_TO_HOST_UL(data); - - dest += 1; - byte_count -= 4; - addr += 4; - } - - if (byte_count) { - smu7_read_smc_sram_dword(hwmgr, addr, &data, limit); - *pdata = PP_SMC_TO_HOST_UL(data); - /* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */ - dest_byte = (uint8_t *)dest; - for (i = 0; i < byte_count; i++) - dest_byte[i] = data_byte[i]; - } - - return 0; -} - - int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit) { diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h index e7303dc8c260..63e428ceaee4 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h @@ -53,8 +53,6 @@ struct smu7_smumgr { }; -int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, - uint32_t *dest, uint32_t byte_count, uint32_t limit); int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit); int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 033c3229b555..756afe78a6e5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -76,6 +76,7 @@ static void smu_power_profile_mode_get(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile_mode); static void smu_power_profile_mode_put(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile_mode); +static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type); static int smu_sys_get_pp_feature_mask(void *handle, char *buf) @@ -134,12 +135,17 @@ int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value) } int smu_set_soft_freq_range(struct smu_context *smu, - enum smu_clk_type clk_type, + enum pp_clock_type type, uint32_t min, uint32_t max) { + enum smu_clk_type clk_type; int ret = 0; + clk_type = smu_convert_to_smuclk(type); + if (clk_type == SMU_CLK_COUNT) + return -EINVAL; + if (smu->ppt_funcs->set_soft_freq_limited_range) ret = smu->ppt_funcs->set_soft_freq_limited_range(smu, clk_type, @@ -307,6 +313,26 @@ static int smu_dpm_set_vpe_enable(struct smu_context *smu, return ret; } +static int 
smu_dpm_set_isp_enable(struct smu_context *smu, + bool enable) +{ + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_power_gate *power_gate = &smu_power->power_gate; + int ret; + + if (!smu->ppt_funcs->dpm_set_isp_enable) + return 0; + + if (atomic_read(&power_gate->isp_gated) ^ enable) + return 0; + + ret = smu->ppt_funcs->dpm_set_isp_enable(smu, enable); + if (!ret) + atomic_set(&power_gate->isp_gated, !enable); + + return ret; +} + static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu, bool enable) { @@ -408,6 +434,12 @@ static int smu_dpm_set_power_gate(void *handle, dev_err(smu->adev->dev, "Failed to power %s VPE!\n", gate ? "gate" : "ungate"); break; + case AMD_IP_BLOCK_TYPE_ISP: + ret = smu_dpm_set_isp_enable(smu, !gate); + if (ret) + dev_err(smu->adev->dev, "Failed to power %s ISP!\n", + gate ? "gate" : "ungate"); + break; default: dev_err(smu->adev->dev, "Unsupported block type!\n"); return -EINVAL; @@ -1004,6 +1036,21 @@ static int smu_fini_fb_allocations(struct smu_context *smu) return 0; } +static void smu_update_gpu_addresses(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *pm_status_table = smu_table->tables + SMU_TABLE_PMSTATUSLOG; + struct smu_table *driver_table = &(smu_table->driver_table); + struct smu_table *dummy_read_1_table = &smu_table->dummy_read_1_table; + + if (pm_status_table->bo) + pm_status_table->mc_address = amdgpu_bo_fb_aper_addr(pm_status_table->bo); + if (driver_table->bo) + driver_table->mc_address = amdgpu_bo_fb_aper_addr(driver_table->bo); + if (dummy_read_1_table->bo) + dummy_read_1_table->mc_address = amdgpu_bo_fb_aper_addr(dummy_read_1_table->bo); +} + /** * smu_alloc_memory_pool - allocate memory pool in the system memory * @@ -1285,6 +1332,7 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block) atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1); atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); atomic_set(&smu->smu_power.power_gate.vpe_gated, 1); + atomic_set(&smu->smu_power.power_gate.isp_gated, 1); atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1); smu_init_power_profile(smu); @@ -1672,37 +1720,6 @@ static int smu_smc_hw_setup(struct smu_context *smu) } } - ret = smu_system_features_control(smu, true); - if (ret) { - dev_err(adev->dev, "Failed to enable requested dpm features!\n"); - return ret; - } - - smu_init_xgmi_plpd_mode(smu); - - ret = smu_feature_get_enabled_mask(smu, &features_supported); - if (ret) { - dev_err(adev->dev, "Failed to retrieve supported dpm features!\n"); - return ret; - } - bitmap_copy(feature->supported, - (unsigned long *)&features_supported, - feature->feature_num); - - if (!smu_is_dpm_running(smu)) - dev_info(adev->dev, "dpm has been disabled\n"); - - /* - * Set initialized values (get from vbios) to dpm tables context such as - * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each - * type of clks. 
- */ - ret = smu_set_default_dpm_table(smu); - if (ret) { - dev_err(adev->dev, "Failed to setup default dpm clock tables!\n"); - return ret; - } - if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5) pcie_gen = 4; else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) @@ -1738,6 +1755,37 @@ static int smu_smc_hw_setup(struct smu_context *smu) return ret; } + ret = smu_system_features_control(smu, true); + if (ret) { + dev_err(adev->dev, "Failed to enable requested dpm features!\n"); + return ret; + } + + smu_init_xgmi_plpd_mode(smu); + + ret = smu_feature_get_enabled_mask(smu, &features_supported); + if (ret) { + dev_err(adev->dev, "Failed to retrieve supported dpm features!\n"); + return ret; + } + bitmap_copy(feature->supported, + (unsigned long *)&features_supported, + feature->feature_num); + + if (!smu_is_dpm_running(smu)) + dev_info(adev->dev, "dpm has been disabled\n"); + + /* + * Set initialized values (get from vbios) to dpm tables context such as + * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each + * type of clks. + */ + ret = smu_set_default_dpm_table(smu); + if (ret) { + dev_err(adev->dev, "Failed to setup default dpm clock tables!\n"); + return ret; + } + ret = smu_get_thermal_temperature_range(smu); if (ret) { dev_err(adev->dev, "Failed to get thermal temperature ranges!\n"); @@ -1780,6 +1828,9 @@ static int smu_start_smc_engine(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; int ret = 0; + if (amdgpu_virt_xgmi_migrate_enabled(adev)) + smu_update_gpu_addresses(smu); + smu->smc_fw_state = SMU_FW_INIT; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { @@ -2398,7 +2449,11 @@ static int smu_switch_power_profile(void *handle, smu_power_profile_mode_get(smu, type); else smu_power_profile_mode_put(smu, type); - ret = smu_bump_power_profile_mode(smu, NULL, 0); + /* don't switch the active workload when paused */ + if (smu->pause_workload) + ret = 0; + else + ret = smu_bump_power_profile_mode(smu, NULL, 0); if (ret) { if (enable) smu_power_profile_mode_put(smu, type); @@ -2411,6 +2466,35 @@ static int smu_switch_power_profile(void *handle, return 0; } +static int smu_pause_power_profile(void *handle, + bool pause) +{ + struct smu_context *smu = handle; + struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); + u32 workload_mask = 1 << PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + int ret; + + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + return -EOPNOTSUPP; + + if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && + smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { + smu->pause_workload = pause; + + /* force to bootup default profile */ + if (smu->pause_workload && smu->ppt_funcs->set_power_profile_mode) + ret = smu->ppt_funcs->set_power_profile_mode(smu, + workload_mask, + NULL, + 0); + else + ret = smu_bump_power_profile_mode(smu, NULL, 0); + return ret; + } + + return 0; +} + static enum amd_dpm_forced_level smu_get_performance_level(void *handle) { struct smu_context *smu = handle; @@ -2902,6 +2986,12 @@ static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type) clk_type = SMU_DCLK; break; case PP_DCLK1: clk_type = SMU_DCLK1; break; + case PP_ISPICLK: + clk_type = SMU_ISPICLK; + break; + case PP_ISPXCLK: + clk_type = SMU_ISPXCLK; + break; case OD_SCLK: clk_type = SMU_OD_SCLK; break; case OD_MCLK: @@ -3399,15 +3489,15 @@ bool smu_mode1_reset_is_support(struct smu_context *smu) return ret; } -bool smu_mode2_reset_is_support(struct smu_context *smu) +bool 
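Besides reordering smu_smc_hw_setup() so the PCIe override runs before the DPM features are enabled, the hunks above add smu_pause_power_profile(): while paused, the SMU is parked on the bootup-default workload, but smu_switch_power_profile() keeps maintaining the refcounted workload mask so that resuming can re-apply the accumulated state in one message. A toy model of that flow; the profile bit positions here are illustrative only (the patch uses PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT for the forced mask):

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int workload_mask;
    static bool paused;

    static void apply_to_smu(unsigned int mask)
    {
        printf("SMU set_power_profile_mode(mask=0x%x)\n", mask);
    }

    static void switch_profile(int profile, bool enable)
    {
        if (enable)
            workload_mask |= 1u << profile;
        else
            workload_mask &= ~(1u << profile);
        if (!paused)               /* mirrors the smu->pause_workload check */
            apply_to_smu(workload_mask);
    }

    static void pause_profile(bool pause)
    {
        paused = pause;
        apply_to_smu(pause ? 1u << 0 /* bootup default */ : workload_mask);
    }

    int main(void)
    {
        switch_profile(3, true);   /* applied immediately */
        pause_profile(true);       /* park on bootup default */
        switch_profile(5, true);   /* bookkeeping only while paused */
        pause_profile(false);      /* resume: re-apply accumulated mask */
        return 0;
    }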
smu_link_reset_is_support(struct smu_context *smu) { bool ret = false; if (!smu->pm_enabled) return false; - if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support) - ret = smu->ppt_funcs->mode2_reset_is_support(smu); + if (smu->ppt_funcs && smu->ppt_funcs->link_reset_is_support) + ret = smu->ppt_funcs->link_reset_is_support(smu); return ret; } @@ -3442,6 +3532,19 @@ static int smu_mode2_reset(void *handle) return ret; } +int smu_link_reset(struct smu_context *smu) +{ + int ret = 0; + + if (!smu->pm_enabled) + return -EOPNOTSUPP; + + if (smu->ppt_funcs->link_reset) + ret = smu->ppt_funcs->link_reset(smu); + + return ret; +} + static int smu_enable_gfx_features(void *handle) { struct smu_context *smu = handle; @@ -3712,6 +3815,19 @@ int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type, return ret; } +static ssize_t smu_sys_get_xcp_metrics(void *handle, int xcp_id, void *table) +{ + struct smu_context *smu = handle; + + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + return -EOPNOTSUPP; + + if (!smu->adev->xcp_mgr || !smu->ppt_funcs->get_xcp_metrics) + return -EOPNOTSUPP; + + return smu->ppt_funcs->get_xcp_metrics(smu, xcp_id, table); +} + static const struct amd_pm_funcs swsmu_pm_funcs = { /* export for sysfs */ .set_fan_control_mode = smu_set_fan_control_mode, @@ -3733,6 +3849,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = { .get_pp_table = smu_sys_get_pp_table, .set_pp_table = smu_sys_set_pp_table, .switch_power_profile = smu_switch_power_profile, + .pause_power_profile = smu_pause_power_profile, /* export to amdgpu */ .dispatch_tasks = smu_handle_dpm_task, .load_firmware = smu_load_microcode, @@ -3769,6 +3886,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = { .get_uclk_dpm_states = smu_get_uclk_dpm_states, .get_dpm_clock_table = smu_get_dpm_clock_table, .get_smu_prv_buf_details = smu_get_prv_buffer_details, + .get_xcp_metrics = smu_sys_get_xcp_metrics, }; int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event, @@ -3941,3 +4059,11 @@ int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask) return ret; } + +int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask) +{ + if (smu->ppt_funcs && smu->ppt_funcs->dpm_reset_vcn) + smu->ppt_funcs->dpm_reset_vcn(smu, inst_mask); + + return 0; +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 3ba169639f54..b52e194397e2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -402,6 +402,7 @@ struct smu_power_gate { atomic_t vcn_gated[AMDGPU_MAX_VCN_INSTANCES]; atomic_t jpeg_gated; atomic_t vpe_gated; + atomic_t isp_gated; atomic_t umsch_mm_gated; }; @@ -438,9 +439,11 @@ struct mclock_latency_table { }; enum smu_reset_mode { - SMU_RESET_MODE_0, - SMU_RESET_MODE_1, - SMU_RESET_MODE_2, + SMU_RESET_MODE_0, + SMU_RESET_MODE_1, + SMU_RESET_MODE_2, + SMU_RESET_MODE_3, + SMU_RESET_MODE_4, }; enum smu_baco_state { @@ -558,6 +561,7 @@ struct smu_context { /* asic agnostic workload mask */ uint32_t workload_mask; + bool pause_workload; /* default/user workload preference */ uint32_t power_profile_mode; uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT]; @@ -1228,10 +1232,11 @@ struct pptable_funcs { * @mode1_reset_is_support: Check if GPU supports mode1 reset. */ bool (*mode1_reset_is_support)(struct smu_context *smu); + /** - * @mode2_reset_is_support: Check if GPU supports mode2 reset. + * @link_reset_is_support: Check if GPU supports link reset. 
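smu_link_reset() and smu_reset_vcn() both follow the driver's standard optional-callback shape: gate on pm_enabled first, then treat a missing per-ASIC hook as a quiet no-op rather than an error. Reduced sketch with stand-in types:

    #include <errno.h>

    struct ppt_funcs {
        int (*link_reset)(void *smu);   /* optional per-ASIC hook */
    };

    struct smu {
        int pm_enabled;
        const struct ppt_funcs *ppt_funcs;
    };

    static int do_link_reset(struct smu *smu)
    {
        if (!smu->pm_enabled)
            return -EOPNOTSUPP;
        if (smu->ppt_funcs->link_reset)
            return smu->ppt_funcs->link_reset(smu);
        return 0;                       /* no hook: quiet no-op */
    }

    static const struct ppt_funcs no_hooks; /* ASIC without a link reset */

    int main(void)
    {
        struct smu dev = { .pm_enabled = 1, .ppt_funcs = &no_hooks };
        return do_link_reset(&dev);     /* 0 */
    }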
*/ - bool (*mode2_reset_is_support)(struct smu_context *smu); + bool (*link_reset_is_support)(struct smu_context *smu); /** * @mode1_reset: Perform mode1 reset. @@ -1251,6 +1256,13 @@ struct pptable_funcs { int (*enable_gfx_features)(struct smu_context *smu); /** + * @link_reset: Perform link reset. + * + * The gfx device driver reset + */ + int (*link_reset)(struct smu_context *smu); + + /** * @get_dpm_ultimate_freq: Get the hard frequency range of a clock * domain in MHz. */ @@ -1382,6 +1394,11 @@ struct pptable_funcs { bool (*reset_sdma_is_supported)(struct smu_context *smu); /** + * @reset_vcn: message SMU to soft reset vcn instance. + */ + int (*dpm_reset_vcn)(struct smu_context *smu, uint32_t inst_mask); + + /** * @get_ecc_table: message SMU to get ECC INFO table. */ ssize_t (*get_ecc_info)(struct smu_context *smu, void *table); @@ -1420,6 +1437,12 @@ struct pptable_funcs { int (*dpm_set_vpe_enable)(struct smu_context *smu, bool enable); /** + * @dpm_set_isp_enable: Enable/disable ISP engine dynamic power + * management. + */ + int (*dpm_set_isp_enable)(struct smu_context *smu, bool enable); + + /** * @dpm_set_umsch_mm_enable: Enable/disable UMSCH engine dynamic power * management. */ @@ -1450,6 +1473,12 @@ struct pptable_funcs { */ int (*set_wbrf_exclusion_ranges)(struct smu_context *smu, struct freq_band_range *exclusion_ranges); + /** + * @get_xcp_metrics: Get a copy of the partition metrics table from SMU. + * Return: Size of table + */ + ssize_t (*get_xcp_metrics)(struct smu_context *smu, int xcp_id, + void *table); }; typedef enum { @@ -1600,8 +1629,9 @@ int smu_get_power_limit(void *handle, enum pp_power_type pp_power_type); bool smu_mode1_reset_is_support(struct smu_context *smu); -bool smu_mode2_reset_is_support(struct smu_context *smu); +bool smu_link_reset_is_support(struct smu_context *smu); int smu_mode1_reset(struct smu_context *smu); +int smu_link_reset(struct smu_context *smu); extern const struct amd_ip_funcs smu_ip_funcs; @@ -1612,7 +1642,7 @@ int smu_write_watermarks_table(struct smu_context *smu); int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max); -int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, +int smu_set_soft_freq_range(struct smu_context *smu, enum pp_clock_type clk_type, uint32_t min, uint32_t max); int smu_set_gfx_power_up_by_imu(struct smu_context *smu); @@ -1642,6 +1672,7 @@ int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size); int smu_send_rma_reason(struct smu_context *smu); int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask); bool smu_reset_sdma_is_supported(struct smu_context *smu); +int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask); int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type, int level); ssize_t smu_get_pm_policy_info(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h index 1bc30db22f9c..cd44f4254134 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h @@ -106,6 +106,7 @@ typedef struct { #define NUM_FCLK_DPM_LEVELS 8 #define NUM_MEM_PSTATE_LEVELS 4 +#define ISP_ALL_TILES_MASK 0x7FF typedef struct { uint32_t UClk; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h index 
d7505cfc433a..0a2ca544f4e3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h @@ -86,8 +86,10 @@ typedef enum { /*36*/ FEATURE_PIT = 36, /*37*/ FEATURE_DVO = 37, /*38*/ FEATURE_XVMINORPSM_CLKSTOP_DS = 38, +/*39*/ FEATURE_GLOBAL_DPM = 39, +/*40*/ FEATURE_NODE_POWER_MANAGER = 40, -/*39*/ NUM_FEATURES = 39 +/*41*/ NUM_FEATURES = 41 } FEATURE_LIST_e; //enum for MPIO PCIe gen speed msgs @@ -133,7 +135,7 @@ typedef enum { GFX_DVM_MARGIN_COUNT } GFX_DVM_MARGIN_e; -#define SMU_METRICS_TABLE_VERSION 0x12 +#define SMU_METRICS_TABLE_VERSION 0x13 typedef struct __attribute__((packed, aligned(4))) { uint64_t AccumulationCounter; @@ -275,6 +277,16 @@ typedef struct { //PSNs uint64_t PublicSerialNumber_AID[4]; uint64_t PublicSerialNumber_XCD[8]; + + //XGMI + uint32_t MaxXgmiWidth; + uint32_t MaxXgmiBitrate; + + // Telemetry + uint32_t InputTelemetryVoltageInmV; + + // General info + uint32_t pldmVersion[2]; } StaticMetricsTable_t; #pragma pack(pop) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h index f8ed45857878..01790a927930 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h @@ -127,7 +127,7 @@ typedef enum { VOLTAGE_GUARDBAND_COUNT } GFX_GUARDBAND_e; -#define SMU_METRICS_TABLE_VERSION 0xF +#define SMU_METRICS_TABLE_VERSION 0x11 // Unified metrics table for smu_v13_0_6 typedef struct __attribute__((packed, aligned(4))) { @@ -241,7 +241,10 @@ typedef struct __attribute__((packed, aligned(4))) { uint32_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated //Total App Clock Counter - uint64_t GfxclkBelowHostLimitAcc[8]; + uint64_t GfxclkBelowHostLimitPptAcc[8]; + uint64_t GfxclkBelowHostLimitThmAcc[8]; + uint64_t GfxclkBelowHostLimitTotalAcc[8]; + uint64_t GfxclkLowUtilizationAcc[8]; } MetricsTableV0_t; // Metrics table for smu_v13_0_6 APUS @@ -456,4 +459,13 @@ typedef struct __attribute__((packed, aligned(4))) { uint64_t AccGfxclkBelowHostLimit; } VfMetricsTable_t; +#pragma pack(push, 4) +typedef struct { + // Telemetry + uint32_t InputTelemetryVoltageInmV; + // General info + uint32_t pldmVersion[2]; +} StaticMetricsTable_t; +#pragma pack(pop) + #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h index 288b2576432b..41f268313613 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h @@ -94,7 +94,9 @@ #define PPSMC_MSG_RmaDueToBadPageThreshold 0x43 #define PPSMC_MSG_SetThrottlingPolicy 0x44 #define PPSMC_MSG_ResetSDMA 0x4D -#define PPSMC_Message_Count 0x4E +#define PPSMC_MSG_ResetVCN 0x4E +#define PPSMC_MSG_GetStaticMetricsTable 0x59 +#define PPSMC_Message_Count 0x5A //PPSMC Reset Types for driver msg argument #define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1 diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h index c9dee09395e3..d7a9e41820fa 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h @@ -277,6 +277,7 @@ __SMU_DUMMY_MAP(MALLPowerController), \ __SMU_DUMMY_MAP(MALLPowerState), \ __SMU_DUMMY_MAP(ResetSDMA), \ + __SMU_DUMMY_MAP(ResetVCN), \ __SMU_DUMMY_MAP(GetStaticMetricsTable), #undef __SMU_DUMMY_MAP @@ -304,6 +305,8 @@ enum smu_clk_type { 
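Both PMFW header updates follow the append-only convention for tables shared with firmware: new members (FEATURE_GLOBAL_DPM, MaxXgmiWidth, pldmVersion, ...) are added at the tail and the table version is bumped, so every pre-existing offset stays valid and consumers gate on the version rather than on sizeof. Illustrative structs, not the real layout:

    #include <stdint.h>
    #include <stdio.h>

    struct static_metrics_v12 {
        uint64_t serial[4];
    };

    struct static_metrics_v13 {
        uint64_t serial[4];             /* unchanged prefix */
        uint32_t max_xgmi_width;        /* appended in 0x13 */
        uint32_t max_xgmi_bitrate;      /* appended in 0x13 */
    };

    static void read_xgmi_caps(const void *table, uint32_t version)
    {
        if (version >= 0x13) {
            const struct static_metrics_v13 *m = table;
            printf("xgmi: width %u, bitrate %u\n",
                   (unsigned)m->max_xgmi_width,
                   (unsigned)m->max_xgmi_bitrate);
        } else {
            puts("xgmi caps absent in this table version");
        }
    }

    int main(void)
    {
        struct static_metrics_v13 t = { { 0 }, 16, 32 };

        read_xgmi_caps(&t, 0x13);
        read_xgmi_caps(&t, 0x12);   /* old producer: fields not trusted */
        return 0;
    }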
SMU_MCLK, SMU_PCIE, SMU_LCLK, + SMU_ISPICLK, + SMU_ISPXCLK, SMU_OD_CCLK, SMU_OD_SCLK, SMU_OD_MCLK, diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h index ed8304d82831..56ae555bb52a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h @@ -281,11 +281,6 @@ int smu_v11_0_set_single_dpm_table(struct smu_context *smu, enum smu_clk_type clk_type, struct smu_11_0_dpm_table *single_dpm_table); -int smu_v11_0_get_dpm_level_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t *min_value, - uint32_t *max_value); - int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu); uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index cd03caffe317..4263798d716b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -112,6 +112,7 @@ struct smu_13_0_dpm_context { uint32_t workload_policy_mask; uint32_t dcef_min_ds_clk; uint64_t caps; + uint32_t board_volt; }; enum smu_13_0_power_state { @@ -162,8 +163,6 @@ int smu_v13_0_notify_memory_pool_location(struct smu_context *smu); int smu_v13_0_system_features_control(struct smu_context *smu, bool en); -int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count); - int smu_v13_0_set_allowed_mask(struct smu_context *smu); int smu_v13_0_notify_display_change(struct smu_context *smu); @@ -183,13 +182,6 @@ int smu_v13_0_disable_thermal_alert(struct smu_context *smu); int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value); -int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk); - -int -smu_v13_0_display_clock_voltage_request(struct smu_context *smu, - struct pp_display_clock_request - *clock_req); - uint32_t smu_v13_0_get_fan_control_mode(struct smu_context *smu); @@ -226,11 +218,6 @@ int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max, bool automatic); -int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t min, - uint32_t max); - int smu_v13_0_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level); @@ -310,14 +297,6 @@ int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu, uint32_t *value); void smu_v13_0_interrupt_work(struct smu_context *smu); -bool smu_v13_0_12_is_dpm_running(struct smu_context *smu); -int smu_v13_0_12_get_max_metrics_size(void); -int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu); -int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, - MetricsMember_t member, - uint32_t *value); -ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table); -extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[]; -extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[]; +void smu_v13_0_reset_custom_level(struct smu_context *smu); #endif #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 8aa61a9f7778..9ad46f545d15 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -1267,6 +1267,9 @@ static int arcturus_set_fan_speed_rpm(struct smu_context *smu, uint32_t 
crystal_clock_freq = 2500; uint32_t tach_period; + if (!speed || speed > UINT_MAX/8) + return -EINVAL; + tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); WREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT, REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT), @@ -1344,7 +1347,7 @@ static int arcturus_get_power_limit(struct smu_context *smu, *default_power_limit = power_limit; if (max_power_limit) *max_power_limit = power_limit; - /** + /* * No lower bound is imposed on the limit. Any unreasonable limit set * will result in frequent throttling. */ diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 7fad5dfb39c4..aac202d0c30e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -2444,7 +2444,8 @@ static int navi10_update_pcie_parameters(struct smu_context *smu, struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; PPTable_t *pptable = smu->smu_table.driver_pptable; uint32_t smu_pcie_arg; - int ret, i; + int ret = 0; + int i; /* lclk dpm table setup */ for (i = 0; i < MAX_PCIE_CONF; i++) { @@ -2453,25 +2454,27 @@ static int navi10_update_pcie_parameters(struct smu_context *smu, } for (i = 0; i < NUM_LINK_LEVELS; i++) { - smu_pcie_arg = (i << 16) | - ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) : - (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ? - pptable->PcieLaneCount[i] : pcie_width_cap); - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_OverridePcieParameters, - smu_pcie_arg, - NULL); - - if (ret) - return ret; - - if (pptable->PcieGenSpeed[i] > pcie_gen_cap) - dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap; - if (pptable->PcieLaneCount[i] > pcie_width_cap) - dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap; + if (pptable->PcieGenSpeed[i] > pcie_gen_cap || + pptable->PcieLaneCount[i] > pcie_width_cap) { + dpm_context->dpm_tables.pcie_table.pcie_gen[i] = + pptable->PcieGenSpeed[i] > pcie_gen_cap ? + pcie_gen_cap : pptable->PcieGenSpeed[i]; + dpm_context->dpm_tables.pcie_table.pcie_lane[i] = + pptable->PcieLaneCount[i] > pcie_width_cap ? 
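The speed > UINT_MAX/8 guard now added to every set_fan_speed_rpm() variant exists because the divisor 8 * speed is evaluated in 32 bits: an oversized RPM request wraps it, and speed = 0x20000000 makes it exactly zero. A self-contained demonstration of the failure the check prevents:

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t crystal_clock_freq = 2500;
        uint32_t speed = 0x20000001u;       /* just above UINT_MAX/8 */
        uint32_t divisor = 8u * speed;      /* wraps to 8 */

        printf("8 * 0x%x wraps to %u\n", (unsigned)speed, (unsigned)divisor);
        if (!speed || speed > UINT_MAX / 8)
            puts("rejected with -EINVAL before the division");
        else
            printf("tach_period = %u\n",
                   60u * crystal_clock_freq * 10000u / divisor);
        return 0;
    }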
+ pcie_width_cap : pptable->PcieLaneCount[i]; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_gen_cap << 8; + smu_pcie_arg |= pcie_width_cap; + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } } - return 0; + return ret; } static inline void navi10_dump_od_table(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 19a25fdc2f5b..d57591509aed 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -2145,7 +2145,8 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu, uint8_t min_gen_speed, max_gen_speed; uint8_t min_lane_width, max_lane_width; uint32_t smu_pcie_arg; - int ret, i; + int ret = 0; + int i; GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1); GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2); @@ -2170,19 +2171,22 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu, pcie_table->pcie_lane[1] = max_lane_width; for (i = 0; i < NUM_LINK_LEVELS; i++) { - smu_pcie_arg = (i << 16 | + if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK) || + table_member1[i] > pcie_gen_cap || table_member2[i] > pcie_width_cap) { + smu_pcie_arg = (i << 16 | pcie_table->pcie_gen[i] << 8 | pcie_table->pcie_lane[i]); - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_OverridePcieParameters, - smu_pcie_arg, - NULL); - if (ret) - return ret; + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } } - return 0; + return ret; } static int sienna_cichlid_get_dpm_ultimate_freq(struct smu_context *smu, @@ -3089,11 +3093,6 @@ static int sienna_cichlid_stb_get_data_direct(struct smu_context *smu, return 0; } -static bool sienna_cichlid_is_mode2_reset_supported(struct smu_context *smu) -{ - return true; -} - static int sienna_cichlid_mode2_reset(struct smu_context *smu) { int ret = 0, index; @@ -3229,7 +3228,6 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .get_default_config_table_settings = sienna_cichlid_get_default_config_table_settings, .set_config_table = sienna_cichlid_set_config_table, .get_unique_id = sienna_cichlid_get_unique_id, - .mode2_reset_is_support = sienna_cichlid_is_mode2_reset_supported, .mode2_reset = sienna_cichlid_mode2_reset, }; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 78391d8f35a9..78e4186d06cc 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1204,7 +1204,7 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, uint32_t crystal_clock_freq = 2500; uint32_t tach_period; - if (speed == 0) + if (!speed || speed > UINT_MAX/8) return -EINVAL; /* * To prevent from possible overheat, some ASICs may have requirement @@ -2059,45 +2059,6 @@ int smu_v11_0_set_single_dpm_table(struct smu_context *smu, return 0; } -int smu_v11_0_get_dpm_level_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t *min_value, - uint32_t *max_value) -{ - uint32_t level_count = 0; - int ret = 0; - - if (!min_value && !max_value) - return -EINVAL; - - if (min_value) { - /* by default, level 0 clock value as min value */ - ret = smu_v11_0_get_dpm_freq_by_index(smu, - clk_type, - 0, - min_value); - if (ret) - return ret; - } - - if (max_value) { - ret = 
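All of the update_pcie_parameters() rewrites in this series share one message format: SMU_MSG_OverridePcieParameters carries the link level in bits 31:16, the gen-speed code in bits 15:8 and the lane-count code in bits 7:0, and the reworked loops only message levels that actually need capping, breaking out on the first failure. Packing helper as a sketch; note both fields are firmware codes, not literal lane counts:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t make_pcie_arg(uint32_t level, uint8_t gen, uint8_t lanes)
    {
        return (level << 16) | ((uint32_t)gen << 8) | lanes;
    }

    int main(void)
    {
        /* cap link level 1 to gen code 3, lane code 6 (example values) */
        printf("smu_pcie_arg = 0x%08x\n", make_pcie_arg(1, 3, 6));
        return 0;
    }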
smu_v11_0_get_dpm_level_count(smu, - clk_type, - &level_count); - if (ret) - return ret; - - ret = smu_v11_0_get_dpm_freq_by_index(smu, - clk_type, - level_count - 1, - max_value); - if (ret) - return ret; - } - - return ret; -} - int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index a55ea76d7399..2c9869feba61 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -666,7 +666,6 @@ static int vangogh_print_clk_levels(struct smu_context *smu, { DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_t metrics; - struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; @@ -682,31 +681,25 @@ static int vangogh_print_clk_levels(struct smu_context *smu, switch (clk_type) { case SMU_OD_SCLK: - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); - size += sysfs_emit_at(buf, size, "0: %10uMhz\n", - (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", - (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); - } + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "0: %10uMhz\n", + (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); + size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); break; case SMU_OD_CCLK: - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); - size += sysfs_emit_at(buf, size, "0: %10uMhz\n", - (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", - (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); - } + size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); + size += sysfs_emit_at(buf, size, "0: %10uMhz\n", + (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); + size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + (smu->cpu_actual_soft_max_freq > 0) ? 
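The vangogh and renoir print_clk_levels() hunks drop the AMD_DPM_FORCED_LEVEL_MANUAL gate, so the OD_SCLK/OD_CCLK/OD_RANGE sections are now emitted regardless of the current level; the surrounding size += pattern works because each emit helper returns the number of bytes it appended at the running offset. A user-space analogue of that accumulation:

    #include <stdio.h>

    int main(void)
    {
        char buf[128];
        int size = 0;

        /* each call appends at the tail, like sysfs_emit_at(buf, size, ...) */
        size += sprintf(buf + size, "%s:\n", "OD_SCLK");
        size += sprintf(buf + size, "0: %10uMhz\n", 200u);
        size += sprintf(buf + size, "1: %10uMhz\n", 1900u);
        fputs(buf, stdout);
        return size > 0 ? 0 : 1;
    }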
smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); break; case SMU_OD_RANGE: - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); - size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", - smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); - size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", - smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); - } + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", + smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); + size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", + smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); break; case SMU_SOCCLK: /* the level 3 ~ 6 of socclk use the same frequency for vangogh */ diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 9481f897432d..e97b0cf19197 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -497,7 +497,6 @@ static int renoir_print_clk_levels(struct smu_context *smu, int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0; SmuMetrics_t metrics; - struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); bool cur_value_match_level = false; memset(&metrics, 0, sizeof(metrics)); @@ -510,28 +509,24 @@ static int renoir_print_clk_levels(struct smu_context *smu, switch (clk_type) { case SMU_OD_RANGE: - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_GetMinGfxclkFrequency, - 0, &min); - if (ret) - return ret; - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_GetMaxGfxclkFrequency, - 0, &max); - if (ret) - return ret; - size += sysfs_emit_at(buf, size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max); - } + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetMinGfxclkFrequency, + 0, &min); + if (ret) + return ret; + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetMaxGfxclkFrequency, + 0, &max); + if (ret) + return ret; + size += sysfs_emit_at(buf, size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max); break; case SMU_OD_SCLK: - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; - max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; - size += sysfs_emit_at(buf, size, "OD_SCLK\n"); - size += sysfs_emit_at(buf, size, "0:%10uMhz\n", min); - size += sysfs_emit_at(buf, size, "1:%10uMhz\n", max); - } + min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; + max = (smu->gfx_actual_soft_max_freq > 0) ? 
smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; + size += sysfs_emit_at(buf, size, "OD_SCLK\n"); + size += sysfs_emit_at(buf, size, "0:%10uMhz\n", min); + size += sysfs_emit_at(buf, size, "1:%10uMhz\n", max); break; case SMU_GFXCLK: case SMU_SCLK: diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 83163d7c7f00..c63d2e28954d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -342,6 +342,61 @@ static int aldebaran_get_allowed_feature_mask(struct smu_context *smu, return 0; } +static int aldebaran_get_dpm_ultimate_freq(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *min, uint32_t *max) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_13_0_dpm_table *dpm_table; + uint32_t min_clk, max_clk; + + if (amdgpu_sriov_vf(smu->adev)) { + switch (clk_type) { + case SMU_MCLK: + case SMU_UCLK: + dpm_table = &dpm_context->dpm_tables.uclk_table; + break; + case SMU_GFXCLK: + case SMU_SCLK: + dpm_table = &dpm_context->dpm_tables.gfx_table; + break; + case SMU_SOCCLK: + dpm_table = &dpm_context->dpm_tables.soc_table; + break; + case SMU_FCLK: + dpm_table = &dpm_context->dpm_tables.fclk_table; + break; + case SMU_VCLK: + dpm_table = &dpm_context->dpm_tables.vclk_table; + break; + case SMU_DCLK: + dpm_table = &dpm_context->dpm_tables.dclk_table; + break; + default: + return -EINVAL; + } + + min_clk = dpm_table->min; + max_clk = dpm_table->max; + + if (min) { + if (!min_clk) + return -ENODATA; + *min = min_clk; + } + if (max) { + if (!max_clk) + return -ENODATA; + *max = max_clk; + } + + } else { + return smu_v13_0_get_dpm_ultimate_freq(smu, clk_type, min, max); + } + + return 0; +} + static int aldebaran_set_default_dpm_table(struct smu_context *smu) { struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; @@ -1270,6 +1325,7 @@ static int aldebaran_set_performance_level(struct smu_context *smu, struct smu_13_0_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table; struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; + int r; /* Disable determinism if switching to another mode */ if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) && @@ -1282,7 +1338,11 @@ static int aldebaran_set_performance_level(struct smu_context *smu, case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM: return 0; - + case AMD_DPM_FORCED_LEVEL_AUTO: + r = smu_v13_0_set_performance_level(smu, level); + if (!r) + smu_v13_0_reset_custom_level(smu); + return r; case AMD_DPM_FORCED_LEVEL_HIGH: case AMD_DPM_FORCED_LEVEL_LOW: case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: @@ -1423,7 +1483,11 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_ min_clk = dpm_context->dpm_tables.gfx_table.min; max_clk = dpm_context->dpm_tables.gfx_table.max; - return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false); + ret = aldebaran_set_soft_freq_limited_range( + smu, SMU_GFXCLK, min_clk, max_clk, false); + if (ret) + return ret; + smu_v13_0_reset_custom_level(smu); } break; case PP_OD_COMMIT_DPM_TABLE: @@ -1976,11 +2040,6 @@ static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu) return true; } -static bool aldebaran_is_mode2_reset_supported(struct smu_context *smu) -{ - return true; -} - static int aldebaran_set_mp1_state(struct smu_context *smu, enum pp_mp1_state mp1_state) { @@ -2077,7 +2136,7 @@ static const struct pptable_funcs 
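aldebaran now clears its cached custom pstate both after a successful switch back to AMD_DPM_FORCED_LEVEL_AUTO and after restoring the default GFXCLK limits through the OD path, so user-set min/max values cannot leak into a later mode change. A reduced sketch of what smu_v13_0_reset_custom_level() does (types simplified; the full implementation appears later in this patch):

    struct pstate_clk { unsigned int min, max; };

    struct pstate_table {
        struct pstate_clk uclk_custom;
        struct pstate_clk gfxclk_custom;
    };

    static void reset_custom_level(struct pstate_table *t)
    {
        t->uclk_custom.min = t->uclk_custom.max = 0;
        t->gfxclk_custom.min = t->gfxclk_custom.max = 0;
    }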
aldebaran_ppt_funcs = { .set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc, .get_bamaco_support = aldebaran_get_bamaco_support, - .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq, + .get_dpm_ultimate_freq = aldebaran_get_dpm_ultimate_freq, .set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range, .od_edit_dpm_table = aldebaran_usr_edit_dpm_table, .set_df_cstate = aldebaran_set_df_cstate, @@ -2086,7 +2145,6 @@ static const struct pptable_funcs aldebaran_ppt_funcs = { .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, .get_gpu_metrics = aldebaran_get_gpu_metrics, .mode1_reset_is_support = aldebaran_is_mode1_reset_supported, - .mode2_reset_is_support = aldebaran_is_mode2_reset_supported, .smu_handle_passthrough_sbr = aldebaran_smu_handle_passthrough_sbr, .mode1_reset = aldebaran_mode1_reset, .set_mp1_state = aldebaran_set_mp1_state, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 0915d6377613..1a1f2a6b2e52 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -58,6 +58,7 @@ MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin"); MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin"); +MODULE_FIRMWARE("amdgpu/smu_13_0_0_kicker.bin"); MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin"); MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin"); @@ -92,7 +93,7 @@ const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16}; int smu_v13_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - char ucode_prefix[15]; + char ucode_prefix[30]; int err = 0; const struct smc_firmware_header_v1_0 *hdr; const struct common_firmware_header *header; @@ -103,8 +104,13 @@ int smu_v13_0_init_microcode(struct smu_context *smu) return 0; amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); - err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, - "amdgpu/%s.bin", ucode_prefix); + + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_kicker.bin", ucode_prefix); + else + err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s.bin", ucode_prefix); if (err) goto out; @@ -709,18 +715,6 @@ int smu_v13_0_notify_memory_pool_location(struct smu_context *smu) return ret; } -int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk) -{ - int ret; - - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL); - if (ret) - dev_err(smu->adev->dev, "SMU13 attempt to set divider for DCEFCLK Failed!"); - - return ret; -} - int smu_v13_0_set_driver_table_location(struct smu_context *smu) { struct smu_table *driver_table = &smu->smu_table.driver_table; @@ -761,18 +755,6 @@ int smu_v13_0_set_tool_table_location(struct smu_context *smu) return ret; } -int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count) -{ - int ret = 0; - - if (!smu->pm_enabled) - return ret; - - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL); - - return ret; -} - int smu_v13_0_set_allowed_mask(struct smu_context *smu) { struct smu_feature *feature = &smu->smu_feature; @@ -1073,56 +1055,6 @@ int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value) } -int -smu_v13_0_display_clock_voltage_request(struct smu_context *smu, - struct pp_display_clock_request - *clock_req) -{ - enum amd_pp_clock_type clk_type = 
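smu_v13_0_init_microcode() picks a differently signed image for kicker boards: the same IP-derived prefix resolves to <prefix>_kicker.bin when amdgpu_is_kicker_fw() says so, and ucode_prefix grows from 15 to 30 bytes for headroom. The decision, reduced to a runnable fragment (amdgpu_is_kicker_fw() replaced by a stand-in flag):

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        char fw_name[64];
        const char *prefix = "smu_13_0_0";
        bool is_kicker = true;    /* amdgpu_is_kicker_fw(adev) stand-in */

        snprintf(fw_name, sizeof(fw_name),
                 is_kicker ? "amdgpu/%s_kicker.bin" : "amdgpu/%s.bin",
                 prefix);
        puts(fw_name);            /* amdgpu/smu_13_0_0_kicker.bin */
        return 0;
    }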
clock_req->clock_type; - int ret = 0; - enum smu_clk_type clk_select = 0; - uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; - - if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) || - smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { - switch (clk_type) { - case amd_pp_dcef_clock: - clk_select = SMU_DCEFCLK; - break; - case amd_pp_disp_clock: - clk_select = SMU_DISPCLK; - break; - case amd_pp_pixel_clock: - clk_select = SMU_PIXCLK; - break; - case amd_pp_phy_clock: - clk_select = SMU_PHYCLK; - break; - case amd_pp_mem_clock: - clk_select = SMU_UCLK; - break; - default: - dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__); - ret = -EINVAL; - break; - } - - if (ret) - goto failed; - - if (clk_select == SMU_UCLK && smu->disable_uclk_switch) - return 0; - - ret = smu_v13_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0); - - if (clk_select == SMU_UCLK) - smu->hard_min_uclk_req_from_dal = clk_freq; - } - -failed: - return ret; -} - uint32_t smu_v13_0_get_fan_control_mode(struct smu_context *smu) { if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT)) @@ -1226,7 +1158,7 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu, uint32_t tach_period; int ret; - if (!speed) + if (!speed || speed > UINT_MAX/8) return -EINVAL; ret = smu_v13_0_auto_fan_control(smu, 0); @@ -1647,45 +1579,6 @@ out: return ret; } -int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu, - enum smu_clk_type clk_type, - uint32_t min, - uint32_t max) -{ - int ret = 0, clk_id = 0; - uint32_t param; - - if (min <= 0 && max <= 0) - return -EINVAL; - - if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) - return 0; - - clk_id = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_CLK, - clk_type); - if (clk_id < 0) - return clk_id; - - if (max > 0) { - param = (uint32_t)((clk_id << 16) | (max & 0xffff)); - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq, - param, NULL); - if (ret) - return ret; - } - - if (min > 0) { - param = (uint32_t)((clk_id << 16) | (min & 0xffff)); - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, - param, NULL); - if (ret) - return ret; - } - - return ret; -} - int smu_v13_0_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) { @@ -2493,7 +2386,8 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu, &dpm_context->dpm_tables.pcie_table; int num_of_levels = pcie_table->num_of_link_levels; uint32_t smu_pcie_arg; - int ret, i; + int ret = 0; + int i; if (!num_of_levels) return 0; @@ -2509,30 +2403,38 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu, for (i = 0; i < num_of_levels; i++) { pcie_table->pcie_gen[i] = pcie_gen_cap; pcie_table->pcie_lane[i] = pcie_width_cap; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; } } else { for (i = 0; i < num_of_levels; i++) { - if (pcie_table->pcie_gen[i] > pcie_gen_cap) + if (pcie_table->pcie_gen[i] > pcie_gen_cap || + pcie_table->pcie_lane[i] > pcie_width_cap) { pcie_table->pcie_gen[i] = pcie_gen_cap; - if (pcie_table->pcie_lane[i] > pcie_width_cap) pcie_table->pcie_lane[i] = pcie_width_cap; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + 
NULL); + if (ret) + break; + } } } - for (i = 0; i < num_of_levels; i++) { - smu_pcie_arg = i << 16; - smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; - smu_pcie_arg |= pcie_table->pcie_lane[i]; - - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_OverridePcieParameters, - smu_pcie_arg, - NULL); - if (ret) - return ret; - } - - return 0; + return ret; } int smu_v13_0_disable_pmfw_state(struct smu_context *smu) @@ -2595,3 +2497,13 @@ int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu, return ret; } + +void smu_v13_0_reset_custom_level(struct smu_context *smu) +{ + struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; + + pstate_table->uclk_pstate.custom.min = 0; + pstate_table->uclk_pstate.custom.max = 0; + pstate_table->gfxclk_pstate.custom.min = 0; + pstate_table->gfxclk_pstate.custom.max = 0; +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 5a9711e8cf68..e084ed99ec0e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -572,8 +572,6 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu) PPTable_t *pptable = table_context->driver_pptable; SkuTable_t *skutable = &pptable->SkuTable; struct smu_13_0_dpm_table *dpm_table; - struct smu_13_0_pcie_table *pcie_table; - uint32_t link_level; int ret = 0; /* socclk dpm table setup */ @@ -689,24 +687,6 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu) dpm_table->max = dpm_table->dpm_levels[0].value; } - /* lclk dpm table setup */ - pcie_table = &dpm_context->dpm_tables.pcie_table; - pcie_table->num_of_link_levels = 0; - for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { - if (!skutable->PcieGenSpeed[link_level] && - !skutable->PcieLaneCount[link_level] && - !skutable->LclkFreq[link_level]) - continue; - - pcie_table->pcie_gen[pcie_table->num_of_link_levels] = - skutable->PcieGenSpeed[link_level]; - pcie_table->pcie_lane[pcie_table->num_of_link_levels] = - skutable->PcieLaneCount[link_level]; - pcie_table->clk_freq[pcie_table->num_of_link_levels] = - skutable->LclkFreq[link_level]; - pcie_table->num_of_link_levels++; - } - /* dcefclk dpm table setup */ dpm_table = &dpm_context->dpm_tables.dcef_table; if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) { @@ -3150,6 +3130,90 @@ static int smu_v13_0_0_set_power_limit(struct smu_context *smu, return 0; } +static int smu_v13_0_0_update_pcie_parameters(struct smu_context *smu, + uint8_t pcie_gen_cap, + uint8_t pcie_width_cap) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_13_0_pcie_table *pcie_table = + &dpm_context->dpm_tables.pcie_table; + int num_of_levels; + uint32_t smu_pcie_arg; + uint32_t link_level; + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + SkuTable_t *skutable = &pptable->SkuTable; + int ret = 0; + int i; + + pcie_table->num_of_link_levels = 0; + + for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { + if (!skutable->PcieGenSpeed[link_level] && + !skutable->PcieLaneCount[link_level] && + !skutable->LclkFreq[link_level]) + continue; + + pcie_table->pcie_gen[pcie_table->num_of_link_levels] = + skutable->PcieGenSpeed[link_level]; + pcie_table->pcie_lane[pcie_table->num_of_link_levels] = + skutable->PcieLaneCount[link_level]; + pcie_table->clk_freq[pcie_table->num_of_link_levels] = + skutable->LclkFreq[link_level]; + 
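With the hunk above, smu_v13_0_0 derives its link-level table inside update_pcie_parameters() instead of once in set_default_dpm_table(). One plausible benefit: the SkuTable source is never modified, so every call caps pristine firmware values and repeated calls with different caps cannot compound earlier clamping. The rebuild step, reduced to stand-in types:

    struct sku  { unsigned char gen[8], lane[8]; unsigned short lclk[8]; };
    struct pcie { unsigned char gen[8], lane[8]; unsigned short lclk[8]; int n; };

    static void rebuild_link_levels(struct pcie *p, const struct sku *s)
    {
        p->n = 0;
        for (int i = 0; i < 8; i++) {
            if (!s->gen[i] && !s->lane[i] && !s->lclk[i])
                continue;               /* unpopulated level */
            p->gen[p->n]  = s->gen[i];
            p->lane[p->n] = s->lane[i];
            p->lclk[p->n] = s->lclk[i];
            p->n++;
        }
    }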
pcie_table->num_of_link_levels++; + } + + num_of_levels = pcie_table->num_of_link_levels; + if (!num_of_levels) + return 0; + + if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { + if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap) + pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1]; + + if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap) + pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1]; + + /* Force all levels to use the same settings */ + for (i = 0; i < num_of_levels; i++) { + pcie_table->pcie_gen[i] = pcie_gen_cap; + pcie_table->pcie_lane[i] = pcie_width_cap; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } + } else { + for (i = 0; i < num_of_levels; i++) { + if (pcie_table->pcie_gen[i] > pcie_gen_cap || + pcie_table->pcie_lane[i] > pcie_width_cap) { + pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ? + pcie_gen_cap : pcie_table->pcie_gen[i]; + pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ? + pcie_width_cap : pcie_table->pcie_lane[i]; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } + } + } + + return ret; +} + static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask, .set_default_dpm_table = smu_v13_0_0_set_default_dpm_table, @@ -3179,7 +3243,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .feature_is_enabled = smu_cmn_feature_is_enabled, .print_clk_levels = smu_v13_0_0_print_clk_levels, .force_clk_levels = smu_v13_0_0_force_clk_levels, - .update_pcie_parameters = smu_v13_0_update_pcie_parameters, + .update_pcie_parameters = smu_v13_0_0_update_pcie_parameters, .get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range, .register_irq_handler = smu_v13_0_register_irq_handler, .enable_thermal_alert = smu_v13_0_enable_thermal_alert, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c index 238bd71baa6d..02a455a31c25 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c @@ -187,28 +187,34 @@ int smu_v13_0_12_get_max_metrics_size(void) return max(sizeof(StaticMetricsTable_t), sizeof(MetricsTable_t)); } -static int smu_v13_0_12_get_static_metrics_table(struct smu_context *smu) +static void smu_v13_0_12_init_xgmi_data(struct smu_context *smu, + StaticMetricsTable_t *static_metrics) { struct smu_table_context *smu_table = &smu->smu_table; - uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size; - struct smu_table *table = &smu_table->driver_table; + uint16_t max_speed; + uint8_t max_width; int ret; - ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetStaticMetricsTable, NULL); - if (ret) { - dev_info(smu->adev->dev, - "Failed to export static metrics table!\n"); - return ret; - } - - amdgpu_asic_invalidate_hdp(smu->adev, NULL); - memcpy(smu_table->metrics_table, table->cpu_addr, table_size); + if (smu_table->tables[SMU_TABLE_SMU_METRICS].version >= 0x13) { + max_width = (uint8_t)static_metrics->MaxXgmiWidth; + max_speed = (uint16_t)static_metrics->MaxXgmiBitrate; + ret = 0; + } 
else { + MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table; - return 0; + ret = smu_v13_0_6_get_metrics_table(smu, NULL, true); + if (!ret) { + max_width = (uint8_t)metrics->XgmiWidth; + max_speed = (uint16_t)metrics->XgmiBitrate; + } + } + if (!ret) + amgpu_xgmi_set_max_speed_width(smu->adev, max_speed, max_width); } int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu) { + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; struct smu_table_context *smu_table = &smu->smu_table; StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table; struct PPTable_t *pptable = @@ -217,7 +223,7 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu) int ret, i; if (!pptable->Init) { - ret = smu_v13_0_12_get_static_metrics_table(smu); + ret = smu_v13_0_6_get_static_metrics_table(smu); if (ret) return ret; @@ -257,6 +263,18 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu) if (ret) return ret; + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(BOARD_VOLTAGE))) { + if (!static_metrics->InputTelemetryVoltageInmV) { + dev_warn(smu->adev->dev, "Invalid board voltage %d\n", + static_metrics->InputTelemetryVoltageInmV); + } + dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV; + } + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PLDM_VERSION)) && + static_metrics->pldmVersion[0] != 0xFFFFFFFF) + smu->adev->firmware.pldm_version = + static_metrics->pldmVersion[0]; + smu_v13_0_12_init_xgmi_data(smu, static_metrics); pptable->Init = true; } @@ -283,7 +301,6 @@ int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, struct smu_table_context *smu_table = &smu->smu_table; MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table; struct amdgpu_device *adev = smu->adev; - int ret = 0; int xcc_id; /* For clocks with multiple instances, only report the first one */ @@ -339,14 +356,70 @@ int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, break; } - return ret; + return 0; +} + +ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics) +{ + const u8 num_jpeg_rings = NUM_JPEG_RINGS_FW; + struct amdgpu_partition_metrics_v1_0 *xcp_metrics; + struct amdgpu_device *adev = smu->adev; + MetricsTable_t *metrics; + int inst, j, k, idx; + u32 inst_mask; + + metrics = (MetricsTable_t *)smu_metrics; + xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *) table; + smu_cmn_init_partition_metrics(xcp_metrics, 1, 0); + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + /* Both JPEG and VCN has same instance */ + inst = GET_INST(VCN, k); + for (j = 0; j < num_jpeg_rings; ++j) { + xcp_metrics->jpeg_busy[(idx * num_jpeg_rings) + j] = + SMUQ10_ROUND(metrics-> + JpegBusy[(inst * num_jpeg_rings) + j]); + } + xcp_metrics->vcn_busy[idx] = + SMUQ10_ROUND(metrics->VcnBusy[inst]); + xcp_metrics->current_vclk0[idx] = SMUQ10_ROUND( + metrics->VclkFrequency[inst]); + xcp_metrics->current_dclk0[idx] = SMUQ10_ROUND( + metrics->DclkFrequency[inst]); + xcp_metrics->current_socclk[idx] = SMUQ10_ROUND( + metrics->SocclkFrequency[inst]); + + idx++; + } + + xcp_metrics->current_uclk = + SMUQ10_ROUND(metrics->UclkFrequency); + + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + inst = GET_INST(GC, k); + xcp_metrics->current_gfxclk[idx] = SMUQ10_ROUND(metrics->GfxclkFrequency[inst]); + xcp_metrics->gfx_busy_inst[idx] = 
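The SMUQ10_ROUND() conversions that dominate the metrics code exist because firmware reports clocks and utilization in unsigned Q10 fixed point, where 1024 represents 1.0. A round-to-nearest conversion equivalent to what these call sites rely on (the driver defines its own macros; this is just the arithmetic):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t q10_round(uint64_t x)
    {
        return (x + (1u << 9)) >> 10;   /* add half an LSB, then truncate */
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)q10_round(1536)); /* 1.5 -> 2 */
        printf("%llu\n", (unsigned long long)q10_round(1024)); /* 1.0 -> 1 */
        return 0;
    }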
SMUQ10_ROUND(metrics->GfxBusy[inst]); + xcp_metrics->gfx_busy_acc[idx] = SMUQ10_ROUND(metrics->GfxBusyAcc[inst]); + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) { + xcp_metrics->gfx_below_host_limit_ppt_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]); + xcp_metrics->gfx_below_host_limit_thm_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]); + xcp_metrics->gfx_low_utilization_acc[idx] = SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]); + xcp_metrics->gfx_below_host_limit_total_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]); + } + idx++; + } + + return sizeof(*xcp_metrics); } -ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table) +ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics) { struct smu_table_context *smu_table = &smu->smu_table; - struct gpu_metrics_v1_7 *gpu_metrics = - (struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table; + struct gpu_metrics_v1_8 *gpu_metrics = + (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table; int ret = 0, xcc_id, inst, i, j, k, idx; struct amdgpu_device *adev = smu->adev; u8 num_jpeg_rings_gpu_metrics; @@ -354,10 +427,9 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table) struct amdgpu_xcp *xcp; u32 inst_mask; - metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL); - memcpy(metrics, smu_table->metrics_table, sizeof(MetricsTable_t)); + metrics = (MetricsTable_t *)smu_metrics; - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 7); + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8); gpu_metrics->temperature_hotspot = SMUQ10_ROUND(metrics->MaxSocketTemperature); @@ -436,13 +508,16 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table) gpu_metrics->mem_activity_acc = SMUQ10_ROUND(metrics->DramBandwidthUtilizationAcc); for (i = 0; i < NUM_XGMI_LINKS; i++) { - gpu_metrics->xgmi_read_data_acc[i] = + j = amdgpu_xgmi_get_ext_link(adev, i); + if (j < 0 || j >= NUM_XGMI_LINKS) + continue; + gpu_metrics->xgmi_read_data_acc[j] = SMUQ10_ROUND(metrics->XgmiReadDataSizeAcc[i]); - gpu_metrics->xgmi_write_data_acc[i] = + gpu_metrics->xgmi_write_data_acc[j] = SMUQ10_ROUND(metrics->XgmiWriteDataSizeAcc[i]); ret = amdgpu_get_xgmi_link_status(adev, i); if (ret >= 0) - gpu_metrics->xgmi_link_status[i] = ret; + gpu_metrics->xgmi_link_status[j] = ret; } gpu_metrics->num_partition = adev->xcp_mgr->num_xcps; @@ -474,6 +549,16 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table) SMUQ10_ROUND(metrics->GfxBusy[inst]); gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] = SMUQ10_ROUND(metrics->GfxBusyAcc[inst]); + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) { + gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] = + SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]); + gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] = + SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]); + gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] = + SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]); + gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] = + SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]); + } idx++; } } @@ -484,7 +569,6 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table) gpu_metrics->firmware_timestamp = metrics->Timestamp; *table = (void *)gpu_metrics; - kfree(metrics); return sizeof(*gpu_metrics); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c 
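The gpu_metrics XGMI hunk stops assuming that firmware link order matches the externally visible topology: amdgpu_xgmi_get_ext_link() translates each firmware index, and counters with no valid mapping are skipped rather than misfiled. A self-contained sketch of that remap, with a made-up mapping table standing in for the real helper:

    /* stand-in for amdgpu_xgmi_get_ext_link(): -1 means no external slot */
    static int get_ext_link(int i)
    {
        static const int map[] = { 2, 0, -1, 1 };
        return (i >= 0 && i < 4) ? map[i] : -1;
    }

    static void remap_link_counters(const unsigned long long *fw_acc,
                                    unsigned long long *out, int nlinks)
    {
        for (int i = 0; i < nlinks; i++) {
            int j = get_ext_link(i);

            if (j < 0 || j >= nlinks)
                continue;           /* skip rather than misfile */
            out[j] = fw_acc[i];
        }
    }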
b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 682646068000..9cc294f4708b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -101,25 +101,6 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin"); #define MCA_BANK_IPID(_ip, _hwid, _type) \ [AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, } -#define SMU_CAP(x) SMU_13_0_6_CAPS_##x - -enum smu_v13_0_6_caps { - SMU_CAP(DPM), - SMU_CAP(DPM_POLICY), - SMU_CAP(OTHER_END_METRICS), - SMU_CAP(SET_UCLK_MAX), - SMU_CAP(PCIE_METRICS), - SMU_CAP(HST_LIMIT_METRICS), - SMU_CAP(MCA_DEBUG_MODE), - SMU_CAP(PER_INST_METRICS), - SMU_CAP(CTF_LIMIT), - SMU_CAP(RMA_MSG), - SMU_CAP(ACA_SYND), - SMU_CAP(SDMA_RESET), - SMU_CAP(STATIC_METRICS), - SMU_CAP(ALL), -}; - struct mca_bank_ipid { enum amdgpu_mca_ip ip; uint16_t hwid; @@ -195,6 +176,8 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0), MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0), MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0), + MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0), + MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 0), }; // clang-format on @@ -300,8 +283,8 @@ static inline void smu_v13_0_6_cap_clear(struct smu_context *smu, dpm_context->caps &= ~BIT_ULL(cap); } -static inline bool smu_v13_0_6_cap_supported(struct smu_context *smu, - enum smu_v13_0_6_caps cap) +bool smu_v13_0_6_cap_supported(struct smu_context *smu, + enum smu_v13_0_6_caps cap) { struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; @@ -325,12 +308,15 @@ static void smu_v13_0_14_init_caps(struct smu_context *smu) if (fw_ver >= 0x05550E00) smu_v13_0_6_cap_set(smu, SMU_CAP(OTHER_END_METRICS)); - if (fw_ver >= 0x05551000) - smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS)); if (fw_ver >= 0x05550B00) smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS)); if (fw_ver >= 0x5551200) smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET)); + if (fw_ver >= 0x5551600) { + smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS)); + smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE)); + smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION)); + } } static void smu_v13_0_12_init_caps(struct smu_context *smu) @@ -342,7 +328,6 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu) SMU_CAP(RMA_MSG), SMU_CAP(ACA_SYND), SMU_CAP(OTHER_END_METRICS), - SMU_CAP(HST_LIMIT_METRICS), SMU_CAP(PER_INST_METRICS) }; uint32_t fw_ver = smu->smc_fw_version; @@ -357,6 +342,14 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu) if (fw_ver >= 0x00561E00) smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS)); + + if (fw_ver >= 0x00562500) + smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS)); + + if (fw_ver >= 0x04560100) { + smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE)); + smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION)); + } } static void smu_v13_0_6_init_caps(struct smu_context *smu) @@ -387,8 +380,6 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu) smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG)); smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND)); - if (fw_ver >= 0x04556F00) - smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS)); if (fw_ver >= 0x04556A00) smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS)); } else { @@ -408,8 +399,17 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu) smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG)); if (fw_ver < 0x00555600) smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND)); - if (pgm == 0 && fw_ver >= 
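Moving the SMU_CAP() enum out of smu_v13_0_6_ppt.c (and exporting cap_supported) lets smu_v13_0_12 share the same capability machinery: one u64 per device, one bit per capability, set once in *_init_caps() from firmware-version checks and tested everywhere else. The shape, runnable:

    #include <stdint.h>
    #include <stdio.h>

    enum caps { CAP_STATIC_METRICS, CAP_BOARD_VOLTAGE, CAP_PLDM_VERSION };

    static uint64_t caps;

    static void cap_set(enum caps c)       { caps |= 1ull << c; }
    static int  cap_supported(enum caps c) { return !!(caps & (1ull << c)); }

    int main(void)
    {
        uint32_t fw_ver = 0x04560100;

        if (fw_ver >= 0x04560100) {    /* version gate, as in init_caps */
            cap_set(CAP_BOARD_VOLTAGE);
            cap_set(CAP_PLDM_VERSION);
        }
        printf("board voltage supported: %d\n",
               cap_supported(CAP_BOARD_VOLTAGE));
        return 0;
    }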
0x557900) + if ((pgm == 7 && fw_ver >= 0x7550E00) || + (pgm == 0 && fw_ver >= 0x00557E00)) smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS)); + if ((pgm == 0 && fw_ver >= 0x00557F01) || + (pgm == 7 && fw_ver >= 0x7551000)) { + smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS)); + smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE)); + } + if ((pgm == 0 && fw_ver >= 0x00558000) || + (pgm == 7 && fw_ver >= 0x7551000)) + smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION)); } if (((pgm == 7) && (fw_ver >= 0x7550700)) || ((pgm == 0) && (fw_ver >= 0x00557900)) || @@ -533,7 +533,7 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu) return -ENOMEM; smu_table->metrics_time = 0; - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_7); + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_8); smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); if (!smu_table->gpu_metrics_table) { @@ -690,8 +690,8 @@ static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu, return 0; } -static int smu_v13_0_6_get_metrics_table(struct smu_context *smu, - void *metrics_table, bool bypass_cache) +int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table, + bool bypass_cache) { struct smu_table_context *smu_table = &smu->smu_table; uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size; @@ -755,9 +755,48 @@ static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu, return pm_metrics->common_header.structure_size; } +static void smu_v13_0_6_fill_static_metrics_table(struct smu_context *smu, + StaticMetricsTable_t *static_metrics) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + + if (!static_metrics->InputTelemetryVoltageInmV) { + dev_warn(smu->adev->dev, "Invalid board voltage %d\n", + static_metrics->InputTelemetryVoltageInmV); + } + + dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV; + + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PLDM_VERSION)) && + static_metrics->pldmVersion[0] != 0xFFFFFFFF) + smu->adev->firmware.pldm_version = + static_metrics->pldmVersion[0]; +} + +int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size; + struct smu_table *table = &smu_table->driver_table; + int ret; + + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetStaticMetricsTable, NULL); + if (ret) { + dev_info(smu->adev->dev, + "Failed to export static metrics table!\n"); + return ret; + } + + amdgpu_asic_invalidate_hdp(smu->adev, NULL); + memcpy(smu_table->metrics_table, table->cpu_addr, table_size); + + return 0; +} + static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; + StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table; MetricsTableV0_t *metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table; MetricsTableV1_t *metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table; MetricsTableV2_t *metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table; @@ -766,8 +805,11 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) int version = smu_v13_0_6_get_metrics_version(smu); int ret, i, retry = 100; uint32_t table_version; + uint16_t max_speed; + uint8_t max_width; - if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) && + 
smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) return smu_v13_0_12_setup_driver_pptable(smu); /* Store one-time values in driver PPTable */ @@ -800,6 +842,9 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, version)); pptable->MinGfxclkFrequency = SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, version)); + max_width = (uint8_t)GET_METRIC_FIELD(XgmiWidth, version); + max_speed = (uint16_t)GET_METRIC_FIELD(XgmiBitrate, version); + amdgpu_xgmi_set_max_speed_width(smu->adev, max_speed, max_width); for (i = 0; i < 4; ++i) { pptable->FclkFrequencyTable[i] = @@ -821,6 +866,12 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) GET_METRIC_FIELD(PublicSerialNumber_AID, version)[0]; pptable->Init = true; + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) { + ret = smu_v13_0_6_get_static_metrics_table(smu); + if (ret) + return ret; + smu_v13_0_6_fill_static_metrics_table(smu, static_metrics); + } } return 0; @@ -830,51 +881,51 @@ static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max) { + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; struct smu_table_context *smu_table = &smu->smu_table; struct PPTable_t *pptable = (struct PPTable_t *)smu_table->driver_pptable; - uint32_t clock_limit = 0, param; + struct smu_13_0_dpm_table *dpm_table; + uint32_t min_clk, max_clk, param; int ret = 0, clk_id = 0; - if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) { + /* Use dpm tables, if data is already fetched */ + if (pptable->Init) { switch (clk_type) { case SMU_MCLK: case SMU_UCLK: - if (pptable->Init) - clock_limit = pptable->UclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.uclk_table; break; case SMU_GFXCLK: case SMU_SCLK: - if (pptable->Init) - clock_limit = pptable->MinGfxclkFrequency; + dpm_table = &dpm_context->dpm_tables.gfx_table; break; case SMU_SOCCLK: - if (pptable->Init) - clock_limit = pptable->SocclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.soc_table; break; case SMU_FCLK: - if (pptable->Init) - clock_limit = pptable->FclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.fclk_table; break; case SMU_VCLK: - if (pptable->Init) - clock_limit = pptable->VclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.vclk_table; break; case SMU_DCLK: - if (pptable->Init) - clock_limit = pptable->DclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.dclk_table; break; default: - break; + return -EINVAL; } - if (min) - *min = clock_limit; + min_clk = dpm_table->min; + max_clk = dpm_table->max; + if (min) + *min = min_clk; if (max) - *max = clock_limit; + *max = max_clk; - return 0; + if (min_clk && max_clk) + return 0; } if (!(clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)) { @@ -1150,7 +1201,8 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu, if (ret) return ret; - if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) && + smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) return smu_v13_0_12_get_smu_metrics_data(smu, member, value); /* For clocks with multiple instances, only report the first one */ @@ -1335,8 +1387,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, return ret; } - min_clk = pstate_table->gfxclk_pstate.curr.min; - max_clk = pstate_table->gfxclk_pstate.curr.max; + single_dpm_table = 
&(dpm_context->dpm_tables.gfx_table); + min_clk = single_dpm_table->min; + max_clk = single_dpm_table->max; if (now < SMU_13_0_6_DSCLK_THRESHOLD) { size += sysfs_emit_at(buf, size, "S: %uMhz *\n", @@ -1624,6 +1677,7 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, void *data, uint32_t *size) { + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; int ret = 0; if (amdgpu_ras_intr_triggered()) @@ -1668,6 +1722,15 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu, ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VDDBOARD: + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(BOARD_VOLTAGE))) { + *(uint32_t *)data = dpm_context->board_volt; + *size = 4; + break; + } else { + ret = -EOPNOTSUPP; + break; + } case AMDGPU_PP_SENSOR_GPU_AVG_POWER: default: ret = -EOPNOTSUPP; @@ -1935,7 +1998,7 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu, return ret; pstate_table->uclk_pstate.curr.max = uclk_table->max; } - pstate_table->uclk_pstate.custom.max = 0; + smu_v13_0_reset_custom_level(smu); return 0; case AMD_DPM_FORCED_LEVEL_MANUAL: @@ -2148,7 +2211,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, smu, SMU_UCLK, min_clk, max_clk, false); if (ret) return ret; - pstate_table->uclk_pstate.custom.max = 0; + smu_v13_0_reset_custom_level(smu); } break; case PP_OD_COMMIT_DPM_TABLE: @@ -2491,11 +2554,131 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu) return pcie_gen_to_speed(speed_level + 1); } +static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id, + void *table) +{ + const u8 num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3; + int version = smu_v13_0_6_get_metrics_version(smu); + struct amdgpu_partition_metrics_v1_0 *xcp_metrics; + struct amdgpu_device *adev = smu->adev; + int ret, inst, i, j, k, idx; + MetricsTableV0_t *metrics_v0; + MetricsTableV1_t *metrics_v1; + MetricsTableV2_t *metrics_v2; + struct amdgpu_xcp *xcp; + u32 inst_mask; + bool per_inst; + + if (!table) + return sizeof(*xcp_metrics); + + for_each_xcp(adev->xcp_mgr, xcp, i) { + if (xcp->id == xcp_id) + break; + } + if (i == adev->xcp_mgr->num_xcps) + return -EINVAL; + + xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *)table; + smu_cmn_init_partition_metrics(xcp_metrics, 1, 0); + + metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); + if (!metrics_v0) + return -ENOMEM; + + ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false); + if (ret) { + kfree(metrics_v0); + return ret; + } + + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == + IP_VERSION(13, 0, 12) && + smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) { + ret = smu_v13_0_12_get_xcp_metrics(smu, xcp, table, metrics_v0); + goto out; + } + + metrics_v1 = (MetricsTableV1_t *)metrics_v0; + metrics_v2 = (MetricsTableV2_t *)metrics_v0; + + per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS)); + + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + /* Both JPEG and VCN has same instances */ + inst = GET_INST(VCN, k); + + for (j = 0; j < num_jpeg_rings; ++j) { + xcp_metrics->jpeg_busy[(idx * num_jpeg_rings) + j] = + SMUQ10_ROUND(GET_METRIC_FIELD( + JpegBusy, + version)[(inst * num_jpeg_rings) + j]); + } + xcp_metrics->vcn_busy[idx] = + SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]); + + xcp_metrics->current_vclk0[idx] = SMUQ10_ROUND( + GET_METRIC_FIELD(VclkFrequency, version)[inst]); + 
xcp_metrics->current_dclk0[idx] = SMUQ10_ROUND( + GET_METRIC_FIELD(DclkFrequency, version)[inst]); + xcp_metrics->current_socclk[idx] = SMUQ10_ROUND( + GET_METRIC_FIELD(SocclkFrequency, version)[inst]); + + idx++; + } + + xcp_metrics->current_uclk = + SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version)); + + if (per_inst) { + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); + idx = 0; + for_each_inst(k, inst_mask) { + inst = GET_INST(GC, k); + xcp_metrics->current_gfxclk[idx] = + SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, + version)[inst]); + + xcp_metrics->gfx_busy_inst[idx] = SMUQ10_ROUND( + GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]); + xcp_metrics->gfx_busy_acc[idx] = SMUQ10_ROUND( + GET_GPU_METRIC_FIELD(GfxBusyAcc, + version)[inst]); + if (smu_v13_0_6_cap_supported( + smu, SMU_CAP(HST_LIMIT_METRICS))) { + xcp_metrics->gfx_below_host_limit_ppt_acc + [idx] = SMUQ10_ROUND( + metrics_v0->GfxclkBelowHostLimitPptAcc + [inst]); + xcp_metrics->gfx_below_host_limit_thm_acc + [idx] = SMUQ10_ROUND( + metrics_v0->GfxclkBelowHostLimitThmAcc + [inst]); + xcp_metrics->gfx_low_utilization_acc + [idx] = SMUQ10_ROUND( + metrics_v0 + ->GfxclkLowUtilizationAcc[inst]); + xcp_metrics->gfx_below_host_limit_total_acc + [idx] = SMUQ10_ROUND( + metrics_v0->GfxclkBelowHostLimitTotalAcc + [inst]); + } + idx++; + } + } +out: + kfree(metrics_v0); + + return sizeof(*xcp_metrics); +} + static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table) { struct smu_table_context *smu_table = &smu->smu_table; - struct gpu_metrics_v1_7 *gpu_metrics = - (struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table; + struct gpu_metrics_v1_8 *gpu_metrics = + (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table; int version = smu_v13_0_6_get_metrics_version(smu); int ret = 0, xcc_id, inst, i, j, k, idx; struct amdgpu_device *adev = smu->adev; @@ -2504,24 +2687,29 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table MetricsTableV2_t *metrics_v2; struct amdgpu_xcp *xcp; u16 link_width_level; + ssize_t num_bytes; u8 num_jpeg_rings; u32 inst_mask; bool per_inst; metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); - ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, true); + ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false); if (ret) { kfree(metrics_v0); return ret; } - if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) - return smu_v13_0_12_get_gpu_metrics(smu, table); + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) && + smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) { + num_bytes = smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0); + kfree(metrics_v0); + return num_bytes; + } metrics_v1 = (MetricsTableV1_t *)metrics_v0; metrics_v2 = (MetricsTableV2_t *)metrics_v0; - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 7); + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8); gpu_metrics->temperature_hotspot = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version)); @@ -2631,13 +2819,16 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc, version)); for (i = 0; i < NUM_XGMI_LINKS; i++) { - gpu_metrics->xgmi_read_data_acc[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc, version)[i]); - gpu_metrics->xgmi_write_data_acc[i] = - SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc, version)[i]); + j = amdgpu_xgmi_get_ext_link(adev, i); + if (j < 0 || j >= NUM_XGMI_LINKS) + continue; + 
gpu_metrics->xgmi_read_data_acc[j] = SMUQ10_ROUND( + GET_METRIC_FIELD(XgmiReadDataSizeAcc, version)[i]); + gpu_metrics->xgmi_write_data_acc[j] = SMUQ10_ROUND( + GET_METRIC_FIELD(XgmiWriteDataSizeAcc, version)[i]); ret = amdgpu_get_xgmi_link_status(adev, i); if (ret >= 0) - gpu_metrics->xgmi_link_status[i] = ret; + gpu_metrics->xgmi_link_status[j] = ret; } gpu_metrics->num_partition = adev->xcp_mgr->num_xcps; @@ -2674,13 +2865,20 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] = SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusyAcc, version)[inst]); - - if (smu_v13_0_6_cap_supported( - smu, SMU_CAP(HST_LIMIT_METRICS))) - gpu_metrics->xcp_stats[i].gfx_below_host_limit_acc[idx] = - SMUQ10_ROUND(GET_GPU_METRIC_FIELD - (GfxclkBelowHostLimitAcc, version) - [inst]); + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) { + gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] = + SMUQ10_ROUND + (metrics_v0->GfxclkBelowHostLimitPptAcc[inst]); + gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] = + SMUQ10_ROUND + (metrics_v0->GfxclkBelowHostLimitThmAcc[inst]); + gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] = + SMUQ10_ROUND + (metrics_v0->GfxclkLowUtilizationAcc[inst]); + gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] = + SMUQ10_ROUND + (metrics_v0->GfxclkBelowHostLimitTotalAcc[inst]); + } idx++; } } @@ -2859,14 +3057,29 @@ static int smu_v13_0_6_mode1_reset(struct smu_context *smu) return ret; } +static int smu_v13_0_6_link_reset(struct smu_context *smu) +{ + int ret = 0; + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, + SMU_RESET_MODE_4, NULL); + return ret; +} + static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu) { return true; } -static bool smu_v13_0_6_is_mode2_reset_supported(struct smu_context *smu) +static inline bool smu_v13_0_6_is_link_reset_supported(struct smu_context *smu) { - return true; + struct amdgpu_device *adev = smu->adev; + int var = (adev->pdev->device & 0xF); + + if (var == 0x1) + return true; + + return false; } static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu, @@ -2939,6 +3152,19 @@ static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask) return ret; } +static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask) +{ + int ret = 0; + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ResetVCN, inst_mask, NULL); + if (ret) + dev_err(smu->adev->dev, + "failed to send ResetVCN event with mask 0x%x\n", + inst_mask); + return ret; +} + + static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable) { struct smu_context *smu = adev->powerplay.pp_handle; @@ -3599,11 +3825,13 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .get_gpu_metrics = smu_v13_0_6_get_gpu_metrics, .get_pm_metrics = smu_v13_0_6_get_pm_metrics, + .get_xcp_metrics = smu_v13_0_6_get_xcp_metrics, .get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range, .mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported, - .mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported, + .link_reset_is_support = smu_v13_0_6_is_link_reset_supported, .mode1_reset = smu_v13_0_6_mode1_reset, .mode2_reset = smu_v13_0_6_mode2_reset, + .link_reset = smu_v13_0_6_link_reset, .wait_for_event = smu_v13_0_wait_for_event, .i2c_init = smu_v13_0_6_i2c_control_init, .i2c_fini = 
smu_v13_0_6_i2c_control_fini, @@ -3611,6 +3839,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .send_rma_reason = smu_v13_0_6_send_rma_reason, .reset_sdma = smu_v13_0_6_reset_sdma, .reset_sdma_is_supported = smu_v13_0_6_reset_sdma_is_supported, + .dpm_reset_vcn = smu_v13_0_6_reset_vcn, }; void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h index 83745909e564..67b30674fd31 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h @@ -26,6 +26,7 @@ #define SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL 0x2 #define SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL 0x4 #define SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL 0x2 +#define SMU_CAP(x) SMU_13_0_6_CAPS_##x typedef enum { /*0*/ METRICS_VERSION_V0 = 0, @@ -51,6 +52,40 @@ struct PPTable_t { bool Init; }; +enum smu_v13_0_6_caps { + SMU_CAP(DPM), + SMU_CAP(DPM_POLICY), + SMU_CAP(OTHER_END_METRICS), + SMU_CAP(SET_UCLK_MAX), + SMU_CAP(PCIE_METRICS), + SMU_CAP(MCA_DEBUG_MODE), + SMU_CAP(PER_INST_METRICS), + SMU_CAP(CTF_LIMIT), + SMU_CAP(RMA_MSG), + SMU_CAP(ACA_SYND), + SMU_CAP(SDMA_RESET), + SMU_CAP(STATIC_METRICS), + SMU_CAP(HST_LIMIT_METRICS), + SMU_CAP(BOARD_VOLTAGE), + SMU_CAP(PLDM_VERSION), + SMU_CAP(ALL), +}; + extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu); +bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap); +int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu); +int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table, + bool bypass_cache); +bool smu_v13_0_12_is_dpm_running(struct smu_context *smu); +int smu_v13_0_12_get_max_metrics_size(void); +int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu); +int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, uint32_t *value); +ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics); +ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, + struct amdgpu_xcp *xcp, void *table, + void *smu_metrics); +extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[]; +extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[]; #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index c8f4f6fb4083..c96fa5e49ed6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -579,8 +579,6 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu) PPTable_t *driver_ppt = smu->smu_table.driver_pptable; SkuTable_t *skutable = &driver_ppt->SkuTable; struct smu_13_0_dpm_table *dpm_table; - struct smu_13_0_pcie_table *pcie_table; - uint32_t link_level; int ret = 0; /* socclk dpm table setup */ @@ -687,24 +685,6 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu) dpm_table->max = dpm_table->dpm_levels[0].value; } - /* lclk dpm table setup */ - pcie_table = &dpm_context->dpm_tables.pcie_table; - pcie_table->num_of_link_levels = 0; - for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { - if (!skutable->PcieGenSpeed[link_level] && - !skutable->PcieLaneCount[link_level] && - !skutable->LclkFreq[link_level]) - continue; - - pcie_table->pcie_gen[pcie_table->num_of_link_levels] = - skutable->PcieGenSpeed[link_level]; - pcie_table->pcie_lane[pcie_table->num_of_link_levels] = 
- skutable->PcieLaneCount[link_level]; - pcie_table->clk_freq[pcie_table->num_of_link_levels] = - skutable->LclkFreq[link_level]; - pcie_table->num_of_link_levels++; - } - /* dcefclk dpm table setup */ dpm_table = &dpm_context->dpm_tables.dcef_table; if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) { @@ -2739,6 +2719,89 @@ static int smu_v13_0_7_set_power_limit(struct smu_context *smu, return 0; } +static int smu_v13_0_7_update_pcie_parameters(struct smu_context *smu, + uint8_t pcie_gen_cap, + uint8_t pcie_width_cap) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_13_0_pcie_table *pcie_table = + &dpm_context->dpm_tables.pcie_table; + int num_of_levels; + int link_level; + uint32_t smu_pcie_arg; + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + SkuTable_t *skutable = &pptable->SkuTable; + int ret = 0; + int i; + + pcie_table->num_of_link_levels = 0; + for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { + if (!skutable->PcieGenSpeed[link_level] && + !skutable->PcieLaneCount[link_level] && + !skutable->LclkFreq[link_level]) + continue; + + pcie_table->pcie_gen[pcie_table->num_of_link_levels] = + skutable->PcieGenSpeed[link_level]; + pcie_table->pcie_lane[pcie_table->num_of_link_levels] = + skutable->PcieLaneCount[link_level]; + pcie_table->clk_freq[pcie_table->num_of_link_levels] = + skutable->LclkFreq[link_level]; + pcie_table->num_of_link_levels++; + } + + num_of_levels = pcie_table->num_of_link_levels; + if (!num_of_levels) + return 0; + + if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { + if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap) + pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1]; + + if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap) + pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1]; + + /* Force all levels to use the same settings */ + for (i = 0; i < num_of_levels; i++) { + pcie_table->pcie_gen[i] = pcie_gen_cap; + pcie_table->pcie_lane[i] = pcie_width_cap; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } + } else { + for (i = 0; i < num_of_levels; i++) { + if (pcie_table->pcie_gen[i] > pcie_gen_cap || + pcie_table->pcie_lane[i] > pcie_width_cap) { + pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ? + pcie_gen_cap : pcie_table->pcie_gen[i]; + pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ? 
+ pcie_width_cap : pcie_table->pcie_lane[i]; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } + } + } + + return ret; +} + static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask, .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table, @@ -2768,7 +2831,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .feature_is_enabled = smu_cmn_feature_is_enabled, .print_clk_levels = smu_v13_0_7_print_clk_levels, .force_clk_levels = smu_v13_0_7_force_clk_levels, - .update_pcie_parameters = smu_v13_0_update_pcie_parameters, + .update_pcie_parameters = smu_v13_0_7_update_pcie_parameters, .get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range, .register_irq_handler = smu_v13_0_register_irq_handler, .enable_thermal_alert = smu_v13_0_enable_thermal_alert, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 76c1adda83db..f9b0938c57ea 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -62,13 +62,14 @@ const int decoded_link_width[8] = {0, 1, 2, 4, 8, 12, 16, 32}; MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin"); MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin"); +MODULE_FIRMWARE("amdgpu/smu_14_0_3_kicker.bin"); #define ENABLE_IMU_ARG_GFXOFF_ENABLE 1 int smu_v14_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - char ucode_prefix[15]; + char ucode_prefix[30]; int err = 0; const struct smc_firmware_header_v1_0 *hdr; const struct common_firmware_header *header; @@ -79,8 +80,12 @@ int smu_v14_0_init_microcode(struct smu_context *smu) return 0; amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); - err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, - "amdgpu/%s.bin", ucode_prefix); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_kicker.bin", ucode_prefix); + else + err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s.bin", ucode_prefix); if (err) goto out; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c index 84f9b007b59f..fe00c84b1cc6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c @@ -1207,11 +1207,13 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu, static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, - uint32_t max) + u32 min, + u32 max, + bool __always_unused automatic) { - enum smu_message_type msg_set_min, msg_set_max; - int ret = 0; + enum smu_message_type msg_set_min = SMU_MSG_MAX_COUNT; + enum smu_message_type msg_set_max = SMU_MSG_MAX_COUNT; + int ret = -EINVAL; if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type)) return -EINVAL; @@ -1240,16 +1242,23 @@ static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu, msg_set_min = SMU_MSG_SetHardMinVcn1; msg_set_max = SMU_MSG_SetSoftMaxVcn1; break; + case SMU_ISPICLK: + msg_set_min = SMU_MSG_SetHardMinIspiclkByFreq; + break; + case SMU_ISPXCLK: + msg_set_min = SMU_MSG_SetHardMinIspxclkByFreq; + break; default: return 
-EINVAL; } - ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL); - if (ret) - return ret; + if (min && msg_set_min != SMU_MSG_MAX_COUNT) + ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL); + + if (max && msg_set_max != SMU_MSG_MAX_COUNT) + ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_max, max, NULL); - return smu_cmn_send_smc_msg_with_param(smu, msg_set_max, - max, NULL); + return ret; } static int smu_v14_0_0_force_clk_levels(struct smu_context *smu, @@ -1278,7 +1287,7 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu, if (ret) break; - ret = smu_v14_0_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = smu_v14_0_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); break; default: ret = -EINVAL; @@ -1426,7 +1435,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_min, - sclk_max); + sclk_max, + false); if (ret) return ret; @@ -1438,7 +1448,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_min, - fclk_max); + fclk_max, + false); if (ret) return ret; } @@ -1447,7 +1458,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_min, - socclk_max); + socclk_max, + false); if (ret) return ret; } @@ -1456,7 +1468,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_min, - vclk_max); + vclk_max, + false); if (ret) return ret; } @@ -1465,7 +1478,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_VCLK1, vclk1_min, - vclk1_max); + vclk1_max, + false); if (ret) return ret; } @@ -1474,7 +1488,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_min, - dclk_max); + dclk_max, + false); if (ret) return ret; } @@ -1483,7 +1498,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_DCLK1, dclk1_min, - dclk1_max); + dclk1_max, + false); if (ret) return ret; } @@ -1533,6 +1549,14 @@ static int smu_v14_0_0_set_vpe_enable(struct smu_context *smu, 0, NULL); } +static int smu_v14_0_0_set_isp_enable(struct smu_context *smu, + bool enable) +{ + return smu_cmn_send_smc_msg_with_param(smu, enable ? 
+ SMU_MSG_PowerUpIspByTile : SMU_MSG_PowerDownIspByTile, + ISP_ALL_TILES_MASK, NULL); +} + static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu, bool enable) { @@ -1662,6 +1686,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { .gfx_off_control = smu_v14_0_gfx_off_control, .mode2_reset = smu_v14_0_0_mode2_reset, .get_dpm_ultimate_freq = smu_v14_0_common_get_dpm_ultimate_freq, + .set_soft_freq_limited_range = smu_v14_0_0_set_soft_freq_limited_range, .od_edit_dpm_table = smu_v14_0_od_edit_dpm_table, .print_clk_levels = smu_v14_0_0_print_clk_levels, .force_clk_levels = smu_v14_0_0_force_clk_levels, @@ -1669,6 +1694,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { .set_fine_grain_gfx_freq_parameters = smu_v14_0_common_set_fine_grain_gfx_freq_parameters, .set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu, .dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable, + .dpm_set_isp_enable = smu_v14_0_0_set_isp_enable, .dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable, .get_dpm_clock_table = smu_v14_0_common_get_dpm_table, .set_mall_enable = smu_v14_0_common_set_mall_enable, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index f7cfe1f35cae..3aea32baea3d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -79,6 +79,7 @@ #define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8 #define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9 #define PP_OD_FEATURE_FAN_MINIMUM_PWM 10 +#define PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE 11 static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), @@ -501,8 +502,6 @@ static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu) PPTable_t *pptable = table_context->driver_pptable; SkuTable_t *skutable = &pptable->SkuTable; struct smu_14_0_dpm_table *dpm_table; - struct smu_14_0_pcie_table *pcie_table; - uint32_t link_level; int ret = 0; /* socclk dpm table setup */ @@ -618,27 +617,6 @@ static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu) dpm_table->max = dpm_table->dpm_levels[0].value; } - /* lclk dpm table setup */ - pcie_table = &dpm_context->dpm_tables.pcie_table; - pcie_table->num_of_link_levels = 0; - for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { - if (!skutable->PcieGenSpeed[link_level] && - !skutable->PcieLaneCount[link_level] && - !skutable->LclkFreq[link_level]) - continue; - - pcie_table->pcie_gen[pcie_table->num_of_link_levels] = - skutable->PcieGenSpeed[link_level]; - pcie_table->pcie_lane[pcie_table->num_of_link_levels] = - skutable->PcieLaneCount[link_level]; - pcie_table->clk_freq[pcie_table->num_of_link_levels] = - skutable->LclkFreq[link_level]; - pcie_table->num_of_link_levels++; - - if (link_level == 0) - link_level++; - } - /* dcefclk dpm table setup */ dpm_table = &dpm_context->dpm_tables.dcef_table; if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) { @@ -1052,6 +1030,10 @@ static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu, od_min_setting = overdrive_lowerlimits->FanMinimumPwm; od_max_setting = overdrive_upperlimits->FanMinimumPwm; break; + case PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE: + od_min_setting = overdrive_lowerlimits->FanZeroRpmEnable; + od_max_setting = overdrive_upperlimits->FanZeroRpmEnable; + break; default: od_min_setting = od_max_setting = INT_MAX; break; @@ -1330,6 +1312,24 @@ static int 
smu_v14_0_2_print_clk_levels(struct smu_context *smu, min_value, max_value); break; + case SMU_OD_FAN_ZERO_RPM_ENABLE: + if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_ZERO_FAN_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_ENABLE:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanZeroRpmEnable); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ZERO_RPM_ENABLE: %u %u\n", + min_value, max_value); + break; + case SMU_OD_RANGE: if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) && !smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) && @@ -1464,10 +1464,31 @@ static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu, struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; struct smu_14_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table; - int num_of_levels = pcie_table->num_of_link_levels; + int num_of_levels; uint32_t smu_pcie_arg; - int ret, i; + uint32_t link_level; + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + SkuTable_t *skutable = &pptable->SkuTable; + int ret = 0; + int i; + + pcie_table->num_of_link_levels = 0; + for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { + if (!skutable->PcieGenSpeed[link_level] && + !skutable->PcieLaneCount[link_level] && + !skutable->LclkFreq[link_level]) + continue; + pcie_table->pcie_gen[pcie_table->num_of_link_levels] = + skutable->PcieGenSpeed[link_level]; + pcie_table->pcie_lane[pcie_table->num_of_link_levels] = + skutable->PcieLaneCount[link_level]; + pcie_table->clk_freq[pcie_table->num_of_link_levels] = + skutable->LclkFreq[link_level]; + pcie_table->num_of_link_levels++; + } + num_of_levels = pcie_table->num_of_link_levels; if (!num_of_levels) return 0; @@ -1482,30 +1503,40 @@ static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu, for (i = 0; i < num_of_levels; i++) { pcie_table->pcie_gen[i] = pcie_gen_cap; pcie_table->pcie_lane[i] = pcie_width_cap; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; } } else { for (i = 0; i < num_of_levels; i++) { - if (pcie_table->pcie_gen[i] > pcie_gen_cap) - pcie_table->pcie_gen[i] = pcie_gen_cap; - if (pcie_table->pcie_lane[i] > pcie_width_cap) - pcie_table->pcie_lane[i] = pcie_width_cap; - } - } - - for (i = 0; i < num_of_levels; i++) { - smu_pcie_arg = i << 16; - smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; - smu_pcie_arg |= pcie_table->pcie_lane[i]; - - ret = smu_cmn_send_smc_msg_with_param(smu, + if (pcie_table->pcie_gen[i] > pcie_gen_cap || + pcie_table->pcie_lane[i] > pcie_width_cap) { + pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ? + pcie_gen_cap : pcie_table->pcie_gen[i]; + pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ? 
+ pcie_width_cap : pcie_table->pcie_lane[i]; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_OverridePcieParameters, smu_pcie_arg, NULL); - if (ret) - return ret; + if (ret) + break; + } + } } - return 0; + return ret; } static const struct smu_temperature_range smu14_thermal_policy[] = { @@ -2270,7 +2301,9 @@ static void smu_v14_0_2_set_supported_od_feature_mask(struct smu_context *smu) OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE | OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET | OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE | - OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET; + OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET | + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE | + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET; } static int smu_v14_0_2_get_overdrive_table(struct smu_context *smu, @@ -2349,6 +2382,8 @@ static int smu_v14_0_2_set_default_od_settings(struct smu_context *smu) user_od_table_bak.OverDriveTable.FanTargetTemperature; user_od_table->OverDriveTable.FanMinimumPwm = user_od_table_bak.OverDriveTable.FanMinimumPwm; + user_od_table->OverDriveTable.FanZeroRpmEnable = + user_od_table_bak.OverDriveTable.FanZeroRpmEnable; } smu_v14_0_2_set_supported_od_feature_mask(smu); @@ -2396,6 +2431,11 @@ static int smu_v14_0_2_od_restore_table_single(struct smu_context *smu, long inp od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); break; + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: + od_table->OverDriveTable.FanZeroRpmEnable = + boot_overdrive_table->OverDriveTable.FanZeroRpmEnable; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; case PP_OD_EDIT_ACOUSTIC_LIMIT: od_table->OverDriveTable.AcousticLimitRpmThreshold = boot_overdrive_table->OverDriveTable.AcousticLimitRpmThreshold; @@ -2678,6 +2718,27 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu, od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); break; + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) { + dev_warn(adev->dev, "Zero RPM setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "zero RPM enable setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanZeroRpmEnable = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); + break; + case PP_OD_RESTORE_DEFAULT_TABLE: if (size == 1) { ret = smu_v14_0_2_od_restore_table_single(smu, input[0]); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index d834d134ad2b..59f9abd0f7b8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -86,6 +86,7 @@ static void smu_cmn_read_arg(struct smu_context *smu, #define SMU_RESP_BUSY_OTHER 0xFC #define SMU_RESP_DEBUG_END 0xFB +#define SMU_RESP_UNEXP (~0U) /** * __smu_cmn_poll_stat -- poll for a status from the SMU * @smu: a pointer to SMU context @@ -171,6 +172,15 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu, dev_err_ratelimited(adev->dev, "SMU: I'm debugging!"); break; + case SMU_RESP_UNEXP: + if (amdgpu_device_bus_status_check(smu->adev)) { + /* print error 
immediately if device is off the bus */ + dev_err(adev->dev, + "SMU: response:0x%08X for index:%d param:0x%08X message:%s?", + reg_c2pmsg_90, msg_index, param, message); + break; + } + fallthrough; default: dev_err_ratelimited(adev->dev, "SMU: response:0x%08X for index:%d param:0x%08X message:%s?", @@ -1051,70 +1061,6 @@ int smu_cmn_get_combo_pptable(struct smu_context *smu) false); } -void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev) -{ - struct metrics_table_header *header = (struct metrics_table_header *)table; - uint16_t structure_size; - -#define METRICS_VERSION(a, b) ((a << 16) | b) - - switch (METRICS_VERSION(frev, crev)) { - case METRICS_VERSION(1, 0): - structure_size = sizeof(struct gpu_metrics_v1_0); - break; - case METRICS_VERSION(1, 1): - structure_size = sizeof(struct gpu_metrics_v1_1); - break; - case METRICS_VERSION(1, 2): - structure_size = sizeof(struct gpu_metrics_v1_2); - break; - case METRICS_VERSION(1, 3): - structure_size = sizeof(struct gpu_metrics_v1_3); - break; - case METRICS_VERSION(1, 4): - structure_size = sizeof(struct gpu_metrics_v1_4); - break; - case METRICS_VERSION(1, 5): - structure_size = sizeof(struct gpu_metrics_v1_5); - break; - case METRICS_VERSION(1, 6): - structure_size = sizeof(struct gpu_metrics_v1_6); - break; - case METRICS_VERSION(1, 7): - structure_size = sizeof(struct gpu_metrics_v1_7); - break; - case METRICS_VERSION(2, 0): - structure_size = sizeof(struct gpu_metrics_v2_0); - break; - case METRICS_VERSION(2, 1): - structure_size = sizeof(struct gpu_metrics_v2_1); - break; - case METRICS_VERSION(2, 2): - structure_size = sizeof(struct gpu_metrics_v2_2); - break; - case METRICS_VERSION(2, 3): - structure_size = sizeof(struct gpu_metrics_v2_3); - break; - case METRICS_VERSION(2, 4): - structure_size = sizeof(struct gpu_metrics_v2_4); - break; - case METRICS_VERSION(3, 0): - structure_size = sizeof(struct gpu_metrics_v3_0); - break; - default: - return; - } - -#undef METRICS_VERSION - - memset(header, 0xFF, structure_size); - - header->format_revision = frev; - header->content_revision = crev; - header->structure_size = structure_size; - -} - int smu_cmn_set_mp1_state(struct smu_context *smu, enum pp_mp1_state mp1_state) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index a020277dec3e..a608cdbdada4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -40,6 +40,31 @@ #define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8 #define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9 +#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \ + do { \ + typecheck(struct gpu_metrics_v##frev##_##crev *, (ptr)); \ + struct gpu_metrics_v##frev##_##crev *tmp = (ptr); \ + struct metrics_table_header *header = \ + (struct metrics_table_header *)tmp; \ + memset(header, 0xFF, sizeof(*tmp)); \ + header->format_revision = frev; \ + header->content_revision = crev; \ + header->structure_size = sizeof(*tmp); \ + } while (0) + +#define smu_cmn_init_partition_metrics(ptr, fr, cr) \ + do { \ + typecheck(struct amdgpu_partition_metrics_v##fr##_##cr *, \ + (ptr)); \ + struct amdgpu_partition_metrics_v##fr##_##cr *tmp = (ptr); \ + struct metrics_table_header *header = \ + (struct metrics_table_header *)tmp; \ + memset(header, 0xFF, sizeof(*tmp)); \ + header->format_revision = fr; \ + header->content_revision = cr; \ + header->structure_size = sizeof(*tmp); \ + } while (0) + extern const int link_speed[]; /* Helper to Convert from PCIE Gen 1/2/3/4/5/6 to 0.1 
GT/s speed units */ @@ -125,8 +150,6 @@ int smu_cmn_get_metrics_table(struct smu_context *smu, int smu_cmn_get_combo_pptable(struct smu_context *smu); -void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev); - int smu_cmn_set_mp1_state(struct smu_context *smu, enum pp_mp1_state mp1_state);
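The smu_cmn.h hunk above replaces the runtime switch in smu_cmn_init_soft_gpu_metrics() with a token-pasting macro, so frev/crev select the metrics struct type at compile time and a mismatched pointer trips typecheck() instead of falling through the old default: case. Below is a minimal userspace sketch of that pattern, assuming metrics_table_header keeps its usual uint16_t/uint8_t layout; the struct body is abridged and the typecheck() stub merely stands in for the kernel's <linux/typecheck.h>:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct metrics_table_header {
	uint16_t structure_size;
	uint8_t  format_revision;
	uint8_t  content_revision;
};

/* Abridged stand-in for the real gpu_metrics_v1_8. */
struct gpu_metrics_v1_8 {
	struct metrics_table_header common_header;
	uint16_t temperature_hotspot;
};

/* Simplified typecheck(): compile-time pointer-type comparison that,
 * like the kernel macro, warns on a type mismatch. */
#define typecheck(type, x) \
	({ type __t1; typeof(x) __t2; (void)(&__t1 == &__t2); 1; })

#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev)                   \
	do {                                                             \
		typecheck(struct gpu_metrics_v##frev##_##crev *, (ptr)); \
		struct gpu_metrics_v##frev##_##crev *tmp = (ptr);        \
		struct metrics_table_header *header =                    \
			(struct metrics_table_header *)tmp;              \
		memset(header, 0xFF, sizeof(*tmp));                      \
		header->format_revision = frev;                          \
		header->content_revision = crev;                         \
		header->structure_size = sizeof(*tmp);                   \
	} while (0)

int main(void)
{
	struct gpu_metrics_v1_8 m;

	/* Mirrors the call site in smu_v13_0_6_get_gpu_metrics() above. */
	smu_cmn_init_soft_gpu_metrics(&m, 1, 8);
	printf("size=%d frev=%d crev=%d\n", m.common_header.structure_size,
	       m.common_header.format_revision, m.common_header.content_revision);
	return 0;
}

The same do/while shape covers smu_cmn_init_partition_metrics(); only the struct name prefix differs.

The PCIe hunks in smu_v13_0_7_update_pcie_parameters() and smu_v14_0_2_update_pcie_parameters() also repeat one argument encoding for SMU_MSG_OverridePcieParameters: link level in bits 16 and up, gen speed in bits 8-15, encoded lane count in bits 0-7. A hypothetical helper showing just that packing (make_pcie_override_arg is illustrative, not part of the patch); per the decoded_link_width[] table quoted earlier, an encoded lane value of 6 corresponds to x16:

#include <stdint.h>
#include <stdio.h>

/* Pack one link level the way the hunks above do:
 * smu_pcie_arg = i << 16 | pcie_gen[i] << 8 | pcie_lane[i]; */
static uint32_t make_pcie_override_arg(uint32_t level, uint8_t gen, uint8_t lane)
{
	return (level << 16) | ((uint32_t)gen << 8) | lane;
}

int main(void)
{
	/* level 1, gen4 (encoded 3), x16 (encoded lane value 6) */
	printf("0x%08x\n", make_pcie_override_arg(1, 3, 6));
	return 0;
}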