Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1461
1 file changed, 974 insertions(+), 487 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7753a2e64d41..d100bb7a137c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -25,6 +25,8 @@
  * Alex Deucher
  * Jerome Glisse
  */
+
+#include <linux/aperture.h>
 #include <linux/power_supply.h>
 #include <linux/kthread.h>
 #include <linux/module.h>
@@ -35,10 +37,9 @@
 #include <linux/pci-p2pdma.h>
 #include <linux/apple-gmux.h>
 
-#include <drm/drm_aperture.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_client_event.h>
 #include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/amdgpu_drm.h>
 #include <linux/device.h>
@@ -74,6 +75,7 @@
 #include "amdgpu_fru_eeprom.h"
 #include "amdgpu_reset.h"
 #include "amdgpu_virt.h"
+#include "amdgpu_dev_coredump.h"
 
 #include <linux/suspend.h>
 #include <drm/task_barrier.h>
@@ -143,13 +145,70 @@ const char *amdgpu_asic_name[] = {
 	"LAST",
 };
 
+#define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMD_IP_BLOCK_TYPE_NUM - 1, 0)
+/*
+ * Default init level where all blocks are expected to be initialized. This is
+ * the level of initialization expected by default and also after a full reset
+ * of the device.
+ */
+struct amdgpu_init_level amdgpu_init_default = {
+	.level = AMDGPU_INIT_LEVEL_DEFAULT,
+	.hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
+};
+
+struct amdgpu_init_level amdgpu_init_recovery = {
+	.level = AMDGPU_INIT_LEVEL_RESET_RECOVERY,
+	.hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
+};
+
+/*
+ * Minimal blocks needed to be initialized before an XGMI hive can be reset.
+ * This is used for cases like reset on initialization where the entire hive
+ * needs to be reset before first use.
+ */
+struct amdgpu_init_level amdgpu_init_minimal_xgmi = {
+	.level = AMDGPU_INIT_LEVEL_MINIMAL_XGMI,
+	.hwini_ip_block_mask =
+		BIT(AMD_IP_BLOCK_TYPE_GMC) | BIT(AMD_IP_BLOCK_TYPE_SMC) |
+		BIT(AMD_IP_BLOCK_TYPE_COMMON) | BIT(AMD_IP_BLOCK_TYPE_IH) |
+		BIT(AMD_IP_BLOCK_TYPE_PSP)
+};
+
+static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev,
+					     enum amd_ip_block_type block)
+{
+	return (adev->init_lvl->hwini_ip_block_mask & (1U << block)) != 0;
+}
+
+void amdgpu_set_init_level(struct amdgpu_device *adev,
+			   enum amdgpu_init_lvl_id lvl)
+{
+	switch (lvl) {
+	case AMDGPU_INIT_LEVEL_MINIMAL_XGMI:
+		adev->init_lvl = &amdgpu_init_minimal_xgmi;
+		break;
+	case AMDGPU_INIT_LEVEL_RESET_RECOVERY:
+		adev->init_lvl = &amdgpu_init_recovery;
+		break;
+	case AMDGPU_INIT_LEVEL_DEFAULT:
+		fallthrough;
+	default:
+		adev->init_lvl = &amdgpu_init_default;
+		break;
+	}
+}
+
+static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev);
+static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
+				     void *data);
+
 /**
  * DOC: pcie_replay_count
  *
  * The amdgpu driver provides a sysfs API for reporting the total number
- * of PCIe replays (NAKs)
+ * of PCIe replays (NAKs).
  * The file pcie_replay_count is used for this and returns the total
- * number of replays as a sum of the NAKs generated and NAKs received
+ * number of replays as a sum of the NAKs generated and NAKs received.
  */
 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
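The init-level scheme introduced above gates per-IP hardware init with a bitmask: a block only gets hw init when its bit is set in the current level's hwini_ip_block_mask. A standalone userspace-C model of that gating follows; the enum values and names are illustrative stand-ins, not the driver's real AMD_IP_BLOCK_TYPE_* numbering.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative block types; not the kernel's real numbering. */
enum ip_block_type { IP_COMMON, IP_GMC, IP_IH, IP_PSP, IP_SMC, IP_GFX, IP_NUM };

struct init_level {
	const char *name;
	uint32_t hwini_mask;	/* bit n set => block n gets hw init */
};

/* Mirrors the minimal-XGMI level: only blocks the SMU needs for a hive reset. */
static const struct init_level minimal_xgmi = {
	.name = "minimal-xgmi",
	.hwini_mask = (1U << IP_COMMON) | (1U << IP_GMC) |
		      (1U << IP_IH) | (1U << IP_PSP) | (1U << IP_SMC),
};

static bool member_of_hwini(const struct init_level *lvl, enum ip_block_type t)
{
	return (lvl->hwini_mask & (1U << t)) != 0;
}

int main(void)
{
	for (int t = IP_COMMON; t < IP_NUM; t++)
		printf("block %d: %s\n", t,
		       member_of_hwini(&minimal_xgmi, t) ? "hw init" : "skipped");
	return 0;
}

With this shape, switching levels (default, recovery, minimal-XGMI) is just a pointer swap, and every init/suspend loop can consult one predicate instead of open-coded type lists.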
@@ -224,6 +283,42 @@ void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
 	sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
 }
 
+int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block)
+{
+	int r;
+
+	if (ip_block->version->funcs->suspend) {
+		r = ip_block->version->funcs->suspend(ip_block);
+		if (r) {
+			dev_err(ip_block->adev->dev,
+				"suspend of IP block <%s> failed %d\n",
+				ip_block->version->funcs->name, r);
+			return r;
+		}
+	}
+
+	ip_block->status.hw = false;
+	return 0;
+}
+
+int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block)
+{
+	int r;
+
+	if (ip_block->version->funcs->resume) {
+		r = ip_block->version->funcs->resume(ip_block);
+		if (r) {
+			dev_err(ip_block->adev->dev,
+				"resume of IP block <%s> failed %d\n",
+				ip_block->version->funcs->name, r);
+			return r;
+		}
+	}
+
+	ip_block->status.hw = true;
+	return 0;
+}
+
 /**
  * DOC: board_info
  *
@@ -324,6 +419,9 @@ bool amdgpu_device_supports_boco(struct drm_device *dev)
 {
 	struct amdgpu_device *adev = drm_to_adev(dev);
 
+	if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
+		return false;
+
 	if (adev->has_pr3 ||
 	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
 		return true;
@@ -335,16 +433,93 @@ bool amdgpu_device_supports_boco(struct drm_device *dev)
  *
  * @dev: drm_device pointer
  *
- * Returns true if the device supporte BACO,
- * otherwise return false.
+ * Return:
+ * 1 if the device supports BACO;
+ * 3 if the device supports MACO (only works if BACO is supported)
+ * otherwise return 0.
  */
-bool amdgpu_device_supports_baco(struct drm_device *dev)
+int amdgpu_device_supports_baco(struct drm_device *dev)
 {
 	struct amdgpu_device *adev = drm_to_adev(dev);
 
 	return amdgpu_asic_supports_baco(adev);
 }
 
+void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
+{
+	struct drm_device *dev;
+	int bamaco_support;
+
+	dev = adev_to_drm(adev);
+
+	adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
+	bamaco_support = amdgpu_device_supports_baco(dev);
+
+	switch (amdgpu_runtime_pm) {
+	case 2:
+		if (bamaco_support & MACO_SUPPORT) {
+			adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
+			dev_info(adev->dev, "Forcing BAMACO for runtime pm\n");
+		} else if (bamaco_support == BACO_SUPPORT) {
+			adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
+			dev_info(adev->dev, "Requested mode BAMACO not available, fallback to use BACO\n");
+		}
+		break;
+	case 1:
+		if (bamaco_support & BACO_SUPPORT) {
+			adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
+			dev_info(adev->dev, "Forcing BACO for runtime pm\n");
+		}
+		break;
+	case -1:
+	case -2:
+		if (amdgpu_device_supports_px(dev)) { /* enable PX as runtime mode */
+			adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
+			dev_info(adev->dev, "Using ATPX for runtime pm\n");
+		} else if (amdgpu_device_supports_boco(dev)) { /* enable boco as runtime mode */
+			adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
+			dev_info(adev->dev, "Using BOCO for runtime pm\n");
+		} else {
+			if (!bamaco_support)
+				goto no_runtime_pm;
+
+			switch (adev->asic_type) {
+			case CHIP_VEGA20:
+			case CHIP_ARCTURUS:
+				/* BACO is not supported on vega20 and arcturus */
+				break;
+			case CHIP_VEGA10:
+				/* enable BACO as runpm mode if noretry=0 */
+				if (!adev->gmc.noretry)
+					adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
+				break;
+			default:
+				/* enable BACO as runpm mode on CI+ */
+				adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
+				break;
+			}
+
+			if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) {
+				if (bamaco_support & MACO_SUPPORT) {
+					adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
+					dev_info(adev->dev, "Using BAMACO for runtime pm\n");
+				} else {
+					dev_info(adev->dev, "Using BACO for runtime pm\n");
+				}
+			}
+		}
+		break;
+	case 0:
+		dev_info(adev->dev, "runtime pm is manually disabled\n");
+		break;
+	default:
+		break;
	}
+
+no_runtime_pm:
+	if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE)
+		dev_info(adev->dev, "Runtime PM not available\n");
+}
 
 /**
  * amdgpu_device_supports_smart_shift - Is the device dGPU with
  * smart shift support
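The detection above resolves the runtime-PM mode in a fixed order: a forced mode from the module parameter wins, then the auto case prefers PX, then BOCO, then BAMACO/BACO. A compact model of that precedence as a pure function (hypothetical names; the per-ASIC Vega20/Arcturus/Vega10 exceptions are folded into the caps argument here):

#include <stdio.h>

enum rpm_mode { RPM_NONE, RPM_PX, RPM_BOCO, RPM_BACO, RPM_BAMACO };

#define BACO_SUPPORT	(1 << 0)
#define MACO_SUPPORT	(1 << 1)

static enum rpm_mode pick_rpm_mode(int runtime_pm_param, int baco_caps,
				   int has_px, int has_boco)
{
	switch (runtime_pm_param) {
	case 2:		/* user forces BAMACO, fall back to BACO */
		if (baco_caps & MACO_SUPPORT)
			return RPM_BAMACO;
		return (baco_caps & BACO_SUPPORT) ? RPM_BACO : RPM_NONE;
	case 1:		/* user forces BACO */
		return (baco_caps & BACO_SUPPORT) ? RPM_BACO : RPM_NONE;
	case -1:
	case -2:	/* auto: PX, then BOCO, then BAMACO/BACO */
		if (has_px)
			return RPM_PX;
		if (has_boco)
			return RPM_BOCO;
		if (baco_caps & MACO_SUPPORT)
			return RPM_BAMACO;
		return (baco_caps & BACO_SUPPORT) ? RPM_BACO : RPM_NONE;
	default:	/* 0: manually disabled */
		return RPM_NONE;
	}
}

int main(void)
{
	/* auto mode on a BACO+MACO capable dGPU resolves to BAMACO (4) */
	printf("%d\n", pick_rpm_mode(-1, BACO_SUPPORT | MACO_SUPPORT, 0, 0));
	return 0;
}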
@@ -407,7 +582,7 @@ void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
 }
 
 /**
- * amdgpu_device_aper_access - access vram by vram aperature
+ * amdgpu_device_aper_access - access vram by vram aperture
  *
  * @adev: amdgpu_device pointer
  * @pos: offset of the buffer in vram
@@ -498,7 +673,7 @@ bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
 * here is that the GPU reset is not running on another thread in parallel.
 *
 * For this we trylock the read side of the reset semaphore, if that succeeds
- * we know that the reset is not running in paralell.
+ * we know that the reset is not running in parallel.
 *
 * If the trylock fails we assert that we are either already holding the read
 * side of the lock or are the reset thread itself and hold the write side of
@@ -599,7 +774,7 @@ uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
							 GC_HWIP, false,
							 &rlcg_flag)) {
-			ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag, xcc_id);
+			ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag, GET_INST(GC, xcc_id));
		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
			   amdgpu_sriov_runtime(adev) &&
			   down_read_trylock(&adev->reset_domain->sem)) {
@@ -730,7 +905,7 @@ void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
							 GC_HWIP, true,
							 &rlcg_flag)) {
-			amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag, xcc_id);
+			amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag, GET_INST(GC, xcc_id));
		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
			   amdgpu_sriov_runtime(adev) &&
			   down_read_trylock(&adev->reset_domain->sem)) {
@@ -1228,6 +1403,8 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev)
	amdgpu_asic_pre_asic_init(adev);
 
	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
+	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
		amdgpu_psp_wait_for_bootloader(adev);
		ret = amdgpu_atomfirmware_asic_init(adev, true);
@@ -1402,13 +1579,17 @@ static int amdgpu_device_wb_init(struct amdgpu_device *adev)
 */
 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
 {
-	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
+	unsigned long flags, offset;
 
+	spin_lock_irqsave(&adev->wb.lock, flags);
+	offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
+		spin_unlock_irqrestore(&adev->wb.lock, flags);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
+		spin_unlock_irqrestore(&adev->wb.lock, flags);
		return -EINVAL;
	}
 }
@@ -1423,9 +1604,13 @@ int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
 */
 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
 {
+	unsigned long flags;
+
	wb >>= 3;
+	spin_lock_irqsave(&adev->wb.lock, flags);
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
+	spin_unlock_irqrestore(&adev->wb.lock, flags);
 }
 
 /**
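The two hunks above close a race in the writeback-slot allocator: the free-bit scan and the bit set/clear now happen under one spinlock, so two callers can no longer claim the same slot. A self-contained userspace model of the same pattern (a mutex standing in for the kernel spinlock, a plain u64 for the bitmap):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_WB 64

static uint64_t wb_used;
static pthread_mutex_t wb_lock = PTHREAD_MUTEX_INITIALIZER;

/* Scan for the first free slot and claim it atomically w.r.t. other callers. */
static int wb_get(uint32_t *wb)
{
	pthread_mutex_lock(&wb_lock);
	for (uint32_t i = 0; i < NUM_WB; i++) {
		if (!(wb_used & (1ULL << i))) {
			wb_used |= 1ULL << i;
			pthread_mutex_unlock(&wb_lock);
			*wb = i << 3;	/* dword offset, as in the driver */
			return 0;
		}
	}
	pthread_mutex_unlock(&wb_lock);
	return -1;	/* no free slot */
}

static void wb_free(uint32_t wb)
{
	wb >>= 3;
	pthread_mutex_lock(&wb_lock);
	if (wb < NUM_WB)
		wb_used &= ~(1ULL << wb);
	pthread_mutex_unlock(&wb_lock);
}

int main(void)
{
	uint32_t wb;

	if (wb_get(&wb) == 0) {
		printf("got wb offset %u\n", wb);
		wb_free(wb);
	}
	return 0;
}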
@@ -1455,7 +1640,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 
	/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
	if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
-		DRM_WARN("System can't access extended configuration space,please check!!\n");
+		DRM_WARN("System can't access extended configuration space, please check!!\n");
 
	/* skip if the bios has already enabled large BAR */
	if (adev->gmc.real_vram_size &&
@@ -1554,7 +1739,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
			uint32_t fw_ver;
 
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
-			/* force vPost if error occured */
+			/* force vPost if error occurred */
			if (err)
				return true;
 
@@ -1566,7 +1751,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
	}
 
	/* Don't post if we need to reset whole hive on init */
-	if (adev->gmc.xgmi.pending_reset)
+	if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
		return false;
 
	if (adev->has_hw_reset) {
@@ -1827,6 +2012,8 @@ static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
 */
 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
 {
+	int i;
+
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
@@ -1881,6 +2068,9 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
 
	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
 
+	for (i = 0; i < MAX_XCP; i++)
+		adev->enforce_isolation[i] = !!enforce_isolation;
+
	return 0;
 }
 
@@ -1981,7 +2171,7 @@ int amdgpu_device_ip_set_clockgating_state(void *dev,
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
-			(void *)adev, state);
+			&adev->ip_blocks[i], state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
@@ -2015,7 +2205,7 @@ int amdgpu_device_ip_set_powergating_state(void *dev,
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
-			(void *)adev, state);
+			&adev->ip_blocks[i], state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
@@ -2065,9 +2255,12 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
-			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
-			if (r)
-				return r;
+			if (adev->ip_blocks[i].version->funcs->wait_for_idle) {
+				r = adev->ip_blocks[i].version->funcs->wait_for_idle(
+					&adev->ip_blocks[i]);
+				if (r)
+					return r;
+			}
			break;
		}
	}
@@ -2076,26 +2269,24 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
 }
 
 /**
- * amdgpu_device_ip_is_idle - is the hardware IP idle
+ * amdgpu_device_ip_is_valid - is the hardware IP enabled
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
- * Check if the hardware IP is idle or not.
- * Returns true if it the IP is idle, false if not.
+ * Check if the hardware IP is enabled or not.
+ * Returns true if the IP is enabled, false if not.
 */
-bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
-			      enum amd_ip_block_type block_type)
+bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
+			       enum amd_ip_block_type block_type)
 {
	int i;
 
	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_blocks[i].status.valid)
-			continue;
		if (adev->ip_blocks[i].version->type == block_type)
-			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
+			return adev->ip_blocks[i].status.valid;
	}
-	return true;
+	return false;
 
 }
 
@@ -2174,8 +2365,10 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
		break;
	}
 
-	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
-		  ip_block_version->funcs->name);
+	dev_info(adev->dev, "detected ip block number %d <%s>\n",
+		 adev->num_ip_blocks, ip_block_version->funcs->name);
+
+	adev->ip_blocks[adev->num_ip_blocks].adev = adev;
 
	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
 
@@ -2191,7 +2384,7 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
 * the module parameter virtual_display.  This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
- * the user and configues the virtual display configuration (number of
+ * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
 */
 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
@@ -2254,14 +2447,13 @@ void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
 * @adev: amdgpu_device pointer
 *
 * Parses the asic configuration parameters specified in the gpu info
- * firmware and makes them availale to the driver for use in configuring
+ * firmware and makes them available to the driver for use in configuring
 * the asic.
 * Returns 0 on success, -EINVAL on failure.
 */
 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 {
	const char *chip_name;
-	char fw_name[40];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;
 
@@ -2295,12 +2487,13 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
		break;
	}
 
-	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
-	err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
+	err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw,
+				   AMDGPU_UCODE_OPTIONAL,
+				   "amdgpu/%s_gpu_info.bin", chip_name);
	if (err) {
		dev_err(adev->dev,
-			"Failed to get gpu_info firmware \"%s\"\n",
-			fw_name);
+			"Failed to get gpu_info firmware \"%s_gpu_info.bin\"\n",
+			chip_name);
		goto out;
	}
 
@@ -2315,7 +2508,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
		le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 
	/*
-	 * Should be droped when DAL no longer needs it.
+	 * Should be dropped when DAL no longer needs it.
	 */
	if (adev->asic_type == CHIP_NAVI12)
		goto parse_soc_bounding_box;
@@ -2383,6 +2576,7 @@ out:
 */
 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 {
+	struct amdgpu_ip_block *ip_block;
	struct pci_dev *parent;
	int i, r;
	bool total;
@@ -2472,25 +2666,25 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 
	total = true;
	for (i = 0; i < adev->num_ip_blocks; i++) {
+		ip_block = &adev->ip_blocks[i];
+
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_WARN("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
-		} else {
-			if (adev->ip_blocks[i].version->funcs->early_init) {
-				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
-				if (r == -ENOENT) {
-					adev->ip_blocks[i].status.valid = false;
-				} else if (r) {
-					DRM_ERROR("early_init of IP block <%s> failed %d\n",
-						  adev->ip_blocks[i].version->funcs->name, r);
-					total = false;
-				} else {
-					adev->ip_blocks[i].status.valid = true;
-				}
+		} else if (ip_block->version->funcs->early_init) {
+			r = ip_block->version->funcs->early_init(ip_block);
+			if (r == -ENOENT) {
+				adev->ip_blocks[i].status.valid = false;
+			} else if (r) {
+				DRM_ERROR("early_init of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].version->funcs->name, r);
+				total = false;
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
+		} else {
+			adev->ip_blocks[i].status.valid = true;
		}
		/* get the vbios after the asic_funcs are set up */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
@@ -2520,7 +2714,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
	if (!total)
		return -ENODEV;
 
-	amdgpu_amdkfd_device_probe(adev);
+	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+	if (ip_block->status.valid != false)
+		amdgpu_amdkfd_device_probe(adev);
+
	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;
 
@@ -2536,10 +2733,13 @@ static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
			continue;
		if (adev->ip_blocks[i].status.hw)
			continue;
+		if (!amdgpu_ip_member_of_hwini(
+			    adev, adev->ip_blocks[i].version->type))
+			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
-			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+			r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
			if (r) {
				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
@@ -2561,7 +2761,10 @@ static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
			continue;
		if (adev->ip_blocks[i].status.hw)
			continue;
-		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+		if (!amdgpu_ip_member_of_hwini(
+			    adev, adev->ip_blocks[i].version->type))
+			continue;
+		r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
@@ -2584,6 +2787,10 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
				continue;
 
+			if (!amdgpu_ip_member_of_hwini(adev,
+						       AMD_IP_BLOCK_TYPE_PSP))
+				break;
+
			if (!adev->ip_blocks[i].status.sw)
				continue;
 
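A recurring refactor across these hunks: IP callbacks that used to take a bare `void *adev` now receive their own `struct amdgpu_ip_block *`, which carries a back pointer to the device (set in amdgpu_device_ip_block_add above). A minimal standalone model of that callback shape, with simplified stand-in types:

#include <stdio.h>

struct device;		/* opaque stand-in for the device */
struct ip_block;

struct ip_funcs {
	const char *name;
	int (*hw_init)(struct ip_block *block);	/* was: int (*hw_init)(void *adev) */
};

struct ip_block {
	struct device *adev;	/* back pointer, set when the block is added */
	const struct ip_funcs *funcs;
	int hw;
};

static int demo_hw_init(struct ip_block *block)
{
	/* the callback reaches both its own state and, via adev, the device */
	printf("hw_init of <%s>\n", block->funcs->name);
	block->hw = 1;
	return 0;
}

int main(void)
{
	static const struct ip_funcs funcs = {
		.name = "demo",
		.hw_init = demo_hw_init,
	};
	struct ip_block blk = { .adev = 0, .funcs = &funcs };

	return blk.funcs->hw_init(&blk);
}

The payoff is visible in the new amdgpu_ip_block_suspend/resume wrappers: generic helpers can log and update per-block status without each caller open-coding the error path.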
DRM_ERROR("resume of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); + r = amdgpu_ip_block_resume(&adev->ip_blocks[i]); + if (r) return r; - } } else { - r = adev->ip_blocks[i].version->funcs->hw_init(adev); + r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); if (r) { DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); return r; } + adev->ip_blocks[i].status.hw = true; } - - adev->ip_blocks[i].status.hw = true; break; } } @@ -2689,6 +2892,7 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev) */ static int amdgpu_device_ip_init(struct amdgpu_device *adev) { + bool init_badpage; int i, r; r = amdgpu_ras_init(adev); @@ -2698,17 +2902,23 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_blocks[i].status.valid) continue; - r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev); - if (r) { - DRM_ERROR("sw_init of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - goto init_failed; + if (adev->ip_blocks[i].version->funcs->sw_init) { + r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]); + if (r) { + DRM_ERROR("sw_init of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + goto init_failed; + } } adev->ip_blocks[i].status.sw = true; + if (!amdgpu_ip_member_of_hwini( + adev, adev->ip_blocks[i].version->type)) + continue; + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { /* need to do common hw init early so everything is set up for gmc */ - r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); + r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); if (r) { DRM_ERROR("hw_init %d failed %d\n", i, r); goto init_failed; @@ -2725,7 +2935,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r); goto init_failed; } - r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); + r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); if (r) { DRM_ERROR("hw_init %d failed %d\n", i, r); goto init_failed; @@ -2798,7 +3008,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) * Note: theoretically, this should be called before all vram allocations * to protect retired page from abusing */ - r = amdgpu_ras_recovery_init(adev); + init_badpage = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI); + r = amdgpu_ras_recovery_init(adev, init_badpage); if (r) goto init_failed; @@ -2838,7 +3049,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) amdgpu_ttm_set_buffer_funcs_status(adev, true); /* Don't init kfd if whole hive need to be reset during init */ - if (!adev->gmc.xgmi.pending_reset) { + if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) { kgd2kfd_init_zone_device(adev); amdgpu_amdkfd_device_init(adev); } @@ -2857,7 +3068,7 @@ init_failed: * * Writes a reset magic value to the gart pointer in VRAM. The driver calls * this function before a GPU reset. If the value is retained after a - * GPU reset, VRAM has not been lost. Some GPU resets may destry VRAM contents. + * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents. 
 */
 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
 {
@@ -2933,7 +3144,7 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
			/* enable clockgating to save power */
-			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+			r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i],
										     state);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
@@ -2970,7 +3181,7 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
			/* enable powergating to save power */
-			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
+			r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i],
										      state);
			if (r) {
				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
@@ -3038,7 +3249,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
-			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
+			r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
@@ -3054,7 +3265,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
		return r;
	}
 
-	amdgpu_ras_set_error_query_ready(adev, true);
+	if (!amdgpu_reset_in_recovery(adev))
+		amdgpu_ras_set_error_query_ready(adev, true);
 
	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
@@ -3108,6 +3320,25 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
	return 0;
 }
 
+static void amdgpu_ip_block_hw_fini(struct amdgpu_ip_block *ip_block)
+{
+	int r;
+
+	if (!ip_block->version->funcs->hw_fini) {
+		DRM_ERROR("hw_fini of IP block <%s> not defined\n",
+			  ip_block->version->funcs->name);
+	} else {
+		r = ip_block->version->funcs->hw_fini(ip_block);
+		/* XXX handle errors */
+		if (r) {
+			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
+				  ip_block->version->funcs->name, r);
+		}
+	}
+
+	ip_block->status.hw = false;
+}
+
 /**
 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
 *
@@ -3117,7 +3348,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
 */
 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
 {
-	int i, r;
+	int i;
 
	if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
		return;
@@ -3126,13 +3357,7 @@ static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
-			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
-			/* XXX handle errors */
-			if (r) {
-				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
-					  adev->ip_blocks[i].version->funcs->name, r);
-			}
-			adev->ip_blocks[i].status.hw = false;
+			amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
			break;
		}
	}
@@ -3146,7 +3371,7 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
		if (!adev->ip_blocks[i].version->funcs->early_fini)
			continue;
 
-		r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
+		r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]);
		if (r) {
			DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
@@ -3158,21 +3383,14 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
 
	amdgpu_amdkfd_suspend(adev, false);
 
-	/* Workaroud for ASICs need to disable SMC first */
+	/* Workaround for ASICs that need to disable SMC first */
	amdgpu_device_smu_fini_early(adev);
 
	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
 
-		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
-		/* XXX handle errors */
-		if (r) {
-			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
-				  adev->ip_blocks[i].version->funcs->name, r);
-		}
-
-		adev->ip_blocks[i].status.hw = false;
+		amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
	}
 
	if (amdgpu_sriov_vf(adev)) {
@@ -3218,12 +3436,13 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
			amdgpu_ib_pool_fini(adev);
			amdgpu_seq64_fini(adev);
		}
-
-		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
-		/* XXX handle errors */
-		if (r) {
-			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
-				  adev->ip_blocks[i].version->funcs->name, r);
+		if (adev->ip_blocks[i].version->funcs->sw_fini) {
+			r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]);
+			/* XXX handle errors */
+			if (r) {
+				DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].version->funcs->name, r);
+			}
		}
		adev->ip_blocks[i].status.sw = false;
		adev->ip_blocks[i].status.valid = false;
@@ -3233,7 +3452,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_fini)
-			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
+			adev->ip_blocks[i].version->funcs->late_fini(&adev->ip_blocks[i]);
		adev->ip_blocks[i].status.late_initialized = false;
	}
 
@@ -3266,7 +3485,7 @@ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
	WARN_ON_ONCE(adev->gfx.gfx_off_state);
	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
 
-	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
+	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true, 0))
		adev->gfx.gfx_off_state = true;
 }
 
@@ -3305,15 +3524,9 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
			continue;
 
		/* XXX handle errors */
-		r = adev->ip_blocks[i].version->funcs->suspend(adev);
-		/* XXX handle errors */
-		if (r) {
-			DRM_ERROR("suspend of IP block <%s> failed %d\n",
-				  adev->ip_blocks[i].version->funcs->name, r);
+		r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
+		if (r)
			return r;
-		}
-
-		adev->ip_blocks[i].status.hw = false;
	}
 
	return 0;
@@ -3351,14 +3564,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
		}
 
		/* skip unnecessary suspend if we do not initialize them yet */
-		if (adev->gmc.xgmi.pending_reset &&
-		    !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
-		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
-		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
-		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
-			adev->ip_blocks[i].status.hw = false;
+		if (!amdgpu_ip_member_of_hwini(
+			    adev, adev->ip_blocks[i].version->type))
			continue;
-		}
 
		/* skip suspend of gfx/mes and psp for S0ix
		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
@@ -3392,13 +3600,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
			continue;
 
		/* XXX handle errors */
-		r = adev->ip_blocks[i].version->funcs->suspend(adev);
-		/* XXX handle errors */
-		if (r) {
-			DRM_ERROR("suspend of IP block <%s> failed %d\n",
-				  adev->ip_blocks[i].version->funcs->name, r);
-		}
+		r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
		adev->ip_blocks[i].status.hw = false;
+
		/* handle putting the SMC in the appropriate state */
		if (!amdgpu_sriov_vf(adev)) {
			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
@@ -3472,10 +3676,12 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
			    !block->status.valid)
				continue;
 
-			r = block->version->funcs->hw_init(adev);
-			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
-			if (r)
+			r = block->version->funcs->hw_init(&adev->ip_blocks[i]);
+			if (r) {
+				dev_err(adev->dev, "RE-INIT-early: %s failed\n",
+					block->version->funcs->name);
				return r;
+			}
			block->status.hw = true;
		}
	}
@@ -3485,7 +3691,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
 
 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
 {
-	int i, r;
+	struct amdgpu_ip_block *block;
+	int i, r = 0;
 
	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_SMC,
@@ -3500,30 +3707,28 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
	};
 
	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
-		int j;
-		struct amdgpu_ip_block *block;
+		block = amdgpu_device_ip_get_ip_block(adev, ip_order[i]);
 
-		for (j = 0; j < adev->num_ip_blocks; j++) {
-			block = &adev->ip_blocks[j];
-
-			if (block->version->type != ip_order[i] ||
-			    !block->status.valid ||
-			    block->status.hw)
-				continue;
+		if (!block)
+			continue;
 
-			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
-				r = block->version->funcs->resume(adev);
-			else
-				r = block->version->funcs->hw_init(adev);
+		if (block->status.valid && !block->status.hw) {
+			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) {
+				r = amdgpu_ip_block_resume(block);
+			} else {
+				r = block->version->funcs->hw_init(block);
+			}
 
-			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
-			if (r)
-				return r;
+			if (r) {
+				dev_err(adev->dev, "RE-INIT-late: %s failed\n",
					block->version->funcs->name);
+				break;
+			}
			block->status.hw = true;
		}
	}
 
-	return 0;
+	return r;
 }
 
 /**
@@ -3550,13 +3755,9 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
-			r = adev->ip_blocks[i].version->funcs->resume(adev);
-			if (r) {
-				DRM_ERROR("resume of IP block <%s> failed %d\n",
-					  adev->ip_blocks[i].version->funcs->name, r);
+			r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+			if (r)
				return r;
-			}
-			adev->ip_blocks[i].status.hw = true;
		}
	}
 
@@ -3568,7 +3769,7 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
 *
 * @adev: amdgpu_device pointer
 *
- * First resume function for hardware IPs.  The list of all the hardware
+ * Second resume function for hardware IPs.  The list of all the hardware
 * IPs that make up the asic is walked and the resume callbacks are run for
 * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
 * functional state after a suspend and updates the software state as
@@ -3586,15 +3787,42 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
+		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
			continue;
-		r = adev->ip_blocks[i].version->funcs->resume(adev);
-		if (r) {
-			DRM_ERROR("resume of IP block <%s> failed %d\n",
-				  adev->ip_blocks[i].version->funcs->name, r);
+		r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+		if (r)
			return r;
+	}
+
+	return 0;
+}
+
+/**
+ * amdgpu_device_ip_resume_phase3 - run resume for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Third resume function for hardware IPs.  The list of all the hardware
+ * IPs that make up the asic is walked and the resume callbacks are run for
+ * all DCE.  resume puts the hardware into a functional state after a suspend
+ * and updates the software state as necessary.  This function is also used
+ * for restoring the GPU after a GPU reset.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev)
+{
+	int i, r;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
+			continue;
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
+			r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+			if (r)
+				return r;
		}
-		adev->ip_blocks[i].status.hw = true;
	}
 
	return 0;
@@ -3629,6 +3857,13 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
	if (adev->mman.buffer_funcs_ring->sched.ready)
		amdgpu_ttm_set_buffer_funcs_status(adev, true);
 
+	if (r)
+		return r;
+
+	amdgpu_fence_driver_hw_init(adev);
+
+	r = amdgpu_device_ip_resume_phase3(adev);
+
	return r;
 }
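The hunks above split resume into three passes: phase 1 brings up COMMON/GMC/IH (plus PSP under SR-IOV), phase 2 everything else except display, and the new phase 3 resumes DCE last, after the fence driver is reinitialized. A toy model of that ordering (block list and predicates are illustrative):

#include <stdio.h>

enum blk { COMMON, GMC, IH, PSP, GFX, SDMA, DCE, NBLK };

static int is_phase1(enum blk b) { return b == COMMON || b == GMC || b == IH; }
static int p1(enum blk b) { return is_phase1(b); }
static int p2(enum blk b) { return !is_phase1(b) && b != DCE && b != PSP; }
static int p3(enum blk b) { return b == DCE; }

/* Walk all blocks and resume the ones this phase selects. */
static void resume_phase(const char *phase, int (*take)(enum blk))
{
	for (int b = COMMON; b < NBLK; b++)
		if (take(b))
			printf("%s: resume block %d\n", phase, b);
}

int main(void)
{
	resume_phase("phase1", p1);
	/* PSP is handled separately by the firmware-loading step */
	resume_phase("phase2", p2);
	/* fences would be reinitialized here, before display comes back */
	resume_phase("phase3", p3);
	return 0;
}

Resuming display last means DCE only starts once rings and fences are functional, which matters after a reset where display would otherwise touch half-initialized hardware.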
@@ -3859,6 +4094,27 @@ static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
		adev->ram_is_direct_mapped = true;
 }
 
+#if defined(CONFIG_HSA_AMD_P2P)
+/**
+ * amdgpu_device_check_iommu_remap - Check if DMA remapping is enabled.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the IOMMU remaps the BAR address.
+ */
+static bool amdgpu_device_check_iommu_remap(struct amdgpu_device *adev)
+{
+	struct iommu_domain *domain;
+
+	domain = iommu_get_domain_for_dev(adev->dev);
+	if (domain && (domain->type == IOMMU_DOMAIN_DMA ||
+		       domain->type == IOMMU_DOMAIN_DMA_FQ))
+		return true;
+
+	return false;
+}
+#endif
+
 static const struct attribute *amdgpu_dev_attributes[] = {
	&dev_attr_pcie_replay_count.attr,
	NULL
@@ -3960,11 +4216,16 @@ int amdgpu_device_init(struct amdgpu_device *adev,
	mutex_init(&adev->grbm_idx_mutex);
	mutex_init(&adev->mn_lock);
	mutex_init(&adev->virt.vf_errors.lock);
+	mutex_init(&adev->virt.rlcg_reg_lock);
	hash_init(adev->mn_hash);
	mutex_init(&adev->psp.mutex);
	mutex_init(&adev->notifier_lock);
	mutex_init(&adev->pm.stable_pstate_ctx_lock);
	mutex_init(&adev->benchmark_mutex);
+	mutex_init(&adev->gfx.reset_sem_mutex);
+	/* Initialize the mutex for cleaner shader isolation between GFX and compute processes */
+	mutex_init(&adev->enforce_isolation_mutex);
+	mutex_init(&adev->gfx.kfd_sch_mutex);
 
	amdgpu_device_init_apu_flags(adev);
 
@@ -3981,9 +4242,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
	spin_lock_init(&adev->se_cac_idx_lock);
	spin_lock_init(&adev->audio_endpt_idx_lock);
	spin_lock_init(&adev->mm_stats.lock);
-
-	INIT_LIST_HEAD(&adev->shadow_list);
-	mutex_init(&adev->shadow_list_lock);
+	spin_lock_init(&adev->wb.lock);
 
	INIT_LIST_HEAD(&adev->reset_list);
 
@@ -3995,6 +4254,21 @@ int amdgpu_device_init(struct amdgpu_device *adev,
			  amdgpu_device_delayed_init_work_handler);
	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
			  amdgpu_device_delay_enable_gfx_off);
+	/*
+	 * Initialize the enforce_isolation work structures for each XCP
+	 * partition. This work handler is responsible for enforcing shader
+	 * isolation on AMD GPUs. It counts the number of emitted fences for
+	 * each GFX and compute ring. If there are any fences, it schedules
+	 * the `enforce_isolation_work` to be run after a delay. If there are
+	 * no fences, it signals the Kernel Fusion Driver (KFD) to resume the
+	 * runqueue.
+	 */
+	for (i = 0; i < MAX_XCP; i++) {
+		INIT_DELAYED_WORK(&adev->gfx.enforce_isolation[i].work,
+				  amdgpu_gfx_enforce_isolation_handler);
+		adev->gfx.enforce_isolation[i].adev = adev;
+		adev->gfx.enforce_isolation[i].xcp_id = i;
+	}
 
	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
 
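The per-partition setup just above follows a common deferred-work pattern: one work item per XCP, each carrying its partition id and a back pointer so the handler knows which partition to act on. A standalone sketch of the data layout (the structures model, not reproduce, the kernel's delayed_work API):

#include <stdio.h>

#define MAX_XCP 8

struct gpu;

struct isolation_work {
	struct gpu *adev;	/* back pointer for the handler */
	int xcp_id;		/* which partition this item serves */
	void (*handler)(struct isolation_work *w);
};

struct gpu {
	struct isolation_work enforce_isolation[MAX_XCP];
};

static void enforce_isolation_handler(struct isolation_work *w)
{
	printf("enforce isolation on partition %d\n", w->xcp_id);
}

int main(void)
{
	struct gpu g;

	/* mirrors the init loop above: each item knows its device and XCP */
	for (int i = 0; i < MAX_XCP; i++) {
		g.enforce_isolation[i].adev = &g;
		g.enforce_isolation[i].xcp_id = i;
		g.enforce_isolation[i].handler = enforce_isolation_handler;
	}
	g.enforce_isolation[3].handler(&g.enforce_isolation[3]);
	return 0;
}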
@@ -4012,7 +4286,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
	 * for throttling interrupt) = 60 seconds.
	 */
	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
+	ratelimit_state_init(&adev->virt.ras_telemetry_rs, 5 * HZ, 1);
+
	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
+	ratelimit_set_flags(&adev->virt.ras_telemetry_rs, RATELIMIT_MSG_ON_RELEASE);
 
	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
@@ -4036,7 +4313,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 
	/*
	 * Reset domain needs to be present early, before XGMI hive discovered
-	 * (if any) and intitialized to use reset sem and in_gpu reset flag
+	 * (if any) and initialized to use reset sem and in_gpu reset flag
	 * early on during init and before calling to RREG32.
	 */
	adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
@@ -4056,19 +4333,32 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 
	amdgpu_device_set_mcbp(adev);
 
+	/*
+	 * By default, use default mode where all blocks are expected to be
+	 * initialized. At present a 'swinit' of blocks is required to be
+	 * completed before the need for a different level is detected.
+	 */
+	amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_DEFAULT);
	/* early init functions */
	r = amdgpu_device_ip_early_init(adev);
	if (r)
		return r;
 
	/* Get rid of things like offb */
-	r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
+	r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name);
	if (r)
		return r;
 
	/* Enable TMZ based on IP_VERSION */
	amdgpu_gmc_tmz_set(adev);
 
+	if (amdgpu_sriov_vf(adev) &&
+	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
+		/* VF MMIO access (except mailbox range) from CPU
+		 * will be blocked during sriov runtime
+		 */
+		adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;
+
	amdgpu_gmc_noretry_set(adev);
	/* Need to get xgmi info early to decide the reset behavior*/
	if (adev->gmc.xgmi.supported) {
@@ -4121,20 +4411,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
		if (adev->gmc.xgmi.num_physical_nodes) {
			dev_info(adev->dev, "Pending hive reset.\n");
-			adev->gmc.xgmi.pending_reset = true;
-			/* Only need to init necessary block for SMU to handle the reset */
-			for (i = 0; i < adev->num_ip_blocks; i++) {
-				if (!adev->ip_blocks[i].status.valid)
-					continue;
-				if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
-				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
-				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
-				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
-					DRM_DEBUG("IP %s disabled for hw_init.\n",
-						  adev->ip_blocks[i].version->funcs->name);
-					adev->ip_blocks[i].status.hw = true;
-				}
-			}
+			amdgpu_set_init_level(adev,
+					      AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
		} else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
			   !amdgpu_device_has_display_hardware(adev)) {
			r = psp_gpu_reset(adev);
@@ -4242,7 +4520,7 @@ fence_driver_init:
	/* enable clockgating, etc. after ib tests, etc. since some blocks require
	 * explicit gating rather than handling it automatically.
	 */
-	if (!adev->gmc.xgmi.pending_reset) {
+	if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
		r = amdgpu_device_ip_late_init(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
@@ -4292,6 +4570,7 @@ fence_driver_init:
	amdgpu_fru_sysfs_init(adev);
 
	amdgpu_reg_state_sysfs_init(adev);
+	amdgpu_xcp_cfg_sysfs_init(adev);
 
	if (IS_ENABLED(CONFIG_PERF_EVENTS))
		r = amdgpu_pmu_init(adev);
@@ -4319,12 +4598,16 @@ fence_driver_init:
	if (px)
		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
 
-	if (adev->gmc.xgmi.pending_reset)
-		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
-				   msecs_to_jiffies(AMDGPU_RESUME_MS));
+	if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
+		amdgpu_xgmi_reset_on_init(adev);
 
	amdgpu_device_check_iommu_direct_map(adev);
 
+	adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
+	r = register_pm_notifier(&adev->pm_nb);
+	if (r)
+		goto failed;
+
	return 0;
 
release_ras_con:
@@ -4384,8 +4667,13 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 {
	dev_info(adev->dev, "amdgpu: finishing device.\n");
	flush_delayed_work(&adev->delayed_init_work);
+
+	if (adev->mman.initialized)
+		drain_workqueue(adev->mman.bdev.wq);
	adev->shutdown = true;
 
+	unregister_pm_notifier(&adev->pm_nb);
+
	/* make sure IB test finished before entering exclusive mode
	 * to avoid preemption on IB test
	 */
@@ -4404,9 +4692,6 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
	}
	amdgpu_fence_driver_hw_fini(adev);
 
-	if (adev->mman.initialized)
-		drain_workqueue(adev->mman.bdev.wq);
-
	if (adev->pm.sysfs_initialized)
		amdgpu_pm_sysfs_fini(adev);
	if (adev->ucode_sysfs_en)
@@ -4415,6 +4700,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
	amdgpu_fru_sysfs_fini(adev);
 
	amdgpu_reg_state_sysfs_fini(adev);
+	amdgpu_xcp_cfg_sysfs_fini(adev);
 
	/* disable ras feature must before hw fini */
	amdgpu_ras_pre_fini(adev);
@@ -4440,8 +4726,8 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
	int idx;
	bool px;
 
-	amdgpu_fence_driver_sw_fini(adev);
	amdgpu_device_ip_fini(adev);
+	amdgpu_fence_driver_sw_fini(adev);
	amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
	adev->accel_working = false;
	dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
@@ -4506,8 +4792,8 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
 {
	int ret;
 
-	/* No need to evict vram on APUs for suspend to ram or s2idle */
-	if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
+	/* No need to evict vram on APUs unless going to S4 */
+	if (!adev->in_s4 && (adev->flags & AMD_IS_APU))
		return 0;
 
	ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
@@ -4520,6 +4806,41 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
 * Suspend & resume.
 */
 /**
+ * amdgpu_device_pm_notifier - Notification block for Suspend/Hibernate events
+ * @nb: notifier block
+ * @mode: suspend mode
+ * @data: data
+ *
+ * This function is called when the system is about to suspend or hibernate.
+ * It is used to evict resources from the device before the system goes to
+ * sleep while there is still access to swap.
+ */
+static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
+				     void *data)
+{
+	struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb);
+	int r;
+
+	switch (mode) {
+	case PM_HIBERNATION_PREPARE:
+		adev->in_s4 = true;
+		fallthrough;
+	case PM_SUSPEND_PREPARE:
+		r = amdgpu_device_evict_resources(adev);
+		/*
+		 * This is considered non-fatal at this time because
+		 * amdgpu_device_prepare() will also fatally evict resources.
+		 * See https://gitlab.freedesktop.org/drm/amd/-/issues/3781
+		 */
+		if (r)
+			drm_warn(adev_to_drm(adev), "Failed to evict resources, freeze active processes if problems occur: %d\n", r);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+/**
 * amdgpu_device_prepare - prepare for device suspend
 *
 * @dev: drm dev pointer
@@ -4550,7 +4871,7 @@ int amdgpu_device_prepare(struct drm_device *dev)
			continue;
		if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
			continue;
-		r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
+		r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]);
		if (r)
			goto unprepare;
	}
@@ -4558,7 +4879,7 @@ int amdgpu_device_prepare(struct drm_device *dev)
	return 0;
 
unprepare:
-	adev->in_s0ix = adev->in_s3 = false;
+	adev->in_s0ix = adev->in_s3 = adev->in_s4 = false;
 
	return r;
 }
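The notifier above runs before the system commits to sleep: hibernate preparation marks S4 entry and falls through to the same eviction path as plain suspend, and an eviction failure is only warned about since amdgpu_device_prepare() will retry it fatally. A userspace model of that decision logic (constants are stand-ins for the kernel's PM_* notifier events):

#include <stdio.h>

enum { PM_HIBERNATION_PREPARE = 1, PM_SUSPEND_PREPARE = 2 };
enum { NOTIFY_DONE = 0 };

struct dev_state {
	int in_s4;
};

static int evict_resources(struct dev_state *d) { (void)d; return 0; }

static int pm_notifier(struct dev_state *d, unsigned long mode)
{
	switch (mode) {
	case PM_HIBERNATION_PREPARE:
		d->in_s4 = 1;
		/* fall through: hibernate also needs the eviction */
	case PM_SUSPEND_PREPARE:
		if (evict_resources(d))
			fprintf(stderr, "eviction failed (non-fatal here)\n");
		break;
	}
	return NOTIFY_DONE;
}

int main(void)
{
	struct dev_state d = { 0 };

	pm_notifier(&d, PM_HIBERNATION_PREPARE);
	printf("in_s4=%d\n", d.in_s4);
	return 0;
}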
@@ -4567,13 +4888,13 @@ unprepare:
 * amdgpu_device_suspend - initiate device suspend
 *
 * @dev: drm dev pointer
- * @fbcon : notify the fbdev of suspend
+ * @notify_clients: notify in-kernel DRM clients
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
-int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
+int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
 {
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r = 0;
@@ -4593,8 +4914,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
		DRM_WARN("smart shift update failed\n");
 
-	if (fbcon)
-		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
+	if (notify_clients)
+		drm_client_dev_suspend(adev_to_drm(adev), false);
 
	cancel_delayed_work_sync(&adev->delayed_init_work);
 
@@ -4629,13 +4950,13 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 * amdgpu_device_resume - initiate device resume
 *
 * @dev: drm dev pointer
- * @fbcon : notify the fbdev of resume
+ * @notify_clients: notify in-kernel DRM clients
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
-int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
+int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
 {
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r = 0;
@@ -4665,7 +4986,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
		goto exit;
	}
-	amdgpu_fence_driver_hw_init(adev);
 
	if (!adev->in_s0ix) {
		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
@@ -4691,8 +5011,8 @@ exit:
	/* Make sure IB tests flushed */
	flush_delayed_work(&adev->delayed_init_work);
 
-	if (fbcon)
-		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
+	if (notify_clients)
+		drm_client_dev_resume(adev_to_drm(adev), false);
 
	amdgpu_ras_resume(adev);
 
@@ -4754,7 +5074,8 @@ static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
			continue;
		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
			adev->ip_blocks[i].status.hang =
-				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
+				adev->ip_blocks[i].version->funcs->check_soft_reset(
+					&adev->ip_blocks[i]);
		if (adev->ip_blocks[i].status.hang) {
			dev_info(adev->dev, "IP block:%s is hung!\n",
				 adev->ip_blocks[i].version->funcs->name);
			asic_hang = true;
@@ -4783,7 +5104,7 @@ static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
-			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
+			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(&adev->ip_blocks[i]);
			if (r)
				return r;
		}
@@ -4845,7 +5166,7 @@ static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->soft_reset) {
-			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
+			r = adev->ip_blocks[i].version->funcs->soft_reset(&adev->ip_blocks[i]);
			if (r)
				return r;
		}
@@ -4874,7 +5195,7 @@ static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->post_soft_reset)
-			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
+			r = adev->ip_blocks[i].version->funcs->post_soft_reset(&adev->ip_blocks[i]);
		if (r)
			return r;
	}
@@ -4883,103 +5204,33 @@ static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_device_recover_vram - Recover some VRAM contents
- *
- * @adev: amdgpu_device pointer
- *
- * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
- * restore things like GPUVM page tables after a GPU reset where
- * the contents of VRAM might be lost.
- *
- * Returns:
- * 0 on success, negative error code on failure.
- */
-static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
-{
-	struct dma_fence *fence = NULL, *next = NULL;
-	struct amdgpu_bo *shadow;
-	struct amdgpu_bo_vm *vmbo;
-	long r = 1, tmo;
-
-	if (amdgpu_sriov_runtime(adev))
-		tmo = msecs_to_jiffies(8000);
-	else
-		tmo = msecs_to_jiffies(100);
-
-	dev_info(adev->dev, "recover vram bo from shadow start\n");
-	mutex_lock(&adev->shadow_list_lock);
-	list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
-		/* If vm is compute context or adev is APU, shadow will be NULL */
-		if (!vmbo->shadow)
-			continue;
-		shadow = vmbo->shadow;
-
-		/* No need to recover an evicted BO */
-		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
-		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
-		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
-			continue;
-
-		r = amdgpu_bo_restore_shadow(shadow, &next);
-		if (r)
-			break;
-
-		if (fence) {
-			tmo = dma_fence_wait_timeout(fence, false, tmo);
-			dma_fence_put(fence);
-			fence = next;
-			if (tmo == 0) {
-				r = -ETIMEDOUT;
-				break;
-			} else if (tmo < 0) {
-				r = tmo;
-				break;
-			}
-		} else {
-			fence = next;
-		}
-	}
-	mutex_unlock(&adev->shadow_list_lock);
-
-	if (fence)
-		tmo = dma_fence_wait_timeout(fence, false, tmo);
-	dma_fence_put(fence);
-
-	if (r < 0 || tmo <= 0) {
-		dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
-		return -EIO;
-	}
-
-	dev_info(adev->dev, "recover vram bo from shadow done\n");
-	return 0;
-}
-
-
-/**
 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
 *
 * @adev: amdgpu_device pointer
- * @from_hypervisor: request from hypervisor
+ * @reset_context: amdgpu reset context pointer
 *
 * do VF FLR and reinitialize Asic
 * return 0 means succeeded otherwise failed
 */
 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
-				     bool from_hypervisor)
+				     struct amdgpu_reset_context *reset_context)
 {
	int r;
	struct amdgpu_hive_info *hive = NULL;
-	int retry_limit = 0;
-
-retry:
-	amdgpu_amdkfd_pre_reset(adev);
 
-	if (from_hypervisor)
+	if (test_bit(AMDGPU_HOST_FLR, &reset_context->flags)) {
+		if (!amdgpu_ras_get_fed_status(adev))
+			amdgpu_virt_ready_to_reset(adev);
+		amdgpu_virt_wait_reset(adev);
+		clear_bit(AMDGPU_HOST_FLR, &reset_context->flags);
		r = amdgpu_virt_request_full_gpu(adev, true);
-	else
+	} else {
		r = amdgpu_virt_reset_gpu(adev);
+	}
	if (r)
		return r;
+
+	amdgpu_ras_clear_err_state(adev);
	amdgpu_irq_gpu_reset_resume_helper(adev);
 
	/* some sw clean up VF needs to do before recover */
@@ -4988,7 +5239,7 @@ retry:
	/* Resume IP prior to SMC */
	r = amdgpu_device_ip_reinit_early_sriov(adev);
	if (r)
-		goto error;
+		return r;
 
	amdgpu_virt_init_data_exchange(adev);
 
@@ -4999,51 +5250,55 @@ retry:
	/* now we are okay to resume SMC/CP/SDMA */
	r = amdgpu_device_ip_reinit_late_sriov(adev);
	if (r)
-		goto error;
+		return r;
 
	hive = amdgpu_get_xgmi_hive(adev);
	/* Update PSP FW topology after reset */
	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
		r = amdgpu_xgmi_update_topology(hive, adev);
-
	if (hive)
		amdgpu_put_xgmi_hive(hive);
+	if (r)
+		return r;
 
-	if (!r) {
-		r = amdgpu_ib_ring_tests(adev);
-
-		amdgpu_amdkfd_post_reset(adev);
-	}
+	r = amdgpu_ib_ring_tests(adev);
+	if (r)
+		return r;
 
-error:
-	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
+	if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST)
		amdgpu_inc_vram_lost(adev);
-		r = amdgpu_device_recover_vram(adev);
-	}
+
+	/* needs to be called during full access so we can't do it later like
+	 * bare-metal does.
+	 */
+	amdgpu_amdkfd_post_reset(adev);
 
	amdgpu_virt_release_full_gpu(adev, true);
 
-	if (AMDGPU_RETRY_SRIOV_RESET(r)) {
-		if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
-			retry_limit++;
-			goto retry;
-		} else
-			DRM_ERROR("GPU reset retry is beyond the retry limit\n");
-	}
+	/* Aldebaran and gfx_11_0_3 support ras in SRIOV, so RAS needs to be resumed during reset */
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
+	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
+	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
+		amdgpu_ras_resume(adev);
 
-	return r;
+	amdgpu_virt_ras_telemetry_post_reset(adev);
+
+	return 0;
 }
 
 /**
- * amdgpu_device_has_job_running - check if there is any job in mirror list
+ * amdgpu_device_has_job_running - check if there is any unfinished job
 *
 * @adev: amdgpu_device pointer
 *
- * check if there is any job in mirror list
+ * check if there is any job running on the device when guest driver receives
+ * FLR notification from host driver. If there are still jobs running, then
+ * the guest driver will not respond the FLR reset. Instead, let the job hit
+ * the timeout and guest driver then issue the reset request.
 */
 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
 {
	int i;
-	struct drm_sched_job *job;
 
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
@@ -5051,11 +5306,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
		if (!amdgpu_ring_sched_ready(ring))
			continue;
 
-		spin_lock(&ring->sched.job_list_lock);
-		job = list_first_entry_or_null(&ring->sched.pending_list,
-					       struct drm_sched_job, list);
-		spin_unlock(&ring->sched.job_list_lock);
-		if (job)
+		if (amdgpu_fence_count_emitted(ring))
			return true;
	}
	return false;
 }
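The hunk above changes the busy check: instead of locking the scheduler's job list, a ring now counts as busy when it has emitted fences that have not signaled yet. A minimal standalone model of that check (the ring structure is an illustrative stand-in):

#include <stdbool.h>
#include <stdio.h>

#define MAX_RINGS 4

struct ring {
	bool sched_ready;
	unsigned int emitted;	/* fences emitted */
	unsigned int signaled;	/* fences completed */
};

static unsigned int fence_count_emitted(const struct ring *r)
{
	return r->emitted - r->signaled;	/* still in flight */
}

static bool has_job_running(const struct ring rings[], int n)
{
	for (int i = 0; i < n; i++) {
		if (!rings[i].sched_ready)
			continue;
		if (fence_count_emitted(&rings[i]))
			return true;
	}
	return false;
}

int main(void)
{
	struct ring rings[MAX_RINGS] = { { true, 5, 5 }, { true, 7, 6 } };

	printf("busy=%d\n", has_job_running(rings, MAX_RINGS));
	return 0;
}

This is both cheaper and, per the new kernel-doc, better matched to the FLR flow: the guest only needs to know whether any work is still in flight, not which scheduler job is at the head of a list.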
@@ -5121,11 +5372,14 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
 
	dev_info(adev->dev, "GPU mode1 reset\n");
 
+	/* Cache the state before bus master disable. The saved config space
+	 * values are used in other cases like restore after mode-2 reset.
+	 */
+	amdgpu_device_cache_pci_state(adev->pdev);
+
	/* disable BM */
	pci_clear_master(adev->pdev);
 
-	amdgpu_device_cache_pci_state(adev->pdev);
-
	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
		dev_info(adev->dev, "GPU smu mode1 reset\n");
		ret = amdgpu_dpm_mode1_reset(adev);
@@ -5170,16 +5424,15 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 {
	int i, r = 0;
	struct amdgpu_job *job = NULL;
+	struct amdgpu_device *tmp_adev = reset_context->reset_req_dev;
	bool need_full_reset =
		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
 
	if (reset_context->reset_req_dev == adev)
		job = reset_context->job;
 
-	if (amdgpu_sriov_vf(adev)) {
-		/* stop the data exchange thread */
-		amdgpu_virt_fini_data_exchange(adev);
-	}
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_virt_pre_reset(adev);
 
	amdgpu_fence_driver_isr_toggle(adev, true);
 
@@ -5228,6 +5481,16 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
		}
	}
 
+	if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) {
+		dev_info(tmp_adev->dev, "Dumping IP State\n");
+		/* Trigger ip dump before we reset the asic */
+		for (i = 0; i < tmp_adev->num_ip_blocks; i++)
+			if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state)
+				tmp_adev->ip_blocks[i].version->funcs
+					->dump_ip_state((void *)&tmp_adev->ip_blocks[i]);
+		dev_info(tmp_adev->dev, "Dumping IP State Completed\n");
+	}
+
	if (need_full_reset)
		r = amdgpu_device_ip_suspend(adev);
	if (need_full_reset)
@@ -5240,94 +5503,35 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
	return r;
 }
 
-static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
+int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
 {
-	int i;
-
-	lockdep_assert_held(&adev->reset_domain->sem);
+	struct list_head *device_list_handle;
+	bool full_reset, vram_lost = false;
+	struct amdgpu_device *tmp_adev;
+	int r, init_level;
 
-	for (i = 0; i < adev->reset_info.num_regs; i++) {
-		adev->reset_info.reset_dump_reg_value[i] =
-			RREG32(adev->reset_info.reset_dump_reg_list[i]);
+	device_list_handle = reset_context->reset_device_list;
 
-		trace_amdgpu_reset_reg_dumps(adev->reset_info.reset_dump_reg_list[i],
-					     adev->reset_info.reset_dump_reg_value[i]);
-	}
+	if (!device_list_handle)
+		return -EINVAL;
 
-	return 0;
-}
-
-int amdgpu_do_asic_reset(struct list_head *device_list_handle,
-			 struct amdgpu_reset_context *reset_context)
-{
-	struct amdgpu_device *tmp_adev = NULL;
-	bool need_full_reset, skip_hw_reset, vram_lost = false;
-	int r = 0;
-
-	/* Try reset handler method first */
-	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
-				    reset_list);
-	amdgpu_reset_reg_dumps(tmp_adev);
-
-	reset_context->reset_device_list = device_list_handle;
-	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
-	/* If reset handler not implemented, continue; otherwise return */
-	if (r == -EOPNOTSUPP)
-		r = 0;
-	else
-		return r;
-
-	/* Reset handler not implemented, use the default method */
-	need_full_reset =
-		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
-	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
+	full_reset = test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
 
-	/*
-	 * ASIC reset has to be done on all XGMI hive nodes ASAP
-	 * to allow proper links negotiation in FW (within 1 sec)
+	/**
+	 * If it's reset on init, use the default init level; otherwise keep
+	 * the level as recovery.
-static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev) +int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context) { - int i; - - lockdep_assert_held(&adev->reset_domain->sem); + struct list_head *device_list_handle; + bool full_reset, vram_lost = false; + struct amdgpu_device *tmp_adev; + int r, init_level; - for (i = 0; i < adev->reset_info.num_regs; i++) { - adev->reset_info.reset_dump_reg_value[i] = - RREG32(adev->reset_info.reset_dump_reg_list[i]); + device_list_handle = reset_context->reset_device_list; - trace_amdgpu_reset_reg_dumps(adev->reset_info.reset_dump_reg_list[i], - adev->reset_info.reset_dump_reg_value[i]); - } - - return 0; -} - -int amdgpu_do_asic_reset(struct list_head *device_list_handle, - struct amdgpu_reset_context *reset_context) -{ - struct amdgpu_device *tmp_adev = NULL; - bool need_full_reset, skip_hw_reset, vram_lost = false; - int r = 0; - - /* Try reset handler method first */ - tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, - reset_list); - amdgpu_reset_reg_dumps(tmp_adev); - - reset_context->reset_device_list = device_list_handle; - r = amdgpu_reset_perform_reset(tmp_adev, reset_context); - /* If reset handler not implemented, continue; otherwise return */ - if (r == -EOPNOTSUPP) - r = 0; - else - return r; + if (!device_list_handle) + return -EINVAL; - /* Reset handler not implemented, use the default method */ - need_full_reset = - test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); - skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags); + full_reset = test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); - /* - * ASIC reset has to be done on all XGMI hive nodes ASAP - * to allow proper links negotiation in FW (within 1 sec) + /* + * If it's a reset on init, use the default init level; otherwise keep + * the level as reset recovery. */ - if (!skip_hw_reset && need_full_reset) { - list_for_each_entry(tmp_adev, device_list_handle, reset_list) { - /* For XGMI run all resets in parallel to speed up the process */ - if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { - tmp_adev->gmc.xgmi.pending_reset = false; - if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work)) - r = -EALREADY; - } else - r = amdgpu_asic_reset(tmp_adev); - - if (r) { - dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s", - r, adev_to_drm(tmp_adev)->unique); - goto out; - } - } - - /* For XGMI wait for all resets to complete before proceed */ - if (!r) { - list_for_each_entry(tmp_adev, device_list_handle, reset_list) { - if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { - flush_work(&tmp_adev->xgmi_reset_work); - r = tmp_adev->asic_reset_res; - if (r) - break; - } - } - } - } - - if (!r && amdgpu_ras_intr_triggered()) { - list_for_each_entry(tmp_adev, device_list_handle, reset_list) { - amdgpu_ras_reset_error_count(tmp_adev, AMDGPU_RAS_BLOCK__MMHUB); - } - - amdgpu_ras_intr_cleared(); - } + if (reset_context->method == AMD_RESET_METHOD_ON_INIT) + init_level = AMDGPU_INIT_LEVEL_DEFAULT; + else + init_level = AMDGPU_INIT_LEVEL_RESET_RECOVERY; + r = 0; list_for_each_entry(tmp_adev, device_list_handle, reset_list) { + amdgpu_set_init_level(tmp_adev, init_level); + if (full_reset) { /* post card */ - amdgpu_ras_set_fed(tmp_adev, false); + amdgpu_ras_clear_err_state(tmp_adev); r = amdgpu_device_asic_init(tmp_adev); if (r) { dev_warn(tmp_adev->dev, "asic atom init failed!"); @@ -5340,7 +5544,8 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, vram_lost = amdgpu_device_check_vram_lost(tmp_adev); - amdgpu_coredump(tmp_adev, vram_lost, reset_context); + if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) + amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job); if (vram_lost) { DRM_INFO("VRAM is lost due to GPU reset!\n"); @@ -5363,6 +5568,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, if (tmp_adev->mman.buffer_funcs_ring->sched.ready) amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true); + r = amdgpu_device_ip_resume_phase3(tmp_adev); + if (r) + goto out; + if (vram_lost) amdgpu_device_fill_reset_magic(tmp_adev); @@ -5380,7 +5589,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, if (r) goto out; - drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false); + drm_client_dev_resume(adev_to_drm(tmp_adev), false); /* * The GPU enters bad state once faulty pages * @@ -5392,7 +5601,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, * bad_page_threshold value to fix this once * probing driver again. */ - if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) { + if (!amdgpu_ras_is_rma(tmp_adev)) { /* must succeed. */ amdgpu_ras_resume(tmp_adev); } else { @@ -5410,27 +5619,102 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, out: if (!r) { + /* IP init is complete now, set level as default */ + amdgpu_set_init_level(tmp_adev, + AMDGPU_INIT_LEVEL_DEFAULT); amdgpu_irq_gpu_reset_resume_helper(tmp_adev); r = amdgpu_ib_ring_tests(tmp_adev); if (r) { dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r); - need_full_reset = true; r = -EAGAIN; goto end; } } - if (!r) - r = amdgpu_device_recover_vram(tmp_adev); - else + if (r) tmp_adev->asic_reset_res = r; } end: - if (need_full_reset) + return r; +}
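amdgpu_device_reinit_after_reset() above picks an init level before re-initializing each device: a reset performed during first init re-runs the full default sequence, a recovery reset uses the lighter recovery level, and the level is put back to default once re-init succeeds. A compact sketch of that flow, with illustrative enum and struct names rather than the driver's:

#include <stdio.h>

enum reset_method { METHOD_ON_INIT, METHOD_RECOVERY };
enum init_level { LEVEL_DEFAULT, LEVEL_RESET_RECOVERY };

struct device_state {
	enum init_level lvl;
};

static int reinit_after_reset(struct device_state *dev, enum reset_method method)
{
	/* reset-on-init redoes everything; recovery re-inits a subset */
	dev->lvl = (method == METHOD_ON_INIT) ? LEVEL_DEFAULT
					      : LEVEL_RESET_RECOVERY;

	/* ... per-IP hardware re-init would run here at dev->lvl ... */

	/* once re-init succeeds, subsequent inits are full inits again */
	dev->lvl = LEVEL_DEFAULT;
	return 0;
}

int main(void)
{
	struct device_state dev = { LEVEL_DEFAULT };

	reinit_after_reset(&dev, METHOD_RECOVERY);
	printf("post-reinit level: %d\n", dev.lvl); /* 0 (default) */
	return 0;
}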
+ +int amdgpu_do_asic_reset(struct list_head *device_list_handle, + struct amdgpu_reset_context *reset_context) +{ + struct amdgpu_device *tmp_adev = NULL; + bool need_full_reset, skip_hw_reset; + int r = 0; + + /* Try reset handler method first */ + tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, + reset_list); + + reset_context->reset_device_list = device_list_handle; + r = amdgpu_reset_perform_reset(tmp_adev, reset_context); + /* If reset handler not implemented, continue; otherwise return */ + if (r == -EOPNOTSUPP) + r = 0; + else + return r; + + /* Reset handler not implemented, use the default method */ + need_full_reset = + test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); + skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags); + + /* + * ASIC reset has to be done on all XGMI hive nodes ASAP + * to allow proper link negotiation in FW (within 1 sec) + */ + if (!skip_hw_reset && need_full_reset) { + list_for_each_entry(tmp_adev, device_list_handle, reset_list) { + /* For XGMI run all resets in parallel to speed up the process */ + if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { + if (!queue_work(system_unbound_wq, + &tmp_adev->xgmi_reset_work)) + r = -EALREADY; + } else + r = amdgpu_asic_reset(tmp_adev); + + if (r) { + dev_err(tmp_adev->dev, + "ASIC reset failed with error, %d for drm dev, %s", + r, adev_to_drm(tmp_adev)->unique); + goto out; + } + } + + /* For XGMI wait for all resets to complete before proceeding */ + if (!r) { + list_for_each_entry(tmp_adev, device_list_handle, + reset_list) { + if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { + flush_work(&tmp_adev->xgmi_reset_work); + r = tmp_adev->asic_reset_res; + if (r) + break; + } + } + } + } + + if (!r && amdgpu_ras_intr_triggered()) { + list_for_each_entry(tmp_adev, device_list_handle, reset_list) { + amdgpu_ras_reset_error_count(tmp_adev, + AMDGPU_RAS_BLOCK__MMHUB); + } + + amdgpu_ras_intr_cleared(); + } + + r = amdgpu_device_reinit_after_reset(reset_context); + if (r == -EAGAIN) set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); else clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); + +out: return r; } @@ -5538,6 +5822,23 @@ static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev) } +static int amdgpu_device_health_check(struct list_head *device_list_handle) +{ + struct amdgpu_device *tmp_adev; + int ret = 0; + u32 status; + + list_for_each_entry(tmp_adev, device_list_handle, reset_list) { + pci_read_config_dword(tmp_adev->pdev, PCI_COMMAND, &status); + if (PCI_POSSIBLE_ERROR(status)) { + dev_err(tmp_adev->dev, "device lost from bus!"); + ret = -ENODEV; + } + } + + return ret; +} + /** * amdgpu_device_gpu_recover - reset the asic and recover scheduler * @@ -5561,8 +5862,21 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, int i, r = 0; bool need_emergency_restart = false; bool audio_suspended = false; + int retry_limit =
AMDGPU_MAX_RETRY_LIMIT; /* + * If it reaches here because of hang/timeout and a RAS error is + * detected at the same time, let RAS recovery take care of it. + */ + if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) && + !amdgpu_sriov_vf(adev) && + reset_context->src != AMDGPU_RESET_SRC_RAS) { + dev_dbg(adev->dev, + "Gpu recovery from source: %d yielding to RAS error recovery handling", + reset_context->src); + return 0; + } + /* * Special case: RAS triggered and full reset isn't supported */ need_emergency_restart = amdgpu_ras_need_emergency_restart(adev); @@ -5595,7 +5909,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, * to put adev in the 1st position. */ INIT_LIST_HEAD(&device_list); - if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) { + if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) { list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { list_add_tail(&tmp_adev->reset_list, &device_list); if (adev->shutdown) @@ -5609,6 +5923,12 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, device_list_handle = &device_list; } + if (!amdgpu_sriov_vf(adev)) { + r = amdgpu_device_health_check(device_list_handle); + if (r) + goto end_reset; + } + /* We need to lock reset domain only once both for XGMI and single device */ tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, reset_list); @@ -5636,16 +5956,15 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, cancel_delayed_work_sync(&tmp_adev->delayed_init_work); - if (!amdgpu_sriov_vf(tmp_adev)) - amdgpu_amdkfd_pre_reset(tmp_adev); + amdgpu_amdkfd_pre_reset(tmp_adev, reset_context); /* - * Mark these ASICs to be reseted as untracked first + * Mark these ASICs to be reset as untracked first * And add them back after reset completed */ amdgpu_unregister_gpu_instance(tmp_adev); - drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true); + drm_client_dev_suspend(adev_to_drm(tmp_adev), false); /* disable ras on ALL IPs */ if (!need_emergency_restart && @@ -5690,33 +6009,40 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ r, adev_to_drm(tmp_adev)->unique); tmp_adev->asic_reset_res = r; } - - /* - * Drop all pending non scheduler resets. Scheduler resets - * were already dropped during drm_sched_stop - */ - amdgpu_device_stop_pending_resets(tmp_adev); } /* Actual ASIC resets if needed.*/ /* Host driver will handle XGMI hive reset for SRIOV */ if (amdgpu_sriov_vf(adev)) { - r = amdgpu_device_reset_sriov(adev, job ? 
false : true); + if (amdgpu_ras_get_fed_status(adev) || amdgpu_virt_rcvd_ras_interrupt(adev)) { + dev_dbg(adev->dev, "Detected RAS error, wait for FLR completion\n"); + amdgpu_ras_set_fed(adev, true); + set_bit(AMDGPU_HOST_FLR, &reset_context->flags); + } + + r = amdgpu_device_reset_sriov(adev, reset_context); + if (AMDGPU_RETRY_SRIOV_RESET(r) && (retry_limit--) > 0) { + amdgpu_virt_release_full_gpu(adev, true); + goto retry; + } if (r) adev->asic_reset_res = r; - - /* Aldebaran and gfx_11_0_3 support ras in SRIOV, so need resume ras during reset */ - if (amdgpu_ip_version(adev, GC_HWIP, 0) == - IP_VERSION(9, 4, 2) || - amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) || - amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) - amdgpu_ras_resume(adev); } else { r = amdgpu_do_asic_reset(device_list_handle, reset_context); if (r && r == -EAGAIN) goto retry; } + list_for_each_entry(tmp_adev, device_list_handle, reset_list) { + /* + * Drop any pending non scheduler resets queued before reset is done. + * Any reset scheduled after this point would be valid. Scheduler resets + * were already dropped during drm_sched_stop and no new ones can come + * in before drm_sched_start. + */ + amdgpu_device_stop_pending_resets(tmp_adev); + } + skip_hw_reset: /* Post ASIC reset for all devs .*/ @@ -5728,7 +6054,7 @@ skip_hw_reset: if (!amdgpu_ring_sched_ready(ring)) continue; - drm_sched_start(&ring->sched, true); + drm_sched_start(&ring->sched, 0); } if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) @@ -5740,8 +6066,14 @@ skip_hw_reset: tmp_adev->asic_reset_res = 0; if (r) { - /* bad news, how to tell it to userspace ? */ - dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter)); + /* bad news, how to tell it to userspace ? 
+ * for ras error, we should report GPU bad status instead of + * reset failure + */ + if (reset_context->src != AMDGPU_RESET_SRC_RAS || + !amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) + dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", + atomic_read(&tmp_adev->gpu_reset_counter)); amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); } else { dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter)); @@ -5774,6 +6106,7 @@ skip_sched_resume: reset_list); amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain); +end_reset: if (hive) { mutex_unlock(&hive->hive_lock); amdgpu_put_xgmi_hive(hive); @@ -5809,13 +6142,56 @@ static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev, *speed = PCI_SPEED_UNKNOWN; *width = PCIE_LNK_WIDTH_UNKNOWN; - while ((parent = pci_upstream_bridge(parent))) { - /* skip upstream/downstream switches internal to dGPU*/ - if (parent->vendor == PCI_VENDOR_ID_ATI) - continue; + if (amdgpu_device_pcie_dynamic_switching_supported(adev)) { + while ((parent = pci_upstream_bridge(parent))) { + /* skip upstream/downstream switches internal to dGPU*/ + if (parent->vendor == PCI_VENDOR_ID_ATI) + continue; + *speed = pcie_get_speed_cap(parent); + *width = pcie_get_width_cap(parent); + break; + } + } else { + /* use the current speeds rather than max if switching is not supported */ + pcie_bandwidth_available(adev->pdev, NULL, speed, width); + } +} + +/** + * amdgpu_device_gpu_bandwidth - find the bandwidth of the GPU + * + * @adev: amdgpu_device pointer + * @speed: pointer to the speed of the link + * @width: pointer to the width of the link + * + * Evaluate the hierarchy to find the speed and bandwidth capabilities of the + * AMD dGPU which may be a virtual upstream bridge. + */ +static void amdgpu_device_gpu_bandwidth(struct amdgpu_device *adev, + enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + struct pci_dev *parent = adev->pdev; + + if (!speed || !width) + return; + + parent = pci_upstream_bridge(parent); + if (parent && parent->vendor == PCI_VENDOR_ID_ATI) { + /* use the upstream/downstream switches internal to dGPU */ *speed = pcie_get_speed_cap(parent); *width = pcie_get_width_cap(parent); - break; + while ((parent = pci_upstream_bridge(parent))) { + if (parent->vendor == PCI_VENDOR_ID_ATI) { + /* use the upstream/downstream switches internal to dGPU */ + *speed = pcie_get_speed_cap(parent); + *width = pcie_get_width_cap(parent); + } + } + } else { + /* use the device itself */ + *speed = pcie_get_speed_cap(adev->pdev); + *width = pcie_get_width_cap(adev->pdev); } } @@ -5824,15 +6200,14 @@ static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev, * * @adev: amdgpu_device pointer * - * Fetchs and stores in the driver the PCIE capabilities (gen speed + * Fetches and stores in the driver the PCIE capabilities (gen speed * and lanes) of the slot the device is in. Handles APUs and * virtualized environments where PCIE config space may not be available. 
*/ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev) { - struct pci_dev *pdev; enum pci_bus_speed speed_cap, platform_speed_cap; - enum pcie_link_width platform_link_width; + enum pcie_link_width platform_link_width, link_width; if (amdgpu_pcie_gen_cap) adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap; @@ -5854,11 +6229,10 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev) amdgpu_device_partner_bandwidth(adev, &platform_speed_cap, &platform_link_width); + amdgpu_device_gpu_bandwidth(adev, &speed_cap, &link_width); if (adev->pm.pcie_gen_mask == 0) { /* asic caps */ - pdev = adev->pdev; - speed_cap = pcie_get_speed_cap(pdev); if (speed_cap == PCI_SPEED_UNKNOWN) { adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | @@ -5914,51 +6288,103 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev) } } if (adev->pm.pcie_mlw_mask == 0) { + /* asic caps */ + if (link_width == PCIE_LNK_WIDTH_UNKNOWN) { + adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_ASIC_PCIE_MLW_MASK; + } else { + switch (link_width) { + case PCIE_LNK_X32: + adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 | + CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 | + CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 | + CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 | + CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1); + break; + case PCIE_LNK_X16: + adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 | + CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 | + CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 | + CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1); + break; + case PCIE_LNK_X12: + adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 | + CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 | + CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1); + break; + case PCIE_LNK_X8: + adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 | + CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1); + break; + case PCIE_LNK_X4: + adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1); + break; + case PCIE_LNK_X2: + adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1); + break; + case PCIE_LNK_X1: + adev->pm.pcie_mlw_mask |= CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1; + break; + default: + break; + } + } + /* platform caps */ if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) { adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK; } else { switch (platform_link_width) { case PCIE_LNK_X32: - adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); + adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); break; case PCIE_LNK_X16: - adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | - 
CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); + adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); break; case PCIE_LNK_X12: - adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); + adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); break; case PCIE_LNK_X8: - adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); + adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); break; case PCIE_LNK_X4: - adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); + adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); break; case PCIE_LNK_X2: - adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); + adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); break; case PCIE_LNK_X1: - adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; + adev->pm.pcie_mlw_mask |= CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; break; default: break; @@ -5981,18 +6407,27 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, struct amdgpu_device *peer_adev) { #ifdef CONFIG_HSA_AMD_P2P - uint64_t address_mask = peer_adev->dev->dma_mask ? - ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1); - resource_size_t aper_limit = - adev->gmc.aper_base + adev->gmc.aper_size - 1; bool p2p_access = !adev->gmc.xgmi.connected_to_cpu && !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0); + if (!p2p_access) + dev_info(adev->dev, "PCIe P2P access from peer device %s is not supported by the chipset\n", + pci_name(peer_adev->pdev)); + + bool is_large_bar = adev->gmc.visible_vram_size && + adev->gmc.real_vram_size == adev->gmc.visible_vram_size; + bool p2p_addressable = amdgpu_device_check_iommu_remap(peer_adev); - return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size && - adev->gmc.real_vram_size == adev->gmc.visible_vram_size && - !(adev->gmc.aper_base & address_mask || - aper_limit & address_mask)); + if (!p2p_addressable) { + uint64_t address_mask = peer_adev->dev->dma_mask ? 
+ ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1); + resource_size_t aper_limit = + adev->gmc.aper_base + adev->gmc.aper_size - 1; + + p2p_addressable = !(adev->gmc.aper_base & address_mask || + aper_limit & address_mask); + } + return pcie_p2p && is_large_bar && p2p_access && p2p_addressable; #else return false; #endif @@ -6030,7 +6465,7 @@ int amdgpu_device_baco_exit(struct drm_device *dev) adev->nbio.funcs->enable_doorbell_interrupt) adev->nbio.funcs->enable_doorbell_interrupt(adev, true); - if (amdgpu_passthrough(adev) && + if (amdgpu_passthrough(adev) && adev->nbio.funcs && adev->nbio.funcs->clear_doorbell_interrupt) adev->nbio.funcs->clear_doorbell_interrupt(adev); @@ -6130,19 +6565,11 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev) struct amdgpu_reset_context reset_context; u32 memsize; struct list_head device_list; - struct amdgpu_hive_info *hive; - int hive_ras_recovery = 0; - struct amdgpu_ras *ras; /* PCI error slot reset should be skipped during RAS recovery */ - hive = amdgpu_get_xgmi_hive(adev); - if (hive) { - hive_ras_recovery = atomic_read(&hive->ras_recovery); - amdgpu_put_xgmi_hive(hive); - } - ras = amdgpu_ras_get_context(adev); - if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) && - ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery)) + if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) || + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) && + amdgpu_ras_in_recovery(adev)) return PCI_ERS_RESULT_RECOVERED; DRM_INFO("PCI error: slot reset callback!!\n"); @@ -6225,7 +6652,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev) if (!amdgpu_ring_sched_ready(ring)) continue; - drm_sched_start(&ring->sched, true); + drm_sched_start(&ring->sched, 0); } amdgpu_device_unset_mp1_state(adev); @@ -6238,6 +6665,9 @@ bool amdgpu_device_cache_pci_state(struct pci_dev *pdev) struct amdgpu_device *adev = drm_to_adev(dev); int r; + if (amdgpu_sriov_vf(adev)) + return false; + r = pci_save_state(pdev); if (!r) { kfree(adev->pci_state); @@ -6385,6 +6815,22 @@ void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev, } /** + * amdgpu_device_get_gang - return a reference to the current gang + * @adev: amdgpu_device pointer + * + * Returns: A new reference to the current gang leader. + */ +struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev) +{ + struct dma_fence *fence; + + rcu_read_lock(); + fence = dma_fence_get_rcu_safe(&adev->gang_submit); + rcu_read_unlock(); + return fence; +}
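amdgpu_device_switch_gang(), shown below, consumes this new helper in a retry loop: re-read the published gang fence, stop if it is already ours, otherwise try to swap our fence in. A userspace analogue of that loop using C11 atomics (the kernel version additionally relies on RCU and dma_fence reference counting, which this sketch deliberately does not model; all names here are illustrative):

#include <stdatomic.h>
#include <stdio.h>

/* published pointer that all submitters see; stands in for gang_submit */
static _Atomic(int *) current_gang;

static int *switch_gang(int *gang)
{
	int *old = atomic_load(&current_gang);

	do {
		if (old == gang)
			break; /* our gang is already installed */
	} while (!atomic_compare_exchange_weak(&current_gang, &old, gang));

	return old; /* previous gang; the caller releases its reference */
}

int main(void)
{
	static int gang_a, gang_b;

	switch_gang(&gang_a);
	printf("previous gang: %p\n", (void *)switch_gang(&gang_b));
	return 0;
}

Note how atomic_compare_exchange_weak() reloads the observed value on failure, so each retry re-checks whether the freshly published gang is already the one being installed, mirroring the kernel loop's structure.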
+ +/** * amdgpu_device_switch_gang - switch to a new gang * @adev: amdgpu_device pointer * @gang: the gang to switch to @@ -6400,10 +6846,7 @@ struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev, do { dma_fence_put(old); - rcu_read_lock(); - old = dma_fence_get_rcu_safe(&adev->gang_submit); - rcu_read_unlock(); - + old = amdgpu_device_get_gang(adev); if (old == gang) break; @@ -6485,3 +6928,47 @@ uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev, } return ret; } + +ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring) +{ + ssize_t size = 0; + + if (!ring || !ring->adev) + return size; + + if (amdgpu_device_should_recover_gpu(ring->adev)) + size |= AMDGPU_RESET_TYPE_FULL; + + if (unlikely(!ring->adev->debug_disable_soft_recovery) && + !amdgpu_sriov_vf(ring->adev) && ring->funcs->soft_recovery) + size |= AMDGPU_RESET_TYPE_SOFT_RESET; + + return size; +} + +ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset) +{ + ssize_t size = 0; + + if (supported_reset == 0) { + size += sysfs_emit_at(buf, size, "unsupported"); + size += sysfs_emit_at(buf, size, "\n"); + return size; + } + + if (supported_reset & AMDGPU_RESET_TYPE_SOFT_RESET) + size += sysfs_emit_at(buf, size, "soft "); + + if (supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE) + size += sysfs_emit_at(buf, size, "queue "); + + if (supported_reset & AMDGPU_RESET_TYPE_PER_PIPE) + size += sysfs_emit_at(buf, size, "pipe "); + + if (supported_reset & AMDGPU_RESET_TYPE_FULL) + size += sysfs_emit_at(buf, size, "full "); + + size += sysfs_emit_at(buf, size, "\n"); + return size; +}
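For reference, a userspace rendering of the amdgpu_show_reset_mask() logic just added, with snprintf() standing in for the kernel's sysfs_emit_at(); the mask bit definitions are illustrative values, not the driver's AMDGPU_RESET_TYPE_* constants:

#include <stdio.h>

#define RESET_SOFT  (1u << 0)
#define RESET_QUEUE (1u << 1)
#define RESET_PIPE  (1u << 2)
#define RESET_FULL  (1u << 3)

static int show_reset_mask(char *buf, size_t len, unsigned int mask)
{
	int size = 0;

	if (!mask)
		return snprintf(buf, len, "unsupported\n");

	/* one keyword per supported bit, each followed by a space */
	if (mask & RESET_SOFT)
		size += snprintf(buf + size, len - size, "soft ");
	if (mask & RESET_QUEUE)
		size += snprintf(buf + size, len - size, "queue ");
	if (mask & RESET_PIPE)
		size += snprintf(buf + size, len - size, "pipe ");
	if (mask & RESET_FULL)
		size += snprintf(buf + size, len - size, "full ");

	size += snprintf(buf + size, len - size, "\n");
	return size;
}

int main(void)
{
	char buf[64];

	show_reset_mask(buf, sizeof(buf), RESET_SOFT | RESET_FULL);
	fputs(buf, stdout); /* prints: soft full */
	return 0;
}

As in the kernel version, each keyword carries a trailing space and the newline is appended once at the end, so sysfs consumers get a single space-separated line.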