Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  237
1 file changed, 181 insertions(+), 56 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 57ec108b5972..130a9adf09ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -71,6 +71,8 @@
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_drv.h>
+
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -82,6 +84,7 @@ MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/yellow_carp_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
@@ -119,6 +122,8 @@ const char *amdgpu_asic_name[] = {
"NAVY_FLOUNDER",
"VANGOGH",
"DIMGREY_CAVEFISH",
+ "BEIGE_GOBY",
+ "YELLOW_CARP",
"LAST",
};
@@ -262,6 +267,21 @@ bool amdgpu_device_supports_baco(struct drm_device *dev)
return amdgpu_asic_supports_baco(adev);
}
+/**
+ * amdgpu_device_supports_smart_shift - Is the device dGPU with
+ * smart shift support
+ *
+ * @dev: drm_device pointer
+ *
+ * Returns true if the device is a dGPU with Smart Shift support,
+ * otherwise returns false.
+ */
+bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
+{
+ return (amdgpu_device_supports_boco(dev) &&
+ amdgpu_acpi_is_power_shift_control_supported());
+}
+
/*
* VRAM access helper functions
*/
@@ -281,7 +301,10 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
unsigned long flags;
uint32_t hi = ~0;
uint64_t last;
+ int idx;
+ if (!drm_dev_enter(&adev->ddev, &idx))
+ return;
#ifdef CONFIG_64BIT
last = min(pos + size, adev->gmc.visible_vram_size);
@@ -292,15 +315,15 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
if (write) {
memcpy_toio(addr, buf, count);
mb();
- amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
} else {
- amdgpu_asic_invalidate_hdp(adev, NULL);
+ amdgpu_device_invalidate_hdp(adev, NULL);
mb();
memcpy_fromio(buf, addr, count);
}
if (count == size)
- return;
+ goto exit;
pos += count;
buf += count / 4;
@@ -323,6 +346,11 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
*buf++ = RREG32_NO_KIQ(mmMM_DATA);
}
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+
+#ifdef CONFIG_64BIT
+exit:
+#endif
+ drm_dev_exit(idx);
}
/*
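The drm_dev_enter()/drm_dev_exit() pairing added above is the standard DRM guard against touching MMIO after the device has been unplugged. A minimal, self-contained sketch of the same pattern, assuming a hypothetical helper name and register offset (only drm_dev_enter()/drm_dev_exit() and readl() are the real APIs):

#include <linux/io.h>
#include <drm/drm_drv.h>

static u32 my_guarded_mmio_read(struct drm_device *ddev,
				void __iomem *rmmio, u32 offset)
{
	u32 val = 0;
	int idx;

	/* drm_dev_enter() returns false once the device is unplugged. */
	if (!drm_dev_enter(ddev, &idx))
		return val;

	val = readl(rmmio + offset);

	/* Every successful drm_dev_enter() must be paired with an exit. */
	drm_dev_exit(idx);
	return val;
}

The goto/label dance inside amdgpu_device_vram_access() exists only so the early "count == size" return still reaches drm_dev_exit().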
@@ -332,7 +360,7 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
- if (adev->in_pci_err_recovery)
+ if (adev->no_hw_access)
return true;
#ifdef CONFIG_LOCKDEP
@@ -490,7 +518,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
adev->gfx.rlc.funcs &&
adev->gfx.rlc.funcs->is_rlcg_access_range) {
if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
- return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0);
+ return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0, 0);
} else {
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
}
@@ -1820,6 +1848,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
+ case CHIP_BEIGE_GOBY:
default:
return 0;
case CHIP_VEGA10:
@@ -1857,6 +1886,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
case CHIP_VANGOGH:
chip_name = "vangogh";
break;
+ case CHIP_YELLOW_CARP:
+ chip_name = "yellow_carp";
+ break;
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
@@ -2033,9 +2065,13 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
+ case CHIP_BEIGE_GOBY:
case CHIP_VANGOGH:
+ case CHIP_YELLOW_CARP:
if (adev->asic_type == CHIP_VANGOGH)
adev->family = AMDGPU_FAMILY_VGH;
+ else if (adev->asic_type == CHIP_YELLOW_CARP)
+ adev->family = AMDGPU_FAMILY_YC;
else
adev->family = AMDGPU_FAMILY_NV;
@@ -2571,34 +2607,26 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
return 0;
}
-/**
- * amdgpu_device_ip_fini - run fini for hardware IPs
- *
- * @adev: amdgpu_device pointer
- *
- * Main teardown pass for hardware IPs. The list of all the hardware
- * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
- * are run. hw_fini tears down the hardware associated with each IP
- * and sw_fini tears down any software state associated with each IP.
- * Returns 0 on success, negative error code on failure.
- */
-static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
+static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
{
int i, r;
- if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
- amdgpu_virt_release_ras_err_handler_data(adev);
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].version->funcs->early_fini)
+ continue;
- amdgpu_ras_pre_fini(adev);
+ r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
+ if (r) {
+ DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ }
+ }
- if (adev->gmc.xgmi.num_physical_nodes > 1)
- amdgpu_xgmi_remove_device(adev);
+ amdgpu_amdkfd_suspend(adev, false);
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
- amdgpu_amdkfd_device_fini(adev);
-
/* need to disable SMC first */
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.hw)
@@ -2629,6 +2657,33 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hw = false;
}
+ return 0;
+}
+
+/**
+ * amdgpu_device_ip_fini - run fini for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main teardown pass for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
+ * are run. hw_fini tears down the hardware associated with each IP
+ * and sw_fini tears down any software state associated with each IP.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
+ amdgpu_virt_release_ras_err_handler_data(adev);
+
+ amdgpu_ras_pre_fini(adev);
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1)
+ amdgpu_xgmi_remove_device(adev);
+
+ amdgpu_amdkfd_device_fini_sw(adev);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.sw)
@@ -2856,7 +2911,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
AMD_IP_BLOCK_TYPE_IH,
};
- for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
+ for (i = 0; i < adev->num_ip_blocks; i++) {
int j;
struct amdgpu_ip_block *block;
@@ -3034,7 +3089,7 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
if (amdgpu_sriov_vf(adev)) {
if (adev->is_atom_fw) {
- if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
+ if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
} else {
if (amdgpu_atombios_has_gpu_virtualization_table(adev))
@@ -3097,7 +3152,9 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
+ case CHIP_BEIGE_GOBY:
case CHIP_VANGOGH:
+ case CHIP_YELLOW_CARP:
#endif
return amdgpu_dc != 0;
#endif
@@ -3181,8 +3238,8 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
int ret = 0;
/*
- * By default timeout for non compute jobs is 10000.
- * And there is no timeout enforced on compute jobs.
+ * By default timeout for non compute jobs is 10000
+ * and 60000 for compute jobs.
* In SR-IOV or passthrough mode, timeout for compute
* jobs are 60000 by default.
*/
@@ -3191,10 +3248,8 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
- else if (amdgpu_passthrough(adev))
- adev->compute_timeout = msecs_to_jiffies(60000);
else
- adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
+ adev->compute_timeout = msecs_to_jiffies(60000);
if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
while ((timeout_setting = strsep(&input, ",")) &&
@@ -3251,7 +3306,6 @@ static const struct attribute *amdgpu_dev_attributes[] = {
NULL
};
-
/**
* amdgpu_device_init - initialize the driver
*
@@ -3653,6 +3707,27 @@ failed_unmap:
return r;
}
+static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
+{
+ /* Clear all CPU mappings pointing to this device */
+ unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
+
+ /* Unmap all mapped bars - Doorbell, registers and VRAM */
+ amdgpu_device_doorbell_fini(adev);
+
+ iounmap(adev->rmmio);
+ adev->rmmio = NULL;
+ if (adev->mman.aper_base_kaddr)
+ iounmap(adev->mman.aper_base_kaddr);
+ adev->mman.aper_base_kaddr = NULL;
+
+ /* Memory manager related */
+ if (!adev->gmc.xgmi.connected_to_cpu) {
+ arch_phys_wc_del(adev->gmc.vram_mtrr);
+ arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
+ }
+}
+
/**
* amdgpu_device_fini - tear down the driver
*
@@ -3661,15 +3736,13 @@ failed_unmap:
* Tear down the driver info (all asics).
* Called at driver shutdown.
*/
-void amdgpu_device_fini(struct amdgpu_device *adev)
+void amdgpu_device_fini_hw(struct amdgpu_device *adev)
{
dev_info(adev->dev, "amdgpu: finishing device.\n");
flush_delayed_work(&adev->delayed_init_work);
ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
adev->shutdown = true;
- kfree(adev->pci_state);
-
/* make sure IB test finished before entering exclusive mode
* to avoid preemption on IB test
* */
@@ -3686,11 +3759,29 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
else
drm_atomic_helper_shutdown(adev_to_drm(adev));
}
- amdgpu_fence_driver_fini(adev);
+ amdgpu_fence_driver_fini_hw(adev);
+
if (adev->pm_sysfs_en)
amdgpu_pm_sysfs_fini(adev);
+ if (adev->ucode_sysfs_en)
+ amdgpu_ucode_sysfs_fini(adev);
+ sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
+
amdgpu_fbdev_fini(adev);
+
+ amdgpu_irq_fini_hw(adev);
+
+ amdgpu_device_ip_fini_early(adev);
+
+ amdgpu_gart_dummy_page_fini(adev);
+
+ amdgpu_device_unmap_mmio(adev);
+}
+
+void amdgpu_device_fini_sw(struct amdgpu_device *adev)
+{
amdgpu_device_ip_fini(adev);
+ amdgpu_fence_driver_fini_sw(adev);
release_firmware(adev->firmware.gpu_info_fw);
adev->firmware.gpu_info_fw = NULL;
adev->accel_working = false;
@@ -3712,18 +3803,14 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
}
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, NULL, NULL, NULL);
- iounmap(adev->rmmio);
- adev->rmmio = NULL;
- amdgpu_device_doorbell_fini(adev);
-
- if (adev->ucode_sysfs_en)
- amdgpu_ucode_sysfs_fini(adev);
- sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
amdgpu_pmu_fini(adev);
if (adev->mman.discovery_bin)
amdgpu_discovery_fini(adev);
+
+ kfree(adev->pci_state);
+
}
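The old single amdgpu_device_fini() is split above into a hardware-facing stage (amdgpu_device_fini_hw()) and a software stage (amdgpu_device_fini_sw()). A hedged sketch of how a remove path could drive the two stages in order; my_pci_remove() is a hypothetical wrapper, the real call sites live in the driver's PCI remove and DRM release callbacks:

static void my_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *ddev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	drm_dev_unplug(ddev);		/* make drm_dev_enter() fail from now on */
	amdgpu_device_fini_hw(adev);	/* quiesce HW, unmap doorbells and MMIO */

	/* ... later, once remaining references are dropped ... */
	amdgpu_device_fini_sw(adev);	/* free the remaining software state */
}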
@@ -3743,12 +3830,15 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
{
struct amdgpu_device *adev = drm_to_adev(dev);
- int r;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
adev->in_suspend = true;
+
+ if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
+ DRM_WARN("smart shift update failed\n");
+
drm_kms_helper_poll_disable(dev);
if (fbcon)
@@ -3758,7 +3848,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
amdgpu_ras_suspend(adev);
- r = amdgpu_device_ip_suspend_phase1(adev);
+ amdgpu_device_ip_suspend_phase1(adev);
if (!adev->in_s0ix)
amdgpu_amdkfd_suspend(adev, adev->in_runpm);
@@ -3768,7 +3858,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
amdgpu_fence_driver_suspend(adev);
- r = amdgpu_device_ip_suspend_phase2(adev);
+ amdgpu_device_ip_suspend_phase2(adev);
/* evict remaining vram memory
* This second call to evict vram is to evict the gart page table
* using the CPU.
@@ -3858,6 +3948,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
#endif
adev->in_suspend = false;
+ if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
+ DRM_WARN("smart shift update failed\n");
+
return 0;
}
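Both the suspend and resume paths above notify the platform of the Smart Shift power state (D3 on the way down, D0 on the way back up) and only warn on failure. A small sketch that factors the pattern into one helper; my_notify_smart_shift() and the gate on amdgpu_device_supports_smart_shift() are assumptions for illustration, not part of this patch:

static void my_notify_smart_shift(struct drm_device *dev, bool entering_d3)
{
	/* Assumed gate: only dGPUs with Smart Shift support need the update. */
	if (!amdgpu_device_supports_smart_shift(dev))
		return;

	if (amdgpu_acpi_smart_shift_update(dev, entering_d3 ?
					   AMDGPU_SS_DEV_D3 : AMDGPU_SS_DEV_D0))
		DRM_WARN("smart shift update failed\n");
}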
@@ -4031,6 +4124,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
{
struct dma_fence *fence = NULL, *next = NULL;
struct amdgpu_bo *shadow;
+ struct amdgpu_bo_vm *vmbo;
long r = 1, tmo;
if (amdgpu_sriov_runtime(adev))
@@ -4040,12 +4134,12 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
dev_info(adev->dev, "recover vram bo from shadow start\n");
mutex_lock(&adev->shadow_list_lock);
- list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
-
+ list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
+ shadow = &vmbo->bo;
/* No need to recover an evicted BO */
- if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
- shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
- shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
+ if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
+ shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
+ shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
continue;
r = amdgpu_bo_restore_shadow(shadow, &next);
@@ -4634,7 +4728,7 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
return 0;
}
-void amdgpu_device_recheck_guilty_jobs(
+static void amdgpu_device_recheck_guilty_jobs(
struct amdgpu_device *adev, struct list_head *device_list_handle,
struct amdgpu_reset_context *reset_context)
{
@@ -4937,6 +5031,8 @@ skip_hw_reset:
amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
} else {
dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
+ if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
+ DRM_WARN("smart shift update failed\n");
}
}
@@ -5125,7 +5221,8 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
return -ENOTSUPP;
- if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
+ if (ras && adev->ras_enabled &&
+ adev->nbio.funcs->enable_doorbell_interrupt)
adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
return amdgpu_dpm_baco_enter(adev);
@@ -5144,7 +5241,8 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
if (ret)
return ret;
- if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
+ if (ras && adev->ras_enabled &&
+ adev->nbio.funcs->enable_doorbell_interrupt)
adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
return 0;
@@ -5290,9 +5388,9 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
- adev->in_pci_err_recovery = true;
+ adev->no_hw_access = true;
r = amdgpu_device_pre_asic_reset(adev, &reset_context);
- adev->in_pci_err_recovery = false;
+ adev->no_hw_access = false;
if (r)
goto out;
@@ -5387,4 +5485,31 @@ bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
return true;
}
+void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+#ifdef CONFIG_X86_64
+ if (adev->flags & AMD_IS_APU)
+ return;
+#endif
+ if (adev->gmc.xgmi.connected_to_cpu)
+ return;
+ if (ring && ring->funcs->emit_hdp_flush)
+ amdgpu_ring_emit_hdp_flush(ring);
+ else
+ amdgpu_asic_flush_hdp(adev, ring);
+}
+
+void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+#ifdef CONFIG_X86_64
+ if (adev->flags & AMD_IS_APU)
+ return;
+#endif
+ if (adev->gmc.xgmi.connected_to_cpu)
+ return;
+
+ amdgpu_asic_invalidate_hdp(adev, ring);
+}
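
The two wrappers above centralize the APU and CPU-connected-XGMI checks that individual call sites previously carried. A hedged sketch of the write/flush and invalidate/read ordering they are meant to preserve around CPU access to VRAM, mirroring amdgpu_device_vram_access() earlier in this patch (the helper names are hypothetical):

static void my_vram_write_u32(struct amdgpu_device *adev,
			      void __iomem *addr, u32 val)
{
	writel(val, addr);
	mb();					/* order the MMIO write */
	amdgpu_device_flush_hdp(adev, NULL);	/* push it through the HDP cache */
}

static u32 my_vram_read_u32(struct amdgpu_device *adev, void __iomem *addr)
{
	amdgpu_device_invalidate_hdp(adev, NULL);	/* drop stale HDP contents */
	mb();
	return readl(addr);
}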