Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c	97
1 file changed, 57 insertions, 40 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index 171a658135b5..e1d63bed84bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -37,12 +37,13 @@
#include "gmc/gmc_8_2_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
-#include "gca/gfx_8_0_sh_mask.h"
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "mxgpu_vi.h"
+#include "amdgpu_reset.h"
+
/* VI golden setting */
static const u32 xgpu_fiji_mgcg_cgcg_init[] = {
mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
@@ -279,32 +280,32 @@ void xgpu_vi_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_FIJI:
- amdgpu_program_register_sequence(adev,
- xgpu_fiji_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(
- xgpu_fiji_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- xgpu_fiji_golden_settings_a10,
- (const u32)ARRAY_SIZE(
- xgpu_fiji_golden_settings_a10));
- amdgpu_program_register_sequence(adev,
- xgpu_fiji_golden_common_all,
- (const u32)ARRAY_SIZE(
- xgpu_fiji_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ xgpu_fiji_mgcg_cgcg_init,
+ ARRAY_SIZE(
+ xgpu_fiji_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ xgpu_fiji_golden_settings_a10,
+ ARRAY_SIZE(
+ xgpu_fiji_golden_settings_a10));
+ amdgpu_device_program_register_sequence(adev,
+ xgpu_fiji_golden_common_all,
+ ARRAY_SIZE(
+ xgpu_fiji_golden_common_all));
break;
case CHIP_TONGA:
- amdgpu_program_register_sequence(adev,
- xgpu_tonga_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(
- xgpu_tonga_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- xgpu_tonga_golden_settings_a11,
- (const u32)ARRAY_SIZE(
- xgpu_tonga_golden_settings_a11));
- amdgpu_program_register_sequence(adev,
- xgpu_tonga_golden_common_all,
- (const u32)ARRAY_SIZE(
- xgpu_tonga_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ xgpu_tonga_mgcg_cgcg_init,
+ ARRAY_SIZE(
+ xgpu_tonga_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ xgpu_tonga_golden_settings_a11,
+ ARRAY_SIZE(
+ xgpu_tonga_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ xgpu_tonga_golden_common_all,
+ ARRAY_SIZE(
+ xgpu_tonga_golden_common_all));
break;
default:
BUG_ON("Doesn't support chip type.\n");
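
The change in this hunk is mechanical: amdgpu_program_register_sequence() was renamed to amdgpu_device_program_register_sequence(), and the (const u32) casts were dropped because ARRAY_SIZE() already yields an integer constant expression. A minimal sketch of the call pattern (the table and function names below are hypothetical; entries are consumed as register/mask/value triplets):

static const u32 example_mgcg_init[] = {
	/* register offset, AND mask, OR value: three entries per write */
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
};

static void example_program_golden(struct amdgpu_device *adev)
{
	amdgpu_device_program_register_sequence(adev, example_mgcg_init,
						ARRAY_SIZE(example_mgcg_init));
}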
@@ -333,7 +334,7 @@ static void xgpu_vi_mailbox_send_ack(struct amdgpu_device *adev)
break;
}
mdelay(1);
- timeout -=1;
+ timeout -= 1;
reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
}
@@ -446,8 +447,10 @@ static int xgpu_vi_send_access_requests(struct amdgpu_device *adev,
request == IDH_REQ_GPU_FINI_ACCESS ||
request == IDH_REQ_GPU_RESET_ACCESS) {
r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
- if (r)
- pr_err("Doesn't get ack from pf, continue\n");
+ if (r) {
+ pr_err("Doesn't get ack from pf, give up\n");
+ return r;
+ }
}
return 0;
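
With the hunk above, a missing IDH_READY_TO_ACCESS_GPU ack is now fatal to the request rather than merely logged, so callers see the failure. A hedged sketch of the effect on a caller (the helper below is hypothetical; in this file the real caller is xgpu_vi_request_full_gpu_access()):

static int example_request_init_access(struct amdgpu_device *adev)
{
	int r = xgpu_vi_send_access_requests(adev, IDH_REQ_GPU_INIT_ACCESS);

	if (r)	/* the PF never acked; abort instead of racing ahead */
		return r;

	/* ... exclusive access granted, safe to touch the hardware ... */
	return 0;
}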
@@ -458,6 +461,11 @@ static int xgpu_vi_request_reset(struct amdgpu_device *adev)
return xgpu_vi_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}
+static int xgpu_vi_wait_reset_cmpl(struct amdgpu_device *adev)
+{
+ return xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL);
+}
+
static int xgpu_vi_request_full_gpu_access(struct amdgpu_device *adev,
bool init)
{
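
The new xgpu_vi_wait_reset_cmpl() blocks until the hypervisor posts IDH_FLR_NOTIFICATION_CMPL; it is exported through the .wait_reset hook added at the end of this diff. A sketch of the generic dispatcher that consumes such hooks, modelled on amdgpu_virt.c (treat the exact body as illustrative):

int example_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);	/* here: xgpu_vi_wait_reset_cmpl() */
}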
@@ -507,14 +515,18 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
- /* wait until RCV_MSG become 3 */
- if (xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
- pr_err("failed to recieve FLR_CMPL\n");
- return;
- }
-
/* Trigger recovery due to world switch failure */
- amdgpu_sriov_gpu_reset(adev, NULL);
+ if (amdgpu_device_should_recover_gpu(adev)) {
+ struct amdgpu_reset_context reset_context;
+ memset(&reset_context, 0, sizeof(reset_context));
+
+ reset_context.method = AMD_RESET_METHOD_NONE;
+ reset_context.reset_req_dev = adev;
+ clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ set_bit(AMDGPU_HOST_FLR, &reset_context.flags);
+
+ amdgpu_device_gpu_recover(adev, NULL, &reset_context);
+ }
}
static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
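
The FLR worker above no longer polls for IDH_FLR_NOTIFICATION_CMPL itself; it hands the event to the shared recovery path with a context flagged as a host-initiated FLR. Restated as a standalone helper (a sketch, assuming AMDGPU_HOST_FLR tells the recovery core that the host performs the actual reset, so the guest waits for completion via .wait_reset instead of requesting one):

static void example_build_host_flr_context(struct amdgpu_device *adev,
					   struct amdgpu_reset_context *ctx)
{
	memset(ctx, 0, sizeof(*ctx));
	ctx->method = AMD_RESET_METHOD_NONE;	/* let the SR-IOV path decide */
	ctx->reset_req_dev = adev;
	clear_bit(AMDGPU_NEED_FULL_RESET, &ctx->flags);
	set_bit(AMDGPU_HOST_FLR, &ctx->flags);	/* reset is driven by the host */
}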
@@ -537,14 +549,17 @@ static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
{
int r;
- /* trigger gpu-reset by hypervisor only if TDR disbaled */
- if (amdgpu_lockup_timeout == 0) {
+ /* trigger gpu-reset by hypervisor only if TDR disabled */
+ if (!amdgpu_gpu_recovery) {
/* see what event we get */
r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
/* only handle FLR_NOTIFY now */
if (!r)
- schedule_work(&adev->virt.flr_work);
+ WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+ __func__);
}
return 0;
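
Note the scheduling change in this hunk: the work is queued on the reset domain's ordered workqueue instead of the system workqueue, serializing FLR handling with other resets in the same domain. Upstream, amdgpu_reset_domain_schedule() is a thin wrapper over queue_work(), so a false return (which is what trips the WARN_ONCE) means flr_work was already pending. A hedged sketch of that wrapper, modelled on amdgpu_reset.h:

static inline bool example_reset_domain_schedule(struct amdgpu_reset_domain *domain,
						 struct work_struct *work)
{
	return queue_work(domain->wq, work);	/* false if already queued */
}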
@@ -572,11 +587,11 @@ int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev)
{
int r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
if (r) {
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
return r;
@@ -613,4 +628,6 @@ const struct amdgpu_virt_ops xgpu_vi_virt_ops = {
.req_full_gpu = xgpu_vi_request_full_gpu_access,
.rel_full_gpu = xgpu_vi_release_full_gpu_access,
.reset_gpu = xgpu_vi_request_reset,
+ .wait_reset = xgpu_vi_wait_reset_cmpl,
+ .trans_msg = NULL, /* Does not need to trans VF errors to host. */
};
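
Together the two new entries complete the split reset flow for VI: .reset_gpu asks the host for a reset and .wait_reset blocks until the host signals completion. A hedged sketch of how a consumer might sequence them (hypothetical helper; the real ordering lives in the shared SR-IOV recovery code):

static int example_sriov_reset_flow(struct amdgpu_device *adev)
{
	const struct amdgpu_virt_ops *ops = adev->virt.ops;
	int r;

	r = ops->reset_gpu(adev);	/* sends IDH_REQ_GPU_RESET_ACCESS */
	if (r)
		return r;

	return ops->wait_reset(adev);	/* polls IDH_FLR_NOTIFICATION_CMPL */
}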