author	Le Ma <le.ma@amd.com>	2022-05-24 11:22:49 +0800
committer	Alex Deucher <alexander.deucher@amd.com>	2023-04-18 16:28:55 -0400
commit	d51ac6d0a23caf1005cb640f8533161c5d2dd0c0 (patch)
tree	0873c9c0b3246a5564bb3100e4b105a47e2a5d00 /drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
parent	ec08571aca7c5e6bf2d1820db9c8aaa104eb9b68 (diff)
drm/amdgpu: add xcc index argument to select_sh_se function v2
v1: To support multiple XCD case (Le)
v2: introduce xcc index to gfx_v11_0_select_sh_se (Hawking)

Signed-off-by: Le Ma <le.ma@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
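For illustration only, a minimal standalone sketch (hypothetical names, not the real amdgpu API) of the calling convention this patch introduces: every call site now passes an explicit xcc index, 0xffffffff in a field still means "broadcast to all", and single-XCD parts such as gfx v6 simply pass 0.

/*
 * Standalone sketch of the pattern touched by this patch: select a
 * specific SE/SH, then restore broadcast mode, always with an explicit
 * xcc index. select_se_sh() here is a stand-in that only traces its
 * arguments; the real callback programs GRBM_GFX_INDEX.
 */
#include <stdint.h>
#include <stdio.h>

#define BROADCAST 0xffffffffu   /* "all SEs/SHs/instances" sentinel */

static void select_se_sh(uint32_t se, uint32_t sh, uint32_t instance, int xcc_id)
{
	printf("se=0x%08x sh=0x%08x instance=0x%08x xcc=%d\n",
	       se, sh, instance, xcc_id);
}

int main(void)
{
	/* per-SE/SH programming followed by a broadcast restore, mirroring
	 * the loops in this diff; gfx v6 has a single XCD, so xcc index 0 */
	for (uint32_t se = 0; se < 2; se++)
		for (uint32_t sh = 0; sh < 2; sh++)
			select_se_sh(se, sh, BROADCAST, 0);
	select_se_sh(BROADCAST, BROADCAST, BROADCAST, 0);
	return 0;
}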
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index d9ce4d1c50e4..7cb72bf1acdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1285,7 +1285,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
}
static void gfx_v6_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
- u32 sh_num, u32 instance)
+ u32 sh_num, u32 instance, int xcc_id)
{
u32 data;
@@ -1438,12 +1438,12 @@ static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
}
/* GRBM_GFX_INDEX has a different offset on SI */
- gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0);
WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
}
/* GRBM_GFX_INDEX has a different offset on SI */
- gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
}
static void gfx_v6_0_setup_rb(struct amdgpu_device *adev)
@@ -1459,14 +1459,14 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0);
data = gfx_v6_0_get_rb_active_bitmap(adev);
active_rbs |= data <<
((i * adev->gfx.config.max_sh_per_se + j) *
rb_bitmap_width_per_sh);
}
}
- gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
adev->gfx.config.backend_enable_mask = active_rbs;
adev->gfx.config.num_rbs = hweight32(active_rbs);
@@ -1487,7 +1487,7 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev)
/* cache the values for userspace */
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0);
adev->gfx.config.rb_config[i][j].rb_backend_disable =
RREG32(mmCC_RB_BACKEND_DISABLE);
adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
@@ -1496,7 +1496,7 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev)
RREG32(mmPA_SC_RASTER_CONFIG);
}
}
- gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -1535,7 +1535,7 @@ static void gfx_v6_0_setup_spi(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0);
data = RREG32(mmSPI_STATIC_THREAD_MGMT_3);
active_cu = gfx_v6_0_get_cu_enabled(adev);
@@ -1550,7 +1550,7 @@ static void gfx_v6_0_setup_spi(struct amdgpu_device *adev)
}
}
}
- gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -2391,7 +2391,7 @@ static void gfx_v6_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
WREG32_FIELD(RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
if (!enable) {
- gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
WREG32(mmSPI_LB_CU_MASK, 0x00ff);
}
}
@@ -3571,7 +3571,7 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
mask = 1;
ao_bitmap = 0;
counter = 0;
- gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0);
if (i < 4 && j < 2)
gfx_v6_0_set_user_cu_inactive_bitmap(
adev, disable_masks[i * 2 + j]);
@@ -3593,7 +3593,7 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
}
}
- gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;