Diffstat (limited to 'drivers/gpu/drm/radeon/si.c')
 drivers/gpu/drm/radeon/si.c | 48 +++++++++++++++++++++++++++++++-----------------
 1 file changed, 31 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 011779bd2b3d..3a0b973e8a96 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3057,7 +3057,7 @@ static void si_gpu_init(struct radeon_device *rdev)
u32 sx_debug_1;
u32 hdp_host_path_cntl;
u32 tmp;
- int i, j, k;
+ int i, j;
switch (rdev->family) {
case CHIP_TAHITI:
@@ -3255,12 +3255,11 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.max_sh_per_se,
rdev->config.si.max_cu_per_sh);
+ rdev->config.si.active_cus = 0;
for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
- for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
- rdev->config.si.active_cus +=
- hweight32(si_get_cu_active_bitmap(rdev, i, j));
- }
+ rdev->config.si.active_cus +=
+ hweight32(si_get_cu_active_bitmap(rdev, i, j));
}
}
@@ -3541,7 +3540,7 @@ static int si_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
radeon_ring_write(ring, 0xc000);
radeon_ring_write(ring, 0xe000);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
si_cp_enable(rdev, true);
@@ -3570,7 +3569,7 @@ static int si_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
ring = &rdev->ring[i];
@@ -3580,7 +3579,7 @@ static int si_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(ring, 0);
- radeon_ring_unlock_commit(rdev, ring);
+ radeon_ring_unlock_commit(rdev, ring, false);
}
return 0;
@@ -4291,10 +4290,10 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
for (i = 1; i < 16; i++) {
if (i < 8)
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
- rdev->gart.table_addr >> 12);
+ rdev->vm_manager.saved_table_addr[i]);
else
WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
- rdev->gart.table_addr >> 12);
+ rdev->vm_manager.saved_table_addr[i]);
}
/* enable context1-15 */
@@ -4326,6 +4325,17 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
static void si_pcie_gart_disable(struct radeon_device *rdev)
{
+ unsigned i;
+
+ for (i = 1; i < 16; ++i) {
+ uint32_t reg;
+ if (i < 8)
+ reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2);
+ else
+ reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2);
+ rdev->vm_manager.saved_table_addr[i] = RREG32(reg);
+ }
+
/* Disable all tables */
WREG32(VM_CONTEXT0_CNTL, 0);
WREG32(VM_CONTEXT1_CNTL, 0);
@@ -5028,7 +5038,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
/* flush hdp cache */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
radeon_ring_write(ring, 0);
@@ -5036,7 +5046,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
/* bits 0-15 are the VM contexts0-15 */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
radeon_ring_write(ring, 0);
@@ -6306,17 +6316,17 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
wptr = RREG32(IH_RB_WPTR);
if (wptr & RB_OVERFLOW) {
+ wptr &= ~RB_OVERFLOW;
/* When a ring buffer overflow happen start parsing interrupt
* from the last not overwritten vector (wptr + 16). Hopefully
* this should allow us to catchup.
*/
- dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
- wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+ dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
- wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
@@ -6654,13 +6664,13 @@ restart_ih:
/* wptr/rptr are in bytes! */
rptr += 16;
rptr &= rdev->ih.ptr_mask;
+ WREG32(IH_RB_RPTR, rptr);
}
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
if (queue_thermal && rdev->pm.dpm_enabled)
schedule_work(&rdev->pm.dpm.thermal.work);
rdev->ih.rptr = rptr;
- WREG32(IH_RB_RPTR, rdev->ih.rptr);
atomic_set(&rdev->ih.lock, 0);
/* make sure wptr hasn't changed while processing */
@@ -7178,6 +7188,9 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
int ret, i;
u16 tmp16;
+ if (pci_is_root_bus(rdev->pdev->bus))
+ return;
+
if (radeon_pcie_gen2 == 0)
return;
@@ -7455,7 +7468,8 @@ static void si_program_aspm(struct radeon_device *rdev)
if (orig != data)
WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
- if (!disable_clkreq) {
+ if (!disable_clkreq &&
+ !pci_is_root_bus(rdev->pdev->bus)) {
struct pci_dev *root = rdev->pdev->bus->self;
u32 lnkcap;