Diffstat (limited to 'drivers/gpu/drm/i915/gt/intel_ggtt.c')
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt.c  89
1 file changed, 75 insertions, 14 deletions
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index ec1cbe229f0e..46a5aa4ab9c8 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -9,10 +9,9 @@
#include <linux/stop_machine.h>
#include <drm/drm_managed.h>
-#include <drm/i915_drm.h>
-#include <drm/intel-gtt.h>
+#include <drm/intel/i915_drm.h>
+#include <drm/intel/intel-gtt.h>
-#include "display/intel_display.h"
#include "gem/i915_gem_lmem.h"
#include "intel_context.h"
@@ -108,11 +107,12 @@ int i915_ggtt_init_hw(struct drm_i915_private *i915)
/**
* i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
* @vm: The VM to suspend the mappings for
+ * @evict_all: Evict all VMAs
*
* Suspend the memory mappings for all objects mapped to HW via the GGTT or a
* DPT page table.
*/
-void i915_ggtt_suspend_vm(struct i915_address_space *vm)
+void i915_ggtt_suspend_vm(struct i915_address_space *vm, bool evict_all)
{
struct i915_vma *vma, *vn;
int save_skip_rewrite;
@@ -158,7 +158,7 @@ retry:
goto retry;
}
- if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
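+ /* With evict_all, also evict VMAs that would otherwise stay globally bound. */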
+ if (evict_all || !i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
i915_vma_wait_for_bind(vma);
__i915_vma_evict(vma, false);
@@ -173,13 +173,15 @@ retry:
vm->skip_pte_rewrite = save_skip_rewrite;
mutex_unlock(&vm->mutex);
+
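+ /* A full eviction must leave nothing on the bound_list. */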
+ drm_WARN_ON(&vm->i915->drm, evict_all && !list_empty(&vm->bound_list));
}
void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
struct intel_gt *gt;
- i915_ggtt_suspend_vm(&ggtt->vm);
+ i915_ggtt_suspend_vm(&ggtt->vm, false);
ggtt->invalidate(ggtt);
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
@@ -231,11 +233,8 @@ static void guc_ggtt_ct_invalidate(struct intel_gt *gt)
struct intel_uncore *uncore = gt->uncore;
intel_wakeref_t wakeref;
- with_intel_runtime_pm_if_active(uncore->rpm, wakeref) {
- struct intel_guc *guc = &gt->uc.guc;
-
- intel_guc_invalidate_tlb_guc(guc);
- }
+ with_intel_runtime_pm_if_active(uncore->rpm, wakeref)
+ intel_guc_invalidate_tlb_guc(gt_to_guc(gt));
}
static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
@@ -246,7 +245,7 @@ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
gen8_ggtt_invalidate(ggtt);
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
- if (intel_guc_tlb_invalidation_is_available(&gt->uc.guc))
+ if (intel_guc_tlb_invalidation_is_available(gt_to_guc(gt)))
guc_ggtt_ct_invalidate(gt);
else if (GRAPHICS_VER(i915) >= 12)
intel_uncore_write_fw(gt->uncore,
@@ -290,6 +289,14 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr,
return pte;
}
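+/*
+ * Inverse of gen8_ggtt_pte_encode(): report whether the PTE is present
+ * and whether it targets local memory, and return the address it maps.
+ */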
+static dma_addr_t gen8_ggtt_pte_decode(u64 pte, bool *is_present, bool *is_local)
+{
+ *is_present = pte & GEN8_PAGE_PRESENT;
+ *is_local = pte & GEN12_GGTT_PTE_LM;
+
+ return pte & GEN12_GGTT_PTE_ADDR_MASK;
+}
+
static bool should_update_ggtt_with_bind(struct i915_ggtt *ggtt)
{
struct intel_gt *gt = ggtt->vm.gt;
@@ -436,6 +443,11 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
writeq(pte, addr);
}
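+/* Counterpart of gen8_set_pte(): gen8+ GGTT PTEs are 64 bits wide. */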
+static gen8_pte_t gen8_get_pte(void __iomem *addr)
+{
+ return readq(addr);
+}
+
static void gen8_ggtt_insert_page(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
@@ -451,6 +463,16 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
ggtt->invalidate(ggtt);
}
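+/*
+ * Read the PTE backing offset straight from the GSM and decode it via
+ * the vm's pte_decode() hook.
+ */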
+static dma_addr_t gen8_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t __iomem *pte =
+ (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ return ggtt->vm.pte_decode(gen8_get_pte(pte), is_present, is_local);
+}
+
static void gen8_ggtt_insert_page_bind(struct i915_address_space *vm,
dma_addr_t addr, u64 offset,
unsigned int pat_index, u32 flags)
@@ -606,6 +628,17 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
ggtt->invalidate(ggtt);
}
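+/* Gen6/7 flavour of gen8_ggtt_read_entry(); here PTEs are only 32 bits wide. */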
+static dma_addr_t gen6_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset,
+ bool *is_present, bool *is_local)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen6_pte_t __iomem *pte =
+ (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ return vm->pte_decode(ioread32(pte), is_present, is_local);
+}
+
/*
* Binds an object into the global gtt with the specified cache level.
* The object will be accessible to the GPU via commands whose operands
@@ -770,6 +803,14 @@ void intel_ggtt_unbind_vma(struct i915_address_space *vm,
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}
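+/*
+ * Generic wrapper: dispatch to the platform's read_entry() hook so callers
+ * need not know which GGTT flavour is in use.
+ */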
+dma_addr_t intel_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+
+ return ggtt->vm.read_entry(vm, offset, is_present, is_local);
+}
+
/*
* Reserve the top of the GuC address space for firmware images. Addresses
* beyond GUC_GGTT_TOP in the GuC address space are inaccessible by GuC,
@@ -1246,6 +1287,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.scratch_range = gen8_ggtt_clear_range;
ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
+ ggtt->vm.read_entry = gen8_ggtt_read_entry;
/*
* Serialize GTT updates with aperture access on BXT if VT-d is on,
@@ -1292,6 +1334,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
else
ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
+ ggtt->vm.pte_decode = gen8_ggtt_pte_decode;
+
return ggtt_probe_common(ggtt, size);
}
@@ -1391,6 +1435,14 @@ static u64 iris_pte_encode(dma_addr_t addr,
return pte;
}
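+/*
+ * Inverse of the gen6/7 PTE encode helpers: address bits 39:32 are kept
+ * in PTE bits 11:4, the low address bits sit in their natural position.
+ */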
+static dma_addr_t gen6_pte_decode(u64 pte, bool *is_present, bool *is_local)
+{
+ *is_present = pte & GEN6_PTE_VALID;
+ *is_local = false;
+
+ return ((pte & 0xff0) << 28) | (pte & ~0xfff);
+}
+
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
@@ -1429,6 +1481,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.scratch_range = gen6_ggtt_clear_range;
ggtt->vm.insert_page = gen6_ggtt_insert_page;
ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
+ ggtt->vm.read_entry = gen6_ggtt_read_entry;
ggtt->vm.cleanup = gen6_gmch_remove;
ggtt->invalidate = gen6_ggtt_invalidate;
@@ -1444,6 +1497,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
else
ggtt->vm.pte_encode = snb_pte_encode;
+ ggtt->vm.pte_decode = gen6_pte_decode;
+
ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
@@ -1549,6 +1604,7 @@ int i915_ggtt_enable_hw(struct drm_i915_private *i915)
/**
* i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
* @vm: The VM to restore the mappings for
+ * @all_evicted: Were all VMAs expected to be evicted on suspend?
*
* Restore the memory mappings for all objects mapped to HW via the GGTT or a
* DPT page table.
@@ -1556,13 +1612,18 @@ int i915_ggtt_enable_hw(struct drm_i915_private *i915)
* Returns %true if restoring the mapping for any object that was in a write
* domain before suspend.
*/
-bool i915_ggtt_resume_vm(struct i915_address_space *vm)
+bool i915_ggtt_resume_vm(struct i915_address_space *vm, bool all_evicted)
{
struct i915_vma *vma;
bool write_domain_objs = false;
drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
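+ /* If everything was evicted on suspend there is nothing to restore. */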
+ if (all_evicted) {
+ drm_WARN_ON(&vm->i915->drm, !list_empty(&vm->bound_list));
+ return false;
+ }
+
/* First fill our portion of the GTT with scratch pages */
vm->clear_range(vm, 0, vm->total);
@@ -1602,7 +1663,7 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt)
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
intel_gt_check_and_clear_faults(gt);
- flush = i915_ggtt_resume_vm(&ggtt->vm);
+ flush = i915_ggtt_resume_vm(&ggtt->vm, false);
if (drm_mm_node_allocated(&ggtt->error_capture))
ggtt->vm.scratch_range(&ggtt->vm, ggtt->error_capture.start,