author	Linus Torvalds <torvalds@linux-foundation.org>	2023-02-22 18:28:03 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-02-22 18:28:03 -0800
commit	a5c95ca18a98d742d0a4a04063c32556b5b66378 (patch)
tree	fdd897b23a1c45b3d03bd1e75e5df42057f339d1 /drivers/accel/ivpu/ivpu_mmu_context.c
parent	307e14c039063f0c9bd7a18a7add8f940580dcc9 (diff)
parent	a48bba98380cb0b43dcd01d276c7efc282e3c33f (diff)
Merge tag 'drm-next-2023-02-23' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
 "There are a bunch of changes all over in the usual places.

  Highlights:
   - habanalabs moves from misc to accel
   - first accel driver for Intel VPU (Versatile Processing Unit) inference engine
   - dropped all the ancient legacy DRI1 drivers. I think it's been at least 10 years since anyone has heard about these.
   - Intel DG2 updates and prelim Meteorlake enablement
   - etnaviv adds support for the VeriSilicon NPU device (a GPU-like engine with inference accelerators)

  Detailed summary:

  Removals:
   - remove legacy dri1 drivers: i810, mga, r128, savage, sis, tdfx, via

  New driver:
   - intel VPU accelerator driver
   - habanalabs comes via drm tree now

  drm/core:
   - use drm_dbg_ helpers in several places
   - Document defaults for CRTC backgrounds
   - Document use of drm_minor

  edid:
   - improve mode parsing and refactoring

  connector:
   - support analog TV mode property

  media:
   - add some common formats

  udmabuf:
   - add vmap/vunmap methods

  fourcc:
   - add XRGB1555 and RGB565 formats
   - document open source user waiver

  firmware:
   - fix color-format selection for system framebuffer

  format-helper:
   - Add conversion from XRGB8888 to various sysfb formats
   - Make XRGB8888 the only driver-emulated legacy format
   - Add conversion from XRGB8888 to XBGR8888 and ABGR8888

  fb-helper:
   - fix preferred depth and bpp values across drivers
   - Avoid blank consoles from selecting an incorrect color format

  probe-helper:
   - Enable/disable HPD on connectors

  scheduler:
   - Fix lockup in drm_sched_entity_kill()
   - Deprecate drm_sched_resubmit_jobs()

  bridge:
   - remove unused functions
   - implement i2c probe_new in various drivers
   - ite-it6505: Locking fixes, Cache EDID data
   - ite-it66121: Support IT6610 chip
   - lontium-tl9611: Fix HDMI on DragonBoard 845c
   - parade-ps8640: Use atomic bridge functions
   - Support i.MX93 LDB plus DT bindings

  debugfs:
   - add per device helpers and convert drivers

  displayport:
   - mst fixes
   - add DP adaptive sync DPCD definitions

  fbdev:
   - always pick 32bpp as default
   - remove some unused code

  simpledrm:
   - support system memory framebuffers

  panel:
   - add orientation quirks for Lenovo Yoga Tab 3 X90F and DynaBook K50
   - Use ktime_get_boottime() to measure power-down delay
   - Fix auto-suspend delay
   - Visionox VTDR6130 AMOLED DSI
   - Support Himax HX8394
   - Convert many drivers to common generic DSI write-sequence helper
   - AUO A030JTN01

  ttm:
   - drop bo wait wrapper
   - fix MIPS build

  habanalabs:
   - moved driver to accel subsystem
   - gaudi2 decoder error improvement
   - more trace events
   - Gaudi2 abrupt reset by firmware support
   - add uAPI to flush memory transactions
   - add uAPI to pass through userspace reqs to fw
   - remove dma-buf export by handle

  amdgpu:
   - add new INFO queries for peak and min sclk/mclk for profile modes
   - Add PCIe info to the INFO IOCTL
   - secure display support for multiple displays
   - DML optimizations
   - DCN 3.2 updates
   - PSR updates
   - DP 2.1 updates
   - SR-IOV RAS updates
   - VCN RAS support
   - SMU 13.x updates
   - Switch 1 element arrays to flexible arrays
   - Add RAS support for DF 4.3
   - Stack size improvements
   - S0ix rework
   - Allow 0 as a vram limit on APUs
   - Handle profiling modes for SMU13.x
   - Fix possible segfault in failure case
   - Rework FW requests to happen in early_init for all IPs so that we don't lose the sbios console if FW is missing
   - Fix power reporting on certain firmwares for CZN/RN
   - Allow S0ix without BIOS support
   - Enable freesync over PCon
   - Re-enable the AGP aperture on GMC 11.x

  amdkfd:
   - Error handling fixes
   - PASID fixes
   - Fix for cleared VRAM BOs
   - Fix cleanup if GPUVM creation fails
   - Memory accounting fix
   - Use resource_size rather than open coding it
   - GC11 mGPU fix

  radeon:
   - Switch 1 element arrays to flexible arrays
   - Fix memory leak on shutdown
   - move to new logging

  i915:
   - Meteorlake display/OA/GSC fw/workarounds enabling
   - DP MST DSC support
   - Gamma/degamma readout support for the state checker
   - Enable SDP split support for DP 2.0
   - Add probe blocking support to i915.force_probe parameter
   - Enable Xe HP 4tile support
   - Avoid display direct calls to uncore
   - Fix HuC delayed load memory leaks
   - Add DG2 workarounds Wa_18018764978 and Wa_18019271663
   - Improve suspend / resume times with VT-d scanout workaround active
   - Fix DG2 visual corruption on small BAR systems by not forgetting to copy CCS aux state
   - Fix TLB invalidation for Gen12.50 video and compute engines
   - Enable HF-EEODB by switching HDMI, DP and LVDS to use struct drm_edid
   - Start using unversioned DMC firmware paths for new platforms
   - ELD refactor: Stop using hardware buffer, precompute ELD
   - lots of display code refactoring

  nouveau:
   - drop legacy ioctl support
   - replace 0-sized array

  msm:
   - dpu/dsi/mdss: Support for SM8350, SM8450, SM8550 and SC8280XP platforms
   - Added bindings for SM8150
   - dpu: Partial support for DSC on SM8150 and SM8250
   - dpu: Fixed color transformation matrix being lost on suspend/resume
   - dp: Support SDM845 and SC8280XP platforms
   - dp: Support for limiting DP link rate via DT property
   - dsi: Validate display modes according to the DSI OPP table
   - dsi: DSI PHY support for the SM6375 platform
   - Add MSM_SUBMIT_BO_NO_IMPLICIT
   - a2xx: Support to load legacy firmware
   - a6xx: GPU devcore dump updates for a650/a660
   - GPU devfreq tuning and fixes
   - Turn 8960 HDMI PHY into clock provider
   - Make 8960 HDMI PHY use PXO clock from DT

  etnaviv:
   - experimental VeriSilicon NPU support
   - report GPU load via fdinfo format
   - MMU fault message improvements

  tegra:
   - rework syncpoint interrupt

  mediatek:
   - DSI timing fix
   - fix config deps

  ast:
   - various fixes

  exynos:
   - restore bridge chain order fixes

  gud:
   - convert to shadow plane buffers
   - perform flushing synchronously during atomic update
   - Use new debugfs helpers

  arm/hdlcd:
   - Use new debugfs helper

  ili9486:
   - Support 16-bit pixel data

  imx:
   - Split off IPUv3 driver

  mipi-dbi:
   - convert to DRM shadow-plane helpers
   - rsp driver changes
   - Support separate I/O-voltage supply

  mxsfb:
   - Depend on ARCH_MXS or ARCH_MXC

  sun4i:
   - convert to new TV mode property

  vc4:
   - convert to new TV mode property
   - kunit tests
   - Support RGB565 and RGB666 formats
   - convert dsi driver to bridge
   - Various HVS and CRTC fixes

  v3d:
   - Do not opencode drm_gem_object_lookup()

  virtio:
   - improve tracing

  vkms:
   - support small cursors in IGT tests
   - Fix SEGFAULT from incorrect GEM-buffer mapping

  rcar-du:
   - fixes and improvements"

* tag 'drm-next-2023-02-23' of git://anongit.freedesktop.org/drm/drm: (1455 commits)
  msm/fbdev: fix unused variable warning with clang.
  drm/fb-helper: Remove drm_fb_helper_unprepare() from drm_fb_helper_fini()
  dma-buf: make kobj_type structure constant
  drm/shmem-helper: Fix locking for drm_gem_shmem_get_pages_sgt()
  drm/amd/display: disable SubVP + DRR to prevent underflow
  drm/amd/display: Fail atomic_check early on normalize_zpos error
  drm/amd/pm: avoid unaligned access warnings
  drm/amd/display: avoid unaligned access warnings
  drm/amd/display: Remove duplicate/repeating expressions
  drm/amd/display: Remove duplicate/repeating expression
  drm/amd/display: Make variables declaration inside ifdef guard
  drm/amd/display: Fix excess arguments on kernel-doc
  drm/amd/display: Add previously missing includes
  drm/amd/amdgpu: Add function prototypes to headers
  drm/amd/display: Add function prototypes to headers
  drm/amd/display: Turn global functions into static
  drm/amd/display: remove unused _calculate_degamma_curve function
  drm/amd/display: remove unused func declaration from resource headers
  drm/amd/display: unset initial value for tf since it's never used
  drm/amd/display: camel case cleanup in color_gamma file
  ...
Diffstat (limited to 'drivers/accel/ivpu/ivpu_mmu_context.c')
-rw-r--r--	drivers/accel/ivpu/ivpu_mmu_context.c	398
1 file changed, 398 insertions(+), 0 deletions(-)
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
new file mode 100644
index 000000000000..8ce9b12ac356
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/highmem.h>
+
+#include "ivpu_drv.h"
+#include "ivpu_hw.h"
+#include "ivpu_mmu.h"
+#include "ivpu_mmu_context.h"
+
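+/*
+ * The VPU MMU uses a three-level page table with a 4 KB translation
+ * granule: bits 38:30 of a VPU address select the PGD entry, bits 29:21
+ * the PMD entry, bits 20:12 the PTE and bits 11:0 the offset within the
+ * page. Each table level holds IVPU_MMU_PGTABLE_ENTRIES 64-bit
+ * descriptors, so a table occupies exactly IVPU_MMU_PGTABLE_SIZE bytes.
+ */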
+#define IVPU_MMU_PGD_INDEX_MASK GENMASK(38, 30)
+#define IVPU_MMU_PMD_INDEX_MASK GENMASK(29, 21)
+#define IVPU_MMU_PTE_INDEX_MASK GENMASK(20, 12)
+#define IVPU_MMU_ENTRY_FLAGS_MASK GENMASK(11, 0)
+#define IVPU_MMU_ENTRY_FLAG_NG BIT(11)
+#define IVPU_MMU_ENTRY_FLAG_AF BIT(10)
+#define IVPU_MMU_ENTRY_FLAG_USER BIT(6)
+#define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT BIT(2)
+#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE BIT(1)
+#define IVPU_MMU_ENTRY_FLAG_VALID BIT(0)
+
+#define IVPU_MMU_PAGE_SIZE SZ_4K
+#define IVPU_MMU_PTE_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
+#define IVPU_MMU_PMD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
+#define IVPU_MMU_PGTABLE_SIZE (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))
+
+#define IVPU_MMU_DUMMY_ADDRESS 0xdeadb000
+#define IVPU_MMU_ENTRY_VALID (IVPU_MMU_ENTRY_FLAG_TYPE_PAGE | IVPU_MMU_ENTRY_FLAG_VALID)
+#define IVPU_MMU_ENTRY_INVALID (IVPU_MMU_DUMMY_ADDRESS & ~IVPU_MMU_ENTRY_FLAGS_MASK)
+#define IVPU_MMU_ENTRY_MAPPED (IVPU_MMU_ENTRY_FLAG_AF | IVPU_MMU_ENTRY_FLAG_USER | \
+ IVPU_MMU_ENTRY_FLAG_NG | IVPU_MMU_ENTRY_VALID)
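+
+/*
+ * A mapped leaf entry is the page-aligned DMA address OR-ed with the
+ * IVPU_MMU_ENTRY_MAPPED flags, i.e. pte[pte_index] = dma_addr | prot;
+ * an unmapped entry holds IVPU_MMU_ENTRY_INVALID - a dummy address with
+ * every flag bit, including VALID, cleared.
+ */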
+
+static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
+{
+ dma_addr_t pgd_dma;
+ u64 *pgd;
+
+ pgd = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pgd_dma, GFP_KERNEL);
+ if (!pgd)
+ return -ENOMEM;
+
+ pgtable->pgd = pgd;
+ pgtable->pgd_dma = pgd_dma;
+
+ return 0;
+}
+
+static void ivpu_mmu_pgtable_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
+{
+ int pgd_index, pmd_index;
+
+ for (pgd_index = 0; pgd_index < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_index) {
+ u64 **pmd_entries = pgtable->pgd_cpu_entries[pgd_index];
+ u64 *pmd = pgtable->pgd_entries[pgd_index];
+
+ if (!pmd_entries)
+ continue;
+
+ for (pmd_index = 0; pmd_index < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_index) {
+ if (pmd_entries[pmd_index])
+ dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE,
+ pmd_entries[pmd_index],
+ pmd[pmd_index] & ~IVPU_MMU_ENTRY_FLAGS_MASK);
+ }
+
+ kfree(pmd_entries);
+ dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pgtable->pgd_entries[pgd_index],
+ pgtable->pgd[pgd_index] & ~IVPU_MMU_ENTRY_FLAGS_MASK);
+ }
+
+ dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pgtable->pgd,
+ pgtable->pgd_dma & ~IVPU_MMU_ENTRY_FLAGS_MASK);
+}
+
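+/*
+ * Return the CPU pointer to the PMD table for @pgd_index, allocating the
+ * table on first use. The driver keeps shadow arrays of CPU pointers
+ * (pgd_entries/pgd_cpu_entries) alongside the DMA descriptors so that
+ * the tables can also be walked from the CPU side.
+ */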
+static u64 *
+ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, u64 pgd_index)
+{
+ u64 **pmd_entries;
+ dma_addr_t pmd_dma;
+ u64 *pmd;
+
+ if (pgtable->pgd_entries[pgd_index])
+ return pgtable->pgd_entries[pgd_index];
+
+ pmd = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pmd_dma, GFP_KERNEL);
+ if (!pmd)
+ return NULL;
+
+ pmd_entries = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
+ if (!pmd_entries)
+ goto err_free_pgd;
+
+ pgtable->pgd_entries[pgd_index] = pmd;
+ pgtable->pgd_cpu_entries[pgd_index] = pmd_entries;
+ pgtable->pgd[pgd_index] = pmd_dma | IVPU_MMU_ENTRY_VALID;
+
+ return pmd;
+
+err_free_pgd:
+ dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pmd, pmd_dma);
+ return NULL;
+}
+
+static u64 *
+ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
+ int pgd_index, int pmd_index)
+{
+ dma_addr_t pte_dma;
+ u64 *pte;
+
+ if (pgtable->pgd_cpu_entries[pgd_index][pmd_index])
+ return pgtable->pgd_cpu_entries[pgd_index][pmd_index];
+
+ pte = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pte_dma, GFP_KERNEL);
+ if (!pte)
+ return NULL;
+
+ pgtable->pgd_cpu_entries[pgd_index][pmd_index] = pte;
+ pgtable->pgd_entries[pgd_index][pmd_index] = pte_dma | IVPU_MMU_ENTRY_VALID;
+
+ return pte;
+}
+
+static int
+ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ u64 vpu_addr, dma_addr_t dma_addr, int prot)
+{
+ u64 *pte;
+ int pgd_index = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
+ int pmd_index = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
+ int pte_index = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
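+ /*
+  * Worked example (hypothetical address): a vpu_addr of 0x123456000
+  * decomposes into pgd_index 4 (bits 38:30), pmd_index 0x11a
+  * (bits 29:21) and pte_index 0x56 (bits 20:12).
+  */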
+
+ /* Allocate PMD - second level page table if needed */
+ if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_index))
+ return -ENOMEM;
+
+ /* Allocate PTE - third level page table if needed */
+ pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_index, pmd_index);
+ if (!pte)
+ return -ENOMEM;
+
+ /* Update PTE - third level page table with DMA address */
+ pte[pte_index] = dma_addr | prot;
+
+ return 0;
+}
+
+static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
+{
+ int pgd_index = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
+ int pmd_index = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
+ int pte_index = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
+
+ /* Update PTE with dummy physical address and clear flags */
+ ctx->pgtable.pgd_cpu_entries[pgd_index][pmd_index][pte_index] = IVPU_MMU_ENTRY_INVALID;
+}
+
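+/*
+ * Flush the CPU cachelines backing every page table touched by
+ * [vpu_addr, vpu_addr + size) so that the MMU's table walker, which
+ * reads the tables directly from memory, observes the updated entries.
+ */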
+static void
+ivpu_mmu_context_flush_page_tables(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
+{
+ u64 end_addr = vpu_addr + size;
+ u64 *pgd = ctx->pgtable.pgd;
+
+ /* Align to PMD entry (2 MB) */
+ vpu_addr &= ~(IVPU_MMU_PTE_MAP_SIZE - 1);
+
+ while (vpu_addr < end_addr) {
+ int pgd_index = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
+ u64 pmd_end = (pgd_index + 1) * (u64)IVPU_MMU_PMD_MAP_SIZE;
+ u64 *pmd = ctx->pgtable.pgd_entries[pgd_index];
+
+ while (vpu_addr < end_addr && vpu_addr < pmd_end) {
+ int pmd_index = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
+ u64 *pte = ctx->pgtable.pgd_cpu_entries[pgd_index][pmd_index];
+
+ clflush_cache_range(pte, IVPU_MMU_PGTABLE_SIZE);
+ vpu_addr += IVPU_MMU_PTE_MAP_SIZE;
+ }
+ clflush_cache_range(pmd, IVPU_MMU_PGTABLE_SIZE);
+ }
+ clflush_cache_range(pgd, IVPU_MMU_PGTABLE_SIZE);
+}
+
+static int
+ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ u64 vpu_addr, dma_addr_t dma_addr, size_t size, int prot)
+{
+ while (size) {
+ int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
+
+ if (ret)
+ return ret;
+
+ vpu_addr += IVPU_MMU_PAGE_SIZE;
+ dma_addr += IVPU_MMU_PAGE_SIZE;
+ size -= IVPU_MMU_PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
+{
+ while (size) {
+ ivpu_mmu_context_unmap_page(ctx, vpu_addr);
+ vpu_addr += IVPU_MMU_PAGE_SIZE;
+ size -= IVPU_MMU_PAGE_SIZE;
+ }
+}
+
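+/*
+ * Map an already DMA-mapped scatter-gather table at @vpu_addr. A minimal
+ * usage sketch (hypothetical caller; assumes @node was reserved with
+ * ivpu_mmu_context_insert_node_locked() and @sgt came from dma_map_sgtable()):
+ *
+ *	ret = ivpu_mmu_context_map_sgt(vdev, ctx, node.start, sgt, false);
+ *	if (ret)
+ *		return ret;
+ */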
+int
+ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
+{
+ struct scatterlist *sg;
+ int prot;
+ int ret;
+ u64 i;
+
+ if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
+ return -EINVAL;
+ /*
+ * VPU is only 32 bit, but DMA engine is 38 bit
+ * Ranges < 2 GB are reserved for VPU internal registers
+ * Limit range to 8 GB
+ */
+ if (vpu_addr < SZ_2G || vpu_addr > SZ_8G)
+ return -EINVAL;
+
+ prot = IVPU_MMU_ENTRY_MAPPED;
+ if (llc_coherent)
+ prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT;
+
+ mutex_lock(&ctx->lock);
+
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ u64 dma_addr = sg_dma_address(sg) - sg->offset;
+ size_t size = sg_dma_len(sg) + sg->offset;
+
+ ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
+ if (ret) {
+ ivpu_err(vdev, "Failed to map context pages\n");
+ mutex_unlock(&ctx->lock);
+ return ret;
+ }
+ ivpu_mmu_context_flush_page_tables(ctx, vpu_addr, size);
+ vpu_addr += size;
+ }
+
+ mutex_unlock(&ctx->lock);
+
+ ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
+ if (ret)
+ ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
+ return ret;
+}
+
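+/*
+ * Unmapping only rewrites the leaf PTEs to IVPU_MMU_ENTRY_INVALID; the
+ * page tables themselves stay allocated until the context is torn down
+ * by ivpu_mmu_pgtable_free().
+ */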
+void
+ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ u64 vpu_addr, struct sg_table *sgt)
+{
+ struct scatterlist *sg;
+ int ret;
+ u64 i;
+
+ if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
+ ivpu_warn(vdev, "Unaligned vpu_addr: 0x%llx\n", vpu_addr);
+
+ mutex_lock(&ctx->lock);
+
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ size_t size = sg_dma_len(sg) + sg->offset;
+
+ ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
+ ivpu_mmu_context_flush_page_tables(ctx, vpu_addr, size);
+ vpu_addr += size;
+ }
+
+ mutex_unlock(&ctx->lock);
+
+ ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
+ if (ret)
+ ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
+}
+
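+/*
+ * Reserve a page-aligned range of VPU address space of @size bytes
+ * within @range. As the _locked suffix indicates, the caller must
+ * already hold ctx->lock.
+ */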
+int
+ivpu_mmu_context_insert_node_locked(struct ivpu_mmu_context *ctx,
+ const struct ivpu_addr_range *range,
+ u64 size, struct drm_mm_node *node)
+{
+ lockdep_assert_held(&ctx->lock);
+
+ return drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE,
+ 0, range->start, range->end, DRM_MM_INSERT_BEST);
+}
+
+void
+ivpu_mmu_context_remove_node_locked(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
+{
+ lockdep_assert_held(&ctx->lock);
+
+ drm_mm_remove_node(node);
+}
+
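+/*
+ * A context_id of 0 denotes the global context (see
+ * ivpu_mmu_global_context_init() below) and gets the VPU-global address
+ * ranges; nonzero IDs are per-process user contexts and get the user
+ * address ranges.
+ */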
+static int
+ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
+{
+ u64 start, end;
+ int ret;
+
+ mutex_init(&ctx->lock);
+ INIT_LIST_HEAD(&ctx->bo_list);
+
+ ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
+ if (ret)
+ return ret;
+
+ if (!context_id) {
+ start = vdev->hw->ranges.global_low.start;
+ end = vdev->hw->ranges.global_high.end;
+ } else {
+ start = vdev->hw->ranges.user_low.start;
+ end = vdev->hw->ranges.user_high.end;
+ }
+
+ drm_mm_init(&ctx->mm, start, end - start);
+ ctx->id = context_id;
+
+ return 0;
+}
+
+static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
+{
+ drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd);
+
+ mutex_destroy(&ctx->lock);
+ ivpu_mmu_pgtable_free(vdev, &ctx->pgtable);
+ drm_mm_takedown(&ctx->mm);
+}
+
+int ivpu_mmu_global_context_init(struct ivpu_device *vdev)
+{
+ return ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
+}
+
+void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
+{
+ return ivpu_mmu_context_fini(vdev, &vdev->gctx);
+}
+
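+/*
+ * Flag the file that owns @ssid as having seen an MMU fault. The xarray
+ * lock makes the lookup safe against concurrent removal of the context.
+ */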
+void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
+{
+ struct ivpu_file_priv *file_priv;
+
+ xa_lock(&vdev->context_xa);
+
+ file_priv = xa_load(&vdev->context_xa, ssid);
+ if (file_priv)
+ file_priv->has_mmu_faults = true;
+
+ xa_unlock(&vdev->context_xa);
+}
+
+int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id)
+{
+ int ret;
+
+ drm_WARN_ON(&vdev->drm, !ctx_id);
+
+ ret = ivpu_mmu_context_init(vdev, ctx, ctx_id);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize context: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable);
+ if (ret) {
+ ivpu_err(vdev, "Failed to set page table: %d\n", ret);
+ goto err_context_fini;
+ }
+
+ return 0;
+
+err_context_fini:
+ ivpu_mmu_context_fini(vdev, ctx);
+ return ret;
+}
+
+void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
+{
+ drm_WARN_ON(&vdev->drm, !ctx->id);
+
+ ivpu_mmu_clear_pgtable(vdev, ctx->id);
+ ivpu_mmu_context_fini(vdev, ctx);
+}