Diffstat (limited to 'drivers/gpu/drm/i915/gvt/aperture_gm.c')
| -rw-r--r-- | drivers/gpu/drm/i915/gvt/aperture_gm.c | 120 |
1 file changed, 65 insertions(+), 55 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 359d37d5c958..8cc6e712b0f7 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -34,16 +34,21 @@
  *
  */
 
+#include <drm/drm_print.h>
+
 #include "i915_drv.h"
+#include "i915_reg.h"
+#include "gt/intel_ggtt_fencing.h"
 #include "gvt.h"
 
 static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 {
         struct intel_gvt *gvt = vgpu->gvt;
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
+        struct intel_gt *gt = gvt->gt;
         unsigned int flags;
         u64 start, end, size;
         struct drm_mm_node *node;
+        intel_wakeref_t wakeref;
         int ret;
 
         if (high_gm) {
@@ -60,14 +65,14 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
                 flags = PIN_MAPPABLE;
         }
 
-        mutex_lock(&dev_priv->drm.struct_mutex);
-        mmio_hw_access_pre(dev_priv);
-        ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
+        mutex_lock(&gt->ggtt->vm.mutex);
+        wakeref = mmio_hw_access_pre(gt);
+        ret = i915_gem_gtt_insert(&gt->ggtt->vm, NULL, node,
                                   size, I915_GTT_PAGE_SIZE,
                                   I915_COLOR_UNEVICTABLE,
                                   start, end, flags);
-        mmio_hw_access_post(dev_priv);
-        mutex_unlock(&dev_priv->drm.struct_mutex);
+        mmio_hw_access_post(gt, wakeref);
+        mutex_unlock(&gt->ggtt->vm.mutex);
         if (ret)
                 gvt_err("fail to alloc %s gm space from host\n",
                         high_gm ? "high" : "low");
@@ -78,7 +83,7 @@ static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
 {
         struct intel_gvt *gvt = vgpu->gvt;
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
+        struct intel_gt *gt = gvt->gt;
         int ret;
 
         ret = alloc_gm(vgpu, false);
         if (ret)
@@ -97,20 +102,21 @@ static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
 
         return 0;
 out_free_aperture:
-        mutex_lock(&dev_priv->drm.struct_mutex);
+        mutex_lock(&gt->ggtt->vm.mutex);
         drm_mm_remove_node(&vgpu->gm.low_gm_node);
-        mutex_unlock(&dev_priv->drm.struct_mutex);
+        mutex_unlock(&gt->ggtt->vm.mutex);
         return ret;
 }
 
 static void free_vgpu_gm(struct intel_vgpu *vgpu)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct intel_gvt *gvt = vgpu->gvt;
+        struct intel_gt *gt = gvt->gt;
 
-        mutex_lock(&dev_priv->drm.struct_mutex);
+        mutex_lock(&gt->ggtt->vm.mutex);
         drm_mm_remove_node(&vgpu->gm.low_gm_node);
         drm_mm_remove_node(&vgpu->gm.high_gm_node);
-        mutex_unlock(&dev_priv->drm.struct_mutex);
+        mutex_unlock(&gt->ggtt->vm.mutex);
 }
 
 /**
@@ -127,28 +133,29 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
                 u32 fence, u64 value)
 {
         struct intel_gvt *gvt = vgpu->gvt;
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
-        struct drm_i915_fence_reg *reg;
+        struct drm_i915_private *i915 = gvt->gt->i915;
+        struct intel_uncore *uncore = gvt->gt->uncore;
+        struct i915_fence_reg *reg;
         i915_reg_t fence_reg_lo, fence_reg_hi;
 
-        assert_rpm_wakelock_held(dev_priv);
+        assert_rpm_wakelock_held(uncore->rpm);
 
-        if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
+        if (drm_WARN_ON(&i915->drm, fence >= vgpu_fence_sz(vgpu)))
                 return;
 
         reg = vgpu->fence.regs[fence];
-        if (WARN_ON(!reg))
+        if (drm_WARN_ON(&i915->drm, !reg))
                 return;
 
         fence_reg_lo = FENCE_REG_GEN6_LO(reg->id);
         fence_reg_hi = FENCE_REG_GEN6_HI(reg->id);
 
-        I915_WRITE(fence_reg_lo, 0);
-        POSTING_READ(fence_reg_lo);
+        intel_uncore_write(uncore, fence_reg_lo, 0);
+        intel_uncore_posting_read(uncore, fence_reg_lo);
 
-        I915_WRITE(fence_reg_hi, upper_32_bits(value));
-        I915_WRITE(fence_reg_lo, lower_32_bits(value));
-        POSTING_READ(fence_reg_lo);
+        intel_uncore_write(uncore, fence_reg_hi, upper_32_bits(value));
+        intel_uncore_write(uncore, fence_reg_lo, lower_32_bits(value));
+        intel_uncore_posting_read(uncore, fence_reg_lo);
 }
 
 static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
@@ -162,41 +169,43 @@ static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
 static void free_vgpu_fence(struct intel_vgpu *vgpu)
 {
         struct intel_gvt *gvt = vgpu->gvt;
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
-        struct drm_i915_fence_reg *reg;
+        struct intel_uncore *uncore = gvt->gt->uncore;
+        struct i915_fence_reg *reg;
+        intel_wakeref_t wakeref;
         u32 i;
 
-        if (WARN_ON(!vgpu_fence_sz(vgpu)))
+        if (drm_WARN_ON(&gvt->gt->i915->drm, !vgpu_fence_sz(vgpu)))
                 return;
 
-        intel_runtime_pm_get(dev_priv);
+        wakeref = intel_runtime_pm_get(uncore->rpm);
 
-        mutex_lock(&dev_priv->drm.struct_mutex);
+        mutex_lock(&gvt->gt->ggtt->vm.mutex);
         _clear_vgpu_fence(vgpu);
         for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
                 reg = vgpu->fence.regs[i];
                 i915_unreserve_fence(reg);
                 vgpu->fence.regs[i] = NULL;
         }
-        mutex_unlock(&dev_priv->drm.struct_mutex);
+        mutex_unlock(&gvt->gt->ggtt->vm.mutex);
 
-        intel_runtime_pm_put(dev_priv);
+        intel_runtime_pm_put(uncore->rpm, wakeref);
 }
 
 static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
 {
         struct intel_gvt *gvt = vgpu->gvt;
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
-        struct drm_i915_fence_reg *reg;
+        struct intel_uncore *uncore = gvt->gt->uncore;
+        struct i915_fence_reg *reg;
+        intel_wakeref_t wakeref;
         int i;
 
-        intel_runtime_pm_get(dev_priv);
+        wakeref = intel_runtime_pm_get(uncore->rpm);
 
         /* Request fences from host */
-        mutex_lock(&dev_priv->drm.struct_mutex);
+        mutex_lock(&gvt->gt->ggtt->vm.mutex);
 
         for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
-                reg = i915_reserve_fence(dev_priv);
+                reg = i915_reserve_fence(gvt->gt->ggtt);
                 if (IS_ERR(reg))
                         goto out_free_fence;
 
@@ -205,9 +214,10 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
 
         _clear_vgpu_fence(vgpu);
 
-        mutex_unlock(&dev_priv->drm.struct_mutex);
-        intel_runtime_pm_put(dev_priv);
+        mutex_unlock(&gvt->gt->ggtt->vm.mutex);
+        intel_runtime_pm_put(uncore->rpm, wakeref);
         return 0;
+
 out_free_fence:
         gvt_vgpu_err("Failed to alloc fences\n");
         /* Return fences to host, if fail */
@@ -218,8 +228,8 @@ out_free_fence:
                 i915_unreserve_fence(reg);
                 vgpu->fence.regs[i] = NULL;
         }
-        mutex_unlock(&dev_priv->drm.struct_mutex);
-        intel_runtime_pm_put(dev_priv);
+        mutex_unlock(&gvt->gt->ggtt->vm.mutex);
+        intel_runtime_pm_put(uncore->rpm, wakeref);
         return -ENOSPC;
 }
 
@@ -233,13 +243,13 @@ static void free_resource(struct intel_vgpu *vgpu)
 }
 
 static int alloc_resource(struct intel_vgpu *vgpu,
-                struct intel_vgpu_creation_params *param)
+                const struct intel_vgpu_config *conf)
 {
         struct intel_gvt *gvt = vgpu->gvt;
         unsigned long request, avail, max, taken;
         const char *item;
 
-        if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
+        if (!conf->low_mm || !conf->high_mm || !conf->fence) {
                 gvt_vgpu_err("Invalid vGPU creation params\n");
                 return -EINVAL;
         }
@@ -248,7 +258,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
         max = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
         taken = gvt->gm.vgpu_allocated_low_gm_size;
         avail = max - taken;
-        request = MB_TO_BYTES(param->low_gm_sz);
+        request = conf->low_mm;
 
         if (request > avail)
                 goto no_enough_resource;
@@ -259,7 +269,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
         max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
         taken = gvt->gm.vgpu_allocated_high_gm_size;
         avail = max - taken;
-        request = MB_TO_BYTES(param->high_gm_sz);
+        request = conf->high_mm;
 
         if (request > avail)
                 goto no_enough_resource;
@@ -270,16 +280,16 @@ static int alloc_resource(struct intel_vgpu *vgpu,
         max = gvt_fence_sz(gvt) - HOST_FENCE;
         taken = gvt->fence.vgpu_allocated_fence_num;
         avail = max - taken;
-        request = param->fence_sz;
+        request = conf->fence;
 
         if (request > avail)
                 goto no_enough_resource;
 
         vgpu_fence_sz(vgpu) = request;
 
-        gvt->gm.vgpu_allocated_low_gm_size += MB_TO_BYTES(param->low_gm_sz);
-        gvt->gm.vgpu_allocated_high_gm_size += MB_TO_BYTES(param->high_gm_sz);
-        gvt->fence.vgpu_allocated_fence_num += param->fence_sz;
+        gvt->gm.vgpu_allocated_low_gm_size += conf->low_mm;
+        gvt->gm.vgpu_allocated_high_gm_size += conf->high_mm;
+        gvt->fence.vgpu_allocated_fence_num += conf->fence;
         return 0;
 
 no_enough_resource:
@@ -291,7 +301,7 @@ no_enough_resource:
 }
 
 /**
- * inte_gvt_free_vgpu_resource - free HW resource owned by a vGPU
+ * intel_vgpu_free_resource() - free HW resource owned by a vGPU
  * @vgpu: a vGPU
  *
  * This function is used to free the HW resource owned by a vGPU.
@@ -313,17 +323,17 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
  */
 void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct intel_gvt *gvt = vgpu->gvt;
+        intel_wakeref_t wakeref;
 
-        intel_runtime_pm_get(dev_priv);
-        _clear_vgpu_fence(vgpu);
-        intel_runtime_pm_put(dev_priv);
+        with_intel_runtime_pm(gvt->gt->uncore->rpm, wakeref)
+                _clear_vgpu_fence(vgpu);
 }
 
 /**
- * intel_alloc_vgpu_resource - allocate HW resource for a vGPU
+ * intel_vgpu_alloc_resource() - allocate HW resource for a vGPU
  * @vgpu: vGPU
- * @param: vGPU creation params
+ * @conf: vGPU creation params
  *
  * This function is used to allocate HW resource for a vGPU. User specifies
  * the resource configuration through the creation params.
@@ -333,11 +343,11 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
  *
  */
 int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
-                struct intel_vgpu_creation_params *param)
+                const struct intel_vgpu_config *conf)
 {
         int ret;
 
-        ret = alloc_resource(vgpu, param);
+        ret = alloc_resource(vgpu, conf);
         if (ret)
                 return ret;
 
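
For reference, a minimal sketch (not part of the diff above) of the wakeref-tracked runtime-PM and intel_uncore MMIO pattern the conversion moves to. It only uses APIs that appear in the diff (with_intel_runtime_pm(), intel_uncore_write(), intel_uncore_posting_read(), intel_wakeref_t); the helper name gvt_write_fence_sketch() is hypothetical, and the snippet assumes the i915 internal definitions pulled in by "i915_drv.h".

/* Hedged sketch, not from the commit: the converted runtime-PM/MMIO pattern. */
#include "i915_drv.h"

static void gvt_write_fence_sketch(struct intel_uncore *uncore,
                                   i915_reg_t lo, i915_reg_t hi, u64 value)
{
        intel_wakeref_t wakeref;

        /*
         * Hold a tracked wakeref only for the duration of the MMIO access;
         * the cookie is released automatically when the block exits.
         */
        with_intel_runtime_pm(uncore->rpm, wakeref) {
                intel_uncore_write(uncore, lo, 0);
                intel_uncore_posting_read(uncore, lo);

                intel_uncore_write(uncore, hi, upper_32_bits(value));
                intel_uncore_write(uncore, lo, lower_32_bits(value));
                intel_uncore_posting_read(uncore, lo);
        }
}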
