path: root/drivers/gpu/drm/i915/gvt/execlist.c
author		fred gao <fred.gao@intel.com>	2017-08-18 15:41:06 +0800
committer	Zhenyu Wang <zhenyuw@linux.intel.com>	2017-09-08 14:21:14 +0800
commit		0a53bc07f044c4c51eb0dc1386c504db80ca8d00 (patch)
tree		4fed92c3c3f44750e2414a1c9846358344478e6e /drivers/gpu/drm/i915/gvt/execlist.c
parent		f090a00df9ecdab5d066b099c1797e0070e27a36 (diff)
drm/i915/gvt: Separate cmd scan from request allocation
Currently the i915 request structure and the shadow ring buffer are allocated before the command scan, so any error that happens later in the long dispatch_workload path forces a restore back to the previous state.

This patch introduces a reserved ring buffer created at the beginning of vGPU initialization. The workload is copied into this reserved buffer and scanned first; the i915 request and the shadow ring buffer are only allocated once the scan has succeeded.

To balance memory usage against allocation time, when a bigger guest ring buffer arrives the reserved buffer is reallocated to that size and kept until an even bigger one is needed.

v2:
- use kmalloc for the smaller ring buffer, realloc if required. (Zhenyu)

v3:
- remove the dynamically allocated ring buffer. (Zhenyu)

v4:
- code style polish.
- kfree the previously allocated buffer once kmalloc failed. (Zhenyu)

Signed-off-by: fred gao <fred.gao@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
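The ordering the patch moves toward can be outlined as below. This is an illustrative sketch only, not code from the patch: the three helpers are hypothetical stand-ins for the copy, scan, and allocation steps that the real cmd_parser.c/scheduler.c code performs.

/* Hypothetical helpers, declared only so the sketch is self-contained. */
int copy_workload_to_reserve_buf(struct intel_vgpu_workload *workload, void *va);
int scan_reserve_buffer(struct intel_vgpu_workload *workload, void *va);
int allocate_request_and_shadow_rb(struct intel_vgpu_workload *workload);

static int dispatch_workload_sketch(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	void *va = vgpu->reserve_ring_buffer_va[workload->ring_id];
	int ret;

	/* 1. copy the guest ring buffer contents into the reserved buffer */
	ret = copy_workload_to_reserve_buf(workload, va);
	if (ret)
		return ret;

	/* 2. run the command scan on that reserved copy */
	ret = scan_reserve_buffer(workload, va);
	if (ret)
		return ret;

	/*
	 * 3. only after a successful scan allocate the i915 request and the
	 *    shadow ring buffer, so a scan failure leaves nothing to unwind
	 */
	return allocate_request_and_shadow_rb(workload);
}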
Diffstat (limited to 'drivers/gpu/drm/i915/gvt/execlist.c')
-rw-r--r--	drivers/gpu/drm/i915/gvt/execlist.c | 30
1 file changed, 30 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 91b4300f3b39..1e2c27704be5 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -820,10 +820,21 @@ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
 void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
 {
+	enum intel_engine_id i;
+	struct intel_engine_cs *engine;
+
 	clean_workloads(vgpu, ALL_ENGINES);
 	kmem_cache_destroy(vgpu->workloads);
+
+	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+		kfree(vgpu->reserve_ring_buffer_va[i]);
+		vgpu->reserve_ring_buffer_va[i] = NULL;
+		vgpu->reserve_ring_buffer_size[i] = 0;
+	}
+
 }
+#define RESERVE_RING_BUFFER_SIZE		((1 * PAGE_SIZE)/8)
 int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
 {
 	enum intel_engine_id i;
@@ -843,7 +854,26 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
 	if (!vgpu->workloads)
 		return -ENOMEM;
+	/* each ring has a shadow ring buffer until vgpu destroyed */
+	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+		vgpu->reserve_ring_buffer_va[i] =
+			kmalloc(RESERVE_RING_BUFFER_SIZE, GFP_KERNEL);
+		if (!vgpu->reserve_ring_buffer_va[i]) {
+			gvt_vgpu_err("fail to alloc reserve ring buffer\n");
+			goto out;
+		}
+		vgpu->reserve_ring_buffer_size[i] = RESERVE_RING_BUFFER_SIZE;
+	}
 	return 0;
+out:
+	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+		if (vgpu->reserve_ring_buffer_size[i]) {
+			kfree(vgpu->reserve_ring_buffer_va[i]);
+			vgpu->reserve_ring_buffer_va[i] = NULL;
+			vgpu->reserve_ring_buffer_size[i] = 0;
+		}
+	}
+	return -ENOMEM;
 }
 void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
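The hunks above only set up and tear down the reserved per-ring buffers; the grow-and-keep policy described in the commit message lives in the command-scan path, which is outside this file. A minimal sketch of that policy, assuming a hypothetical helper name and a caller that already knows the workload's ring-buffer length in bytes:

/*
 * Illustrative sketch only, not code from this patch: when a workload's
 * ring buffer is larger than the current reserved buffer, allocate a bigger
 * one and keep it for subsequent workloads on the same ring.
 */
static void *get_reserve_ring_buffer(struct intel_vgpu *vgpu,
				     int ring_id, int rb_len)
{
	if (rb_len > vgpu->reserve_ring_buffer_size[ring_id]) {
		void *p = kmalloc(rb_len, GFP_KERNEL);

		if (!p)
			return NULL;

		/* drop the smaller buffer and keep the bigger one around */
		kfree(vgpu->reserve_ring_buffer_va[ring_id]);
		vgpu->reserve_ring_buffer_va[ring_id] = p;
		vgpu->reserve_ring_buffer_size[ring_id] = rb_len;
	}

	return vgpu->reserve_ring_buffer_va[ring_id];
}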