author		Zhipeng Gong <zhipeng.gong@intel.com>	2018-04-04 08:43:53 +0800
committer	Zhi Wang <zhi.a.wang@intel.com>	2018-04-23 13:09:32 +0800
commit		89babe7cf18e4f93c6ba1e6abfe2e5aa5e4fc66c (patch)
tree		347a03cf59e6bb8ae4cc522876b8160288f9b807 /drivers/gpu/drm/i915/gvt/sched_policy.c
parent		292bb0d38a5714440b59ef910404408d5e9a8017 (diff)
drm/i915/gvt: Update time slice more frequently
When there is only one vGPU in GVT-g and it submits workloads continuously, it is never scheduled out, so vgpu_update_timeslice is not called and its sched_in_time is not updated for a long time, which can be several seconds or longer. Once GVT-g pauses workload submission for this vGPU due to heavy host CPU workload, the vGPU gets scheduled out, vgpu_update_timeslice is called, and its left_ts is reduced by the large value sched_out_time - sched_in_time. When GVT-g is ready to submit workloads for this vGPU again, it is not scheduled in until gvt_balance_timeslice reaches stage 0 and resets its left_ts, which introduces several hundred milliseconds of latency.

This patch updates the time slice every millisecond so that sched_in_time is refreshed in a timely manner.

v2: revise commit message
v3: use more concise expr. (Zhenyu)

Signed-off-by: Zhipeng Gong <zhipeng.gong@intel.com>
Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
Cc: Min He <min.he@intel.com>
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
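[Editor's illustration, not part of the commit: a minimal userspace C sketch of the timeslice accounting that vgpu_update_timeslice performs after this change. Plain int64_t nanoseconds stand in for ktime_t, and the struct, field names, and main() driver merely mirror the patch for demonstration.]

/*
 * Sketch of the per-millisecond timeslice accounting. int64_t nanoseconds
 * stand in for ktime_t; names mirror the patch but this is not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

struct vgpu_sched_data {
	int64_t sched_in_time;   /* when the vGPU was last charged, ns */
	int64_t sched_time;      /* total time the vGPU has run, ns */
	int64_t left_ts;         /* remaining timeslice budget, ns */
};

/* Charge the running vGPU for the time since it was last charged. */
static void vgpu_update_timeslice(struct vgpu_sched_data *d, int64_t cur_time)
{
	int64_t delta_ts = cur_time - d->sched_in_time;

	d->sched_time += delta_ts;
	d->left_ts    -= delta_ts;
	d->sched_in_time = cur_time;   /* restart the accounting window */
}

int main(void)
{
	struct vgpu_sched_data d = {
		.sched_in_time = 0,
		.sched_time    = 0,
		.left_ts       = 100 * 1000 * 1000,   /* 100 ms budget */
	};

	/* Charging every 1 ms keeps each deduction small ... */
	for (int64_t t = 1000000; t <= 5000000; t += 1000000)
		vgpu_update_timeslice(&d, t);
	printf("after 5 x 1ms updates: left_ts = %lld ns\n",
	       (long long)d.left_ts);

	/*
	 * ... whereas charging only at sched-out after a long run subtracts
	 * the whole interval at once, which is what made the vGPU wait for
	 * the next balance stage before it could run again.
	 */
	return 0;
}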
Diffstat (limited to 'drivers/gpu/drm/i915/gvt/sched_policy.c')
-rw-r--r--	drivers/gpu/drm/i915/gvt/sched_policy.c	26
1 file changed, 13 insertions, 13 deletions
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 8876a57f407c..d053cbe1dc94 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -53,7 +53,6 @@ struct vgpu_sched_data {
 	bool active;
 
 	ktime_t sched_in_time;
-	ktime_t sched_out_time;
 	ktime_t sched_time;
 	ktime_t left_ts;
 	ktime_t allocated_ts;
@@ -69,15 +68,19 @@ struct gvt_sched_data {
 	ktime_t expire_time;
 };
 
-static void vgpu_update_timeslice(struct intel_vgpu *pre_vgpu)
+static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time)
 {
 	ktime_t delta_ts;
-	struct vgpu_sched_data *vgpu_data = pre_vgpu->sched_data;
+	struct vgpu_sched_data *vgpu_data;
 
-	delta_ts = vgpu_data->sched_out_time - vgpu_data->sched_in_time;
+	if (!vgpu || vgpu == vgpu->gvt->idle_vgpu)
+		return;
 
-	vgpu_data->sched_time += delta_ts;
-	vgpu_data->left_ts -= delta_ts;
+	vgpu_data = vgpu->sched_data;
+	delta_ts = ktime_sub(cur_time, vgpu_data->sched_in_time);
+	vgpu_data->sched_time = ktime_add(vgpu_data->sched_time, delta_ts);
+	vgpu_data->left_ts = ktime_sub(vgpu_data->left_ts, delta_ts);
+	vgpu_data->sched_in_time = cur_time;
 }
 
 #define GVT_TS_BALANCE_PERIOD_MS 100
@@ -151,11 +154,7 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 	}
 
 	cur_time = ktime_get();
-	if (scheduler->current_vgpu) {
-		vgpu_data = scheduler->current_vgpu->sched_data;
-		vgpu_data->sched_out_time = cur_time;
-		vgpu_update_timeslice(scheduler->current_vgpu);
-	}
+	vgpu_update_timeslice(scheduler->current_vgpu, cur_time);
 	vgpu_data = scheduler->next_vgpu->sched_data;
 	vgpu_data->sched_in_time = cur_time;
@@ -227,13 +226,13 @@ out:
 void intel_gvt_schedule(struct intel_gvt *gvt)
 {
 	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
+	ktime_t cur_time;
 
 	mutex_lock(&gvt->lock);
+	cur_time = ktime_get();
 
 	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
 			(void *)&gvt->service_request)) {
-		ktime_t cur_time = ktime_get();
-
 		if (cur_time >= sched_data->expire_time) {
 			gvt_balance_timeslice(sched_data);
 			sched_data->expire_time = ktime_add_ms(
@@ -242,6 +241,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
 	}
 	clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);
 
+	vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
 	tbs_sched_func(sched_data);
 
 	mutex_unlock(&gvt->lock);