Diffstat (limited to 'drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c')
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 755
1 file changed, 620 insertions(+), 135 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 192643d63d22..59c5c6b4d994 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -9,6 +9,7 @@
#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"
+#include "regs/xe_gtt_defs.h"
#include "regs/xe_guc_regs.h"
#include "xe_bo.h"
@@ -20,6 +21,7 @@
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc.h"
+#include "xe_guc_buf.h"
#include "xe_guc_ct.h"
#include "xe_guc_db_mgr.h"
#include "xe_guc_fwif.h"
@@ -32,6 +34,7 @@
#include "xe_migrate.h"
#include "xe_sriov.h"
#include "xe_ttm_vram_mgr.h"
+#include "xe_vram_types.h"
#include "xe_wopcm.h"
#define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
@@ -71,48 +74,27 @@ static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
* Return: number of KLVs that were successfully parsed and saved,
* negative error code on failure.
*/
-static int pf_send_vf_cfg_klvs(struct xe_gt *gt, u32 vfid, const u32 *klvs, u32 num_dwords)
+static int pf_send_vf_buf_klvs(struct xe_gt *gt, u32 vfid, struct xe_guc_buf buf, u32 num_dwords)
{
- const u32 bytes = num_dwords * sizeof(u32);
- struct xe_tile *tile = gt_to_tile(gt);
- struct xe_device *xe = tile_to_xe(tile);
struct xe_guc *guc = &gt->uc.guc;
- struct xe_bo *bo;
- int ret;
-
- bo = xe_bo_create_pin_map(xe, tile, NULL,
- ALIGN(bytes, PAGE_SIZE),
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
- if (IS_ERR(bo))
- return PTR_ERR(bo);
- xe_map_memcpy_to(xe, &bo->vmap, 0, klvs, bytes);
-
- ret = guc_action_update_vf_cfg(guc, vfid, xe_bo_ggtt_addr(bo), num_dwords);
-
- xe_bo_unpin_map_no_vm(bo);
-
- return ret;
+ return guc_action_update_vf_cfg(guc, vfid, xe_guc_buf_flush(buf), num_dwords);
}
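
Note: the rewrite leans on xe_guc_buf_flush(), assumed here to flush the
CPU-side staging copy into the GuC-visible backing store and return its GGTT
address in one call, which is what allows dropping the per-call BO
allocate/copy/free sequence removed above:

	u64 ggtt_addr = xe_guc_buf_flush(buf);	/* CPU writes made visible to GuC */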
/*
* Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
* negative error code on failure.
*/
-static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
- const u32 *klvs, u32 num_dwords)
+static int pf_push_vf_buf_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
+ struct xe_guc_buf buf, u32 num_dwords)
{
int ret;
- xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
-
- ret = pf_send_vf_cfg_klvs(gt, vfid, klvs, num_dwords);
+ ret = pf_send_vf_buf_klvs(gt, vfid, buf, num_dwords);
if (ret != num_klvs) {
int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
+ void *klvs = xe_guc_buf_cpu_ptr(buf);
struct drm_printer p = xe_gt_info_printer(gt);
char name[8];
@@ -124,14 +106,36 @@ static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs
}
if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
- struct drm_printer p = xe_gt_info_printer(gt);
+ struct drm_printer p = xe_gt_dbg_printer(gt);
+ void *klvs = xe_guc_buf_cpu_ptr(buf);
+ char name[8];
+ xe_gt_sriov_dbg(gt, "pushed %s config with %u KLV%s:\n",
+ xe_sriov_function_name(vfid, name, sizeof(name)),
+ num_klvs, str_plural(num_klvs));
xe_guc_klv_print(klvs, num_dwords, &p);
}
return 0;
}
+/*
+ * Return: 0 on success, -ENOBUFS if no free buffer for the indirect data,
+ * negative error code on failure.
+ */
+static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
+ const u32 *klvs, u32 num_dwords)
+{
+ CLASS(xe_guc_buf_from_data, buf)(&gt->uc.guc.buf, klvs, num_dwords * sizeof(u32));
+
+ xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
+
+ if (!xe_guc_buf_is_valid(buf))
+ return -ENOBUFS;
+
+ return pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);
+}
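
The CLASS(xe_guc_buf_from_data, buf)(...) line above uses the scope-based
cleanup pattern from <linux/cleanup.h>: the buffer is released automatically
when 'buf' leaves scope, so no explicit free is needed on the -ENOBUFS path
or after pf_push_vf_buf_klvs(). A sketch of how such a class could be
declared (assumed shape, not copied from xe_guc_buf.h):

	/* sketch: constructor grabs a cache entry, destructor releases it */
	DEFINE_CLASS(xe_guc_buf_from_data, struct xe_guc_buf,
		     xe_guc_buf_release(_T),
		     xe_guc_buf_from_data(cache, data, size),
		     struct xe_guc_buf_cache *cache, const void *data, size_t size);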
+
static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
{
u32 klv[] = {
@@ -207,6 +211,11 @@ static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u
return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout);
}
+static int pf_push_vf_cfg_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
+{
+ return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY, priority);
+}
+
static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
@@ -231,33 +240,42 @@ static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned i
}
/* Return: number of configuration dwords written */
-static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
+static u32 encode_ggtt(u32 *cfg, u64 start, u64 size, bool details)
{
u32 n = 0;
- if (xe_ggtt_node_allocated(config->ggtt_region)) {
- if (details) {
- cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
- cfg[n++] = lower_32_bits(config->ggtt_region->base.start);
- cfg[n++] = upper_32_bits(config->ggtt_region->base.start);
- }
-
- cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
- cfg[n++] = lower_32_bits(config->ggtt_region->base.size);
- cfg[n++] = upper_32_bits(config->ggtt_region->base.size);
+ if (details) {
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
+ cfg[n++] = lower_32_bits(start);
+ cfg[n++] = upper_32_bits(start);
}
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
+ cfg[n++] = lower_32_bits(size);
+ cfg[n++] = upper_32_bits(size);
+
return n;
}
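
For reference, every GuC KLV is a 32-bit header holding a 16-bit key and a
16-bit value length (in dwords), followed by the value itself, so each 64-bit
GGTT attribute costs three dwords. Illustrative expansion of one such entry
(field names assumed from the GuC KLV ABI headers):

	/* illustrative: a 64-bit KLV occupies 3 dwords */
	cfg[n++] = FIELD_PREP(GUC_KLV_0_KEY, GUC_KLV_VF_CFG_GGTT_SIZE_KEY) |
		   FIELD_PREP(GUC_KLV_0_LEN, 2);	/* value length in dwords */
	cfg[n++] = lower_32_bits(size);
	cfg[n++] = upper_32_bits(size);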
/* Return: number of configuration dwords written */
+static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
+{
+ struct xe_ggtt_node *node = config->ggtt_region;
+
+ if (!xe_ggtt_node_allocated(node))
+ return 0;
+
+ return encode_ggtt(cfg, node->base.start, node->base.size, details);
+}
+
+/* Return: number of configuration dwords written */
static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
u32 n = 0;
n += encode_config_ggtt(cfg, config, details);
- if (details) {
+ if (details && config->num_ctxs) {
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
cfg[n++] = config->begin_ctx;
}
@@ -265,7 +283,7 @@ static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
cfg[n++] = config->num_ctxs;
- if (details) {
+ if (details && config->num_dbs) {
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
cfg[n++] = config->begin_db;
}
@@ -275,8 +293,8 @@ static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool
if (config->lmem_obj) {
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
- cfg[n++] = lower_32_bits(config->lmem_obj->size);
- cfg[n++] = upper_32_bits(config->lmem_obj->size);
+ cfg[n++] = lower_32_bits(xe_bo_size(config->lmem_obj));
+ cfg[n++] = upper_32_bits(xe_bo_size(config->lmem_obj));
}
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
@@ -299,16 +317,17 @@ static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool
static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
- u32 max_cfg_dwords = SZ_4K / sizeof(u32);
+ u32 max_cfg_dwords = xe_guc_buf_cache_dwords(&gt->uc.guc.buf);
+ CLASS(xe_guc_buf, buf)(&gt->uc.guc.buf, max_cfg_dwords);
u32 num_dwords;
int num_klvs;
u32 *cfg;
int err;
- cfg = kcalloc(max_cfg_dwords, sizeof(u32), GFP_KERNEL);
- if (!cfg)
- return -ENOMEM;
+ if (!xe_guc_buf_is_valid(buf))
+ return -ENOBUFS;
+ cfg = xe_guc_buf_cpu_ptr(buf);
num_dwords = encode_config(cfg, config, true);
xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
@@ -324,13 +343,43 @@ static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
}
xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
+ if (vfid == PFID) {
+ u64 ggtt_start = xe_wopcm_size(gt_to_xe(gt));
+ u64 ggtt_size = gt_to_tile(gt)->mem.ggtt->size - ggtt_start;
+
+ /* plain PF config data will never include a real GGTT region */
+ xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true));
+
+ /* fake PF GGTT config covers full GGTT range except reserved WOPCM */
+ num_dwords += encode_ggtt(cfg + num_dwords, ggtt_start, ggtt_size, true);
+ }
+
num_klvs = xe_guc_klv_count(cfg, num_dwords);
- err = pf_push_vf_cfg_klvs(gt, vfid, num_klvs, cfg, num_dwords);
+ err = pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);
- kfree(cfg);
return err;
}
+static int pf_push_vf_cfg(struct xe_gt *gt, unsigned int vfid, bool reset)
+{
+ int err = 0;
+
+ xe_gt_assert(gt, vfid);
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (reset)
+ err = pf_send_vf_cfg_reset(gt, vfid);
+ if (!err)
+ err = pf_push_full_vf_config(gt, vfid);
+
+ return err;
+}
+
+static int pf_refresh_vf_cfg(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_push_vf_cfg(gt, vfid, true);
+}
+
static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
@@ -349,7 +398,7 @@ static u64 pf_get_spare_ggtt(struct xe_gt *gt)
{
u64 spare;
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
@@ -361,7 +410,7 @@ static u64 pf_get_spare_ggtt(struct xe_gt *gt)
static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
{
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
@@ -416,7 +465,7 @@ static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
int err;
xe_gt_assert(gt, vfid);
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
size = round_up(size, alignment);
@@ -427,6 +476,10 @@ static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
return err;
pf_release_vf_config_ggtt(gt, config);
+
+ err = pf_refresh_vf_cfg(gt, vfid);
+ if (unlikely(err))
+ return err;
}
xe_gt_assert(gt, !xe_ggtt_node_allocated(config->ggtt_region));
@@ -461,7 +514,7 @@ static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
struct xe_ggtt_node *node = config->ggtt_region;
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
return xe_ggtt_node_allocated(node) ? node->base.size : 0;
}
@@ -529,7 +582,7 @@ int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size
{
int err;
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
if (vfid)
@@ -591,7 +644,7 @@ int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
int err = 0;
xe_gt_assert(gt, vfid);
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
if (!num_vfs)
return 0;
@@ -645,6 +698,22 @@ static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
return fair;
}
+static u64 pf_profile_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
+{
+ bool admin_only_pf = xe_sriov_pf_admin_only(gt_to_xe(gt));
+ u64 shareable = ALIGN_DOWN(GUC_GGTT_TOP, SZ_512M);
+ u64 alignment = pf_get_ggtt_alignment(gt);
+
+ if (admin_only_pf && num_vfs == 1)
+ return ALIGN_DOWN(shareable, alignment);
+
+ /* need to hardcode due to ~512M of GGTT being reserved */
+ if (num_vfs > 56)
+ return SZ_64M - SZ_8M;
+
+ return rounddown_pow_of_two(div_u64(shareable, num_vfs));
+}
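
Worked example of the profile math, assuming GUC_GGTT_TOP sits just below
4 GiB as on current platforms: shareable = ALIGN_DOWN(GUC_GGTT_TOP, SZ_512M)
= 3.5 GiB, so 7 VFs get rounddown_pow_of_two(3.5G / 7) = 512M each, while
8 VFs drop to rounddown_pow_of_two(3.5G / 8) = 256M each.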
+
/**
* xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
* @gt: the &xe_gt (can't be media)
@@ -658,11 +727,12 @@ static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
unsigned int num_vfs)
{
+ u64 profile = pf_profile_fair_ggtt(gt, num_vfs);
u64 fair;
xe_gt_assert(gt, vfid);
xe_gt_assert(gt, num_vfs);
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
fair = pf_estimate_fair_ggtt(gt, num_vfs);
@@ -671,9 +741,71 @@ int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
if (!fair)
return -ENOSPC;
+ fair = min(fair, profile);
+ if (fair < profile)
+ xe_gt_sriov_info(gt, "Using non-profile provisioning (%s %llu vs %llu)\n",
+ "GGTT", fair, profile);
+
return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
}
+/**
+ * xe_gt_sriov_pf_config_ggtt_save() - Save a VF's provisioned GGTT data into a buffer.
+ * @gt: the &xe_gt
+ * @vfid: VF identifier (can't be 0)
+ * @buf: the GGTT data destination buffer (or NULL to query the buf size)
+ * @size: the size of the buffer (or 0 to query the buf size)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: size of the buffer needed to save GGTT data if querying,
+ * 0 on successful save or a negative error code on failure.
+ */
+ssize_t xe_gt_sriov_pf_config_ggtt_save(struct xe_gt *gt, unsigned int vfid,
+ void *buf, size_t size)
+{
+ struct xe_ggtt_node *node;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, !(!buf ^ !size));
+
+ guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
+
+ node = pf_pick_vf_config(gt, vfid)->ggtt_region;
+
+ if (!buf)
+ return xe_ggtt_node_pt_size(node);
+
+ return xe_ggtt_node_save(node, buf, size, vfid);
+}
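
The NULL-buffer convention makes this a two-call API: query the needed size
first, then save into a caller-provided allocation. Hypothetical caller
sketch (error handling elided):

	ssize_t need = xe_gt_sriov_pf_config_ggtt_save(gt, vfid, NULL, 0);
	void *data = need > 0 ? kvmalloc(need, GFP_KERNEL) : NULL;

	if (data && xe_gt_sriov_pf_config_ggtt_save(gt, vfid, data, need) < 0)
		kvfree(data);	/* save failed, discard the buffer */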
+
+/**
+ * xe_gt_sriov_pf_config_ggtt_restore() - Restore a VF's provisioned GGTT data from a buffer.
+ * @gt: the &xe_gt
+ * @vfid: VF identifier (can't be 0)
+ * @buf: the GGTT data source buffer
+ * @size: the size of the buffer
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_ggtt_restore(struct xe_gt *gt, unsigned int vfid,
+ const void *buf, size_t size)
+{
+ struct xe_ggtt_node *node;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid);
+
+ guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
+
+ node = pf_pick_vf_config(gt, vfid)->ggtt_region;
+
+ return xe_ggtt_node_load(node, buf, size, vfid);
+}
+
static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
{
/* XXX: preliminary */
@@ -752,6 +884,10 @@ static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctx
return ret;
pf_release_config_ctxs(gt, config);
+
+ ret = pf_refresh_vf_cfg(gt, vfid);
+ if (unlikely(ret))
+ return ret;
}
if (!num_ctxs)
@@ -868,7 +1004,8 @@ static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, uns
const char *what, const char *(*unit)(u32),
unsigned int last, int err)
{
- xe_gt_assert(gt, first);
+ char name[8];
+
xe_gt_assert(gt, num_vfs);
xe_gt_assert(gt, first <= last);
@@ -876,8 +1013,9 @@ static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, uns
return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err);
if (unlikely(err)) {
- xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
- first, first + num_vfs - 1, what);
+ xe_gt_sriov_notice(gt, "Failed to bulk provision %s..VF%u with %s\n",
+ xe_sriov_function_name(first, name, sizeof(name)),
+ first + num_vfs - 1, what);
if (last > first)
pf_config_bulk_set_u32_done(gt, first, last - first, value,
get, what, unit, last, 0);
@@ -886,8 +1024,9 @@ static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, uns
/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
value = get(gt, first);
- xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n",
- first, first + num_vfs - 1, value, unit(value), what);
+ xe_gt_sriov_info(gt, "%s..VF%u provisioned with %u%s %s\n",
+ xe_sriov_function_name(first, name, sizeof(name)),
+ first + num_vfs - 1, value, unit(value), what);
return 0;
}
@@ -926,6 +1065,16 @@ int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
"GuC context IDs", no_unit, n, err);
}
+static u32 pf_profile_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
+{
+ bool admin_only_pf = xe_sriov_pf_admin_only(gt_to_xe(gt));
+
+ if (admin_only_pf && num_vfs == 1)
+ return ALIGN_DOWN(GUC_ID_MAX, SZ_1K);
+
+ return rounddown_pow_of_two(GUC_ID_MAX / num_vfs);
+}
+
static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
{
struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
@@ -958,6 +1107,7 @@ static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
unsigned int num_vfs)
{
+ u32 profile = pf_profile_fair_ctxs(gt, num_vfs);
u32 fair;
xe_gt_assert(gt, vfid);
@@ -970,6 +1120,11 @@ int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
if (!fair)
return -ENOSPC;
+ fair = min(fair, profile);
+ if (fair < profile)
+ xe_gt_sriov_info(gt, "Using non-profile provisioning (%s %u vs %u)\n",
+ "GuC context IDs", fair, profile);
+
return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
}
@@ -1049,6 +1204,10 @@ static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
return ret;
pf_release_config_dbs(gt, config);
+
+ ret = pf_refresh_vf_cfg(gt, vfid);
+ if (unlikely(ret))
+ return ret;
}
if (!num_dbs)
@@ -1170,6 +1329,17 @@ int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
"GuC doorbell IDs", no_unit, n, err);
}
+static u32 pf_profile_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
+{
+ bool admin_only_pf = xe_sriov_pf_admin_only(gt_to_xe(gt));
+
+ /* XXX: preliminary */
+ if (admin_only_pf && num_vfs == 1)
+ return GUC_NUM_DOORBELLS - SZ_16;
+
+ return rounddown_pow_of_two(GUC_NUM_DOORBELLS / (num_vfs + 1));
+}
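
Unlike the GGTT and context profiles, the doorbell divisor is num_vfs + 1 so
that one share is implicitly kept back for the PF itself; e.g. assuming
GUC_NUM_DOORBELLS is 256, 3 VFs each get rounddown_pow_of_two(256 / 4) = 64,
and the admin-only single-VF case hands out all but SZ_16 (16) doorbells.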
+
static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
{
struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
@@ -1202,6 +1372,7 @@ static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
unsigned int num_vfs)
{
+ u32 profile = pf_profile_fair_dbs(gt, num_vfs);
u32 fair;
xe_gt_assert(gt, vfid);
@@ -1214,6 +1385,11 @@ int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
if (!fair)
return -ENOSPC;
+ fair = min(fair, profile);
+ if (fair < profile)
+ xe_gt_sriov_info(gt, "Using non-profile provisioning (%s %u vs %u)\n",
+ "GuC doorbell IDs", fair, profile);
+
return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
}
@@ -1260,7 +1436,7 @@ static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
struct xe_bo *bo;
bo = config->lmem_obj;
- return bo ? bo->size : 0;
+ return bo ? xe_bo_size(bo) : 0;
}
static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
@@ -1288,7 +1464,17 @@ static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 si
static void pf_force_lmtt_invalidate(struct xe_device *xe)
{
- /* TODO */
+ struct xe_lmtt *lmtt;
+ struct xe_tile *tile;
+ unsigned int tid;
+
+ xe_assert(xe, xe_device_has_lmtt(xe));
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ for_each_tile(tile, xe, tid) {
+ lmtt = &tile->sriov.pf.lmtt;
+ xe_lmtt_invalidate_hw(lmtt);
+ }
}
static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
@@ -1297,7 +1483,7 @@ static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
struct xe_tile *tile;
unsigned int tid;
- xe_assert(xe, IS_DGFX(xe));
+ xe_assert(xe, xe_device_has_lmtt(xe));
xe_assert(xe, IS_SRIOV_PF(xe));
for_each_tile(tile, xe, tid) {
@@ -1318,7 +1504,7 @@ static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
unsigned int tid;
int err;
- xe_assert(xe, IS_DGFX(xe));
+ xe_assert(xe, xe_device_has_lmtt(xe));
xe_assert(xe, IS_SRIOV_PF(xe));
total = 0;
@@ -1349,7 +1535,7 @@ static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
if (err)
goto fail;
- offset += bo->size;
+ offset += xe_bo_size(bo);
}
}
@@ -1364,16 +1550,19 @@ fail:
return err;
}
-static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
+/* Return: %true if there was an LMEM provisioned, %false otherwise */
+static bool pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
if (config->lmem_obj) {
xe_bo_unpin_map_no_vm(config->lmem_obj);
config->lmem_obj = NULL;
+ return true;
}
+ return false;
}
static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
@@ -1386,7 +1575,7 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
xe_gt_assert(gt, vfid);
xe_gt_assert(gt, IS_DGFX(xe));
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
size = round_up(size, pf_get_lmem_alignment(gt));
@@ -1395,7 +1584,8 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
if (unlikely(err))
return err;
- pf_reset_vf_lmtt(xe, vfid);
+ if (xe_device_has_lmtt(xe))
+ pf_reset_vf_lmtt(xe, vfid);
pf_release_vf_config_lmem(gt, config);
}
xe_gt_assert(gt, !config->lmem_obj);
@@ -1404,31 +1594,36 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
return 0;
xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
- bo = xe_bo_create_pin_map(xe, tile, NULL,
- ALIGN(size, PAGE_SIZE),
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_NEEDS_2M |
- XE_BO_FLAG_PINNED);
+ bo = xe_bo_create_pin_range_novm(xe, tile,
+ ALIGN(size, PAGE_SIZE), 0, ~0ull,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_NEEDS_2M |
+ XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_PINNED_LATE_RESTORE |
+ XE_BO_FLAG_FORCE_USER_VRAM);
if (IS_ERR(bo))
return PTR_ERR(bo);
config->lmem_obj = bo;
- err = pf_update_vf_lmtt(xe, vfid);
- if (unlikely(err))
- goto release;
+ if (xe_device_has_lmtt(xe)) {
+ err = pf_update_vf_lmtt(xe, vfid);
+ if (unlikely(err))
+ goto release;
+ }
- err = pf_push_vf_cfg_lmem(gt, vfid, bo->size);
+ err = pf_push_vf_cfg_lmem(gt, vfid, xe_bo_size(bo));
if (unlikely(err))
goto reset_lmtt;
xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
- vfid, bo->size, bo->size / SZ_1M);
+ vfid, xe_bo_size(bo), xe_bo_size(bo) / SZ_1M);
return 0;
reset_lmtt:
- pf_reset_vf_lmtt(xe, vfid);
+ if (xe_device_has_lmtt(xe))
+ pf_reset_vf_lmtt(xe, vfid);
release:
pf_release_vf_config_lmem(gt, config);
return err;
@@ -1469,6 +1664,9 @@ int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size
{
int err;
+ if (!xe_device_has_lmtt(gt_to_xe(gt)))
+ return -EPERM;
+
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
if (vfid)
err = pf_provision_vf_lmem(gt, vfid, size);
@@ -1499,7 +1697,7 @@ int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
int err = 0;
xe_gt_assert(gt, vfid);
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
if (!num_vfs)
return 0;
@@ -1517,11 +1715,37 @@ int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
"LMEM", n, err);
}
+static struct xe_bo *pf_get_vf_config_lmem_obj(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+
+ return config->lmem_obj;
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_lmem_obj() - Take a reference to the &struct xe_bo backing VF LMEM.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * This function can only be called on PF.
+ * The caller is responsible for calling xe_bo_put() on the returned object.
+ *
+ * Return: pointer to the &struct xe_bo backing VF LMEM (if any).
+ */
+struct xe_bo *xe_gt_sriov_pf_config_get_lmem_obj(struct xe_gt *gt, unsigned int vfid)
+{
+ xe_gt_assert(gt, vfid);
+
+ guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
+
+ return xe_bo_get(pf_get_vf_config_lmem_obj(gt, vfid));
+}
+
static u64 pf_query_free_lmem(struct xe_gt *gt)
{
struct xe_tile *tile = gt->tile;
- return xe_ttm_vram_get_avail(&tile->mem.vram_mgr->manager);
+ return xe_ttm_vram_get_avail(&tile->mem.vram->ttm.manager);
}
static u64 pf_query_max_lmem(struct xe_gt *gt)
@@ -1540,8 +1764,6 @@ static u64 pf_query_max_lmem(struct xe_gt *gt)
#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
#define MAX_FAIR_LMEM SZ_128M /* XXX: make it small for the driver bringup */
-#else
-#define MAX_FAIR_LMEM SZ_2G /* XXX: known issue with allocating BO over 2GiB */
#endif
static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
@@ -1551,7 +1773,6 @@ static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
u64 fair;
fair = div_u64(available, num_vfs);
- fair = rounddown_pow_of_two(fair); /* XXX: ttm_vram_mgr & drm_buddy limitation */
fair = ALIGN_DOWN(fair, alignment);
#ifdef MAX_FAIR_LMEM
fair = min_t(u64, MAX_FAIR_LMEM, fair);
@@ -1578,9 +1799,9 @@ int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
xe_gt_assert(gt, vfid);
xe_gt_assert(gt, num_vfs);
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
+ xe_gt_assert(gt, xe_gt_is_main_type(gt));
- if (!IS_DGFX(gt_to_xe(gt)))
+ if (!xe_device_has_lmtt(gt_to_xe(gt)))
return 0;
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
@@ -1612,7 +1833,7 @@ int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
xe_gt_assert(gt, vfid);
xe_gt_assert(gt, num_vfs);
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
result = result ?: err;
err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
@@ -1645,7 +1866,7 @@ static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
return 0;
}
-static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
+static u32 pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
@@ -1653,47 +1874,107 @@ static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
}
/**
- * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF.
+ * xe_gt_sriov_pf_config_set_exec_quantum_locked() - Configure PF/VF execution quantum.
* @gt: the &xe_gt
- * @vfid: the VF identifier
+ * @vfid: the PF or VF identifier
* @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
*
- * This function can only be called on PF.
+ * This function can only be called on PF with the master mutex held.
+ * It will log the provisioned value or an error in case of failure.
*
* Return: 0 on success or a negative error code on failure.
*/
-int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
- u32 exec_quantum)
+int xe_gt_sriov_pf_config_set_exec_quantum_locked(struct xe_gt *gt, unsigned int vfid,
+ u32 exec_quantum)
{
int err;
- mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
err = pf_provision_exec_quantum(gt, vfid, exec_quantum);
- mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
return pf_config_set_u32_done(gt, vfid, exec_quantum,
- xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid),
+ pf_get_exec_quantum(gt, vfid),
"execution quantum", exec_quantum_unit, err);
}
/**
- * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum.
+ * xe_gt_sriov_pf_config_set_exec_quantum() - Configure PF/VF execution quantum.
* @gt: the &xe_gt
- * @vfid: the VF identifier
+ * @vfid: the PF or VF identifier
+ * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
*
* This function can only be called on PF.
+ * It will log the provisioned value or an error in case of failure.
*
- * Return: VF's (or PF's) execution quantum in milliseconds.
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
+ u32 exec_quantum)
+{
+ guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
+
+ return xe_gt_sriov_pf_config_set_exec_quantum_locked(gt, vfid, exec_quantum);
+}
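
guard(mutex)(...) is the scoped-lock helper from <linux/cleanup.h>; the body
above behaves like the open-coded pairing below, except the unlock is emitted
automatically on every return path:

	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = xe_gt_sriov_pf_config_set_exec_quantum_locked(gt, vfid, exec_quantum);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
	return err;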
+
+/**
+ * xe_gt_sriov_pf_config_get_exec_quantum_locked() - Get PF/VF execution quantum.
+ * @gt: the &xe_gt
+ * @vfid: the PF or VF identifier
+ *
+ * This function can only be called on PF with the master mutex held.
+ *
+ * Return: execution quantum in milliseconds (or 0 if infinity).
+ */
+u32 xe_gt_sriov_pf_config_get_exec_quantum_locked(struct xe_gt *gt, unsigned int vfid)
+{
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_get_exec_quantum(gt, vfid);
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_exec_quantum() - Get PF/VF execution quantum.
+ * @gt: the &xe_gt
+ * @vfid: the PF or VF identifier
+ *
+ * This function can only be called on PF.
+ *
+ * Return: execution quantum in milliseconds (or 0 if infinity).
*/
u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
{
- u32 exec_quantum;
+ guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
- mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
- exec_quantum = pf_get_exec_quantum(gt, vfid);
- mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+ return pf_get_exec_quantum(gt, vfid);
+}
+
+/**
+ * xe_gt_sriov_pf_config_bulk_set_exec_quantum_locked() - Configure execution quantum for PF and all VFs.
+ * @gt: the &xe_gt to configure
+ * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
+ *
+ * This function can only be called on PF with the master mutex held.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_bulk_set_exec_quantum_locked(struct xe_gt *gt, u32 exec_quantum)
+{
+ unsigned int totalvfs = xe_gt_sriov_pf_get_totalvfs(gt);
+ unsigned int n;
+ int err = 0;
- return exec_quantum;
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ for (n = 0; n <= totalvfs; n++) {
+ err = pf_provision_exec_quantum(gt, VFID(n), exec_quantum);
+ if (err)
+ break;
+ }
+
+ return pf_config_bulk_set_u32_done(gt, 0, 1 + totalvfs, exec_quantum,
+ pf_get_exec_quantum, "execution quantum",
+ exec_quantum_unit, n, err);
}
static const char *preempt_timeout_unit(u32 preempt_timeout)
@@ -1716,7 +1997,7 @@ static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
return 0;
}
-static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
+static u32 pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
@@ -1724,9 +2005,34 @@ static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
}
/**
- * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF.
+ * xe_gt_sriov_pf_config_set_preempt_timeout_locked() - Configure PF/VF preemption timeout.
* @gt: the &xe_gt
- * @vfid: the VF identifier
+ * @vfid: the PF or VF identifier
+ * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
+ *
+ * This function can only be called on PF with the master mutex held.
+ * It will log the provisioned value or an error in case of failure.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_preempt_timeout_locked(struct xe_gt *gt, unsigned int vfid,
+ u32 preempt_timeout)
+{
+ int err;
+
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
+
+ return pf_config_set_u32_done(gt, vfid, preempt_timeout,
+ pf_get_preempt_timeout(gt, vfid),
+ "preemption timeout", preempt_timeout_unit, err);
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_preempt_timeout() - Configure PF/VF preemption timeout.
+ * @gt: the &xe_gt
+ * @vfid: the PF or VF identifier
* @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
*
* This function can only be called on PF.
@@ -1736,35 +2042,140 @@ static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
u32 preempt_timeout)
{
+ guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
+
+ return xe_gt_sriov_pf_config_set_preempt_timeout_locked(gt, vfid, preempt_timeout);
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_preempt_timeout_locked() - Get PF/VF preemption timeout.
+ * @gt: the &xe_gt
+ * @vfid: the PF or VF identifier
+ *
+ * This function can only be called on PF with the master mutex held.
+ *
+ * Return: preemption timeout in microseconds (or 0 if infinity).
+ */
+u32 xe_gt_sriov_pf_config_get_preempt_timeout_locked(struct xe_gt *gt, unsigned int vfid)
+{
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_get_preempt_timeout(gt, vfid);
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_preempt_timeout() - Get PF/VF preemption timeout.
+ * @gt: the &xe_gt
+ * @vfid: the PF or VF identifier
+ *
+ * This function can only be called on PF.
+ *
+ * Return: preemption timeout in microseconds (or 0 if infinity).
+ */
+u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
+{
+ guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_get_preempt_timeout(gt, vfid);
+}
+
+/**
+ * xe_gt_sriov_pf_config_bulk_set_preempt_timeout_locked() - Configure preemption timeout for PF and all VFs.
+ * @gt: the &xe_gt to configure
+ * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
+ *
+ * This function can only be called on PF with the master mutex held.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_bulk_set_preempt_timeout_locked(struct xe_gt *gt, u32 preempt_timeout)
+{
+ unsigned int totalvfs = xe_gt_sriov_pf_get_totalvfs(gt);
+ unsigned int n;
+ int err = 0;
+
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ for (n = 0; n <= totalvfs; n++) {
+ err = pf_provision_preempt_timeout(gt, VFID(n), preempt_timeout);
+ if (err)
+ break;
+ }
+
+ return pf_config_bulk_set_u32_done(gt, 0, 1 + totalvfs, preempt_timeout,
+ pf_get_preempt_timeout, "preemption timeout",
+ preempt_timeout_unit, n, err);
+}
+
+static const char *sched_priority_unit(u32 priority)
+{
+ return priority == GUC_SCHED_PRIORITY_LOW ? "(low)" :
+ priority == GUC_SCHED_PRIORITY_NORMAL ? "(normal)" :
+ priority == GUC_SCHED_PRIORITY_HIGH ? "(high)" :
+ "(?)";
+}
+
+static int pf_provision_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ int err;
+
+ err = pf_push_vf_cfg_sched_priority(gt, vfid, priority);
+ if (unlikely(err))
+ return err;
+
+ config->sched_priority = priority;
+ return 0;
+}
+
+static int pf_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+
+ return config->sched_priority;
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_sched_priority() - Configure scheduling priority.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ * @priority: requested scheduling priority
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
+{
int err;
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
- err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
+ err = pf_provision_sched_priority(gt, vfid, priority);
mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
- return pf_config_set_u32_done(gt, vfid, preempt_timeout,
- xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid),
- "preemption timeout", preempt_timeout_unit, err);
+ return pf_config_set_u32_done(gt, vfid, priority,
+ xe_gt_sriov_pf_config_get_sched_priority(gt, vfid),
+ "scheduling priority", sched_priority_unit, err);
}
/**
- * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout.
+ * xe_gt_sriov_pf_config_get_sched_priority - Get VF's scheduling priority.
* @gt: the &xe_gt
* @vfid: the VF identifier
*
* This function can only be called on PF.
*
- * Return: VF's (or PF's) preemption timeout in microseconds.
+ * Return: VF's (or PF's) scheduling priority.
*/
-u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
+u32 xe_gt_sriov_pf_config_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
{
- u32 preempt_timeout;
+ u32 priority;
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
- preempt_timeout = pf_get_preempt_timeout(gt, vfid);
+ priority = pf_get_sched_priority(gt, vfid);
mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
- return preempt_timeout;
+ return priority;
}
static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
@@ -1868,12 +2279,14 @@ static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
struct xe_device *xe = gt_to_xe(gt);
+ bool released;
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
pf_release_vf_config_ggtt(gt, config);
if (IS_DGFX(xe)) {
- pf_release_vf_config_lmem(gt, config);
- pf_update_vf_lmtt(xe, vfid);
+ released = pf_release_vf_config_lmem(gt, config);
+ if (released && xe_device_has_lmtt(xe))
+ pf_update_vf_lmtt(xe, vfid);
}
}
pf_release_config_ctxs(gt, config);
@@ -1959,7 +2372,7 @@ static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout)
* Only GGTT and LMEM require clearing by the PF.
* GuC doorbell IDs and context IDs do not need any clearing.
*/
- if (!xe_gt_is_media_type(gt)) {
+ if (xe_gt_is_main_type(gt)) {
pf_sanitize_ggtt(config->ggtt_region, vfid);
if (IS_DGFX(xe))
err = pf_sanitize_lmem(tile, config->lmem_obj, timeout);
@@ -2011,10 +2424,7 @@ int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh
xe_gt_assert(gt, vfid);
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
- if (refresh)
- err = pf_send_vf_cfg_reset(gt, vfid);
- if (!err)
- err = pf_push_full_vf_config(gt, vfid);
+ err = pf_push_vf_cfg(gt, vfid, refresh);
mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
if (unlikely(err)) {
@@ -2029,7 +2439,7 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
struct xe_device *xe = gt_to_xe(gt);
- bool is_primary = !xe_gt_is_media_type(gt);
+ bool is_primary = xe_gt_is_main_type(gt);
bool valid_ggtt, valid_ctxs, valid_dbs;
bool valid_any, valid_all;
@@ -2045,8 +2455,8 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
valid_all = valid_all && valid_ggtt;
valid_any = valid_any || (valid_ggtt && is_primary);
- if (IS_DGFX(xe)) {
- bool valid_lmem = pf_get_vf_config_ggtt(primary_gt, vfid);
+ if (xe_device_has_lmtt(xe)) {
+ bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);
valid_any = valid_any || (valid_lmem && is_primary);
valid_all = valid_all && valid_lmem;
@@ -2087,7 +2497,7 @@ bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid)
*
* This function can only be called on PF.
*
- * Return: mininum size of the buffer or the number of bytes saved,
+ * Return: minimum size of the buffer or the number of bytes saved,
* or a negative error code on failure.
*/
ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size)
@@ -2229,7 +2639,7 @@ int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
return -EINVAL;
if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
- struct drm_printer p = xe_gt_info_printer(gt);
+ struct drm_printer p = xe_gt_dbg_printer(gt);
drm_printf(&p, "restoring VF%u config:\n", vfid);
xe_guc_klv_print(buf, size / sizeof(u32), &p);
@@ -2246,6 +2656,73 @@ int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
return err;
}
+static void pf_prepare_self_config(struct xe_gt *gt)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, PFID);
+
+ /*
+	 * We want the PF to be allowed to use all of the context IDs,
+	 * doorbell IDs and the whole usable GGTT area. While we can store
+	 * the ctxs/dbs numbers directly in the config structure, we can't
+	 * do the same with the GGTT configuration, so let it be prepared
+	 * on demand while pushing the KLVs.
+ */
+ config->num_ctxs = GUC_ID_MAX;
+ config->num_dbs = GUC_NUM_DOORBELLS;
+}
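
Combined with the PFID branch added to pf_push_full_vf_config() above, the
PF self-configuration effectively pushes the following (values illustrative,
key names abbreviated):

	/*
	 * VF_CFG_GGTT_START    = xe_wopcm_size(xe)	(faked, no real GGTT node)
	 * VF_CFG_GGTT_SIZE     = ggtt->size - WOPCM size
	 * VF_CFG_NUM_CONTEXTS  = GUC_ID_MAX
	 * VF_CFG_NUM_DOORBELLS = GUC_NUM_DOORBELLS
	 */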
+
+static int pf_push_self_config(struct xe_gt *gt)
+{
+ int err;
+
+ err = pf_push_full_vf_config(gt, PFID);
+ if (err) {
+ xe_gt_sriov_err(gt, "Failed to push self configuration (%pe)\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ xe_gt_sriov_dbg_verbose(gt, "self configuration completed\n");
+ return 0;
+}
+
+static void fini_config(void *arg)
+{
+ struct xe_gt *gt = arg;
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe);
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ for (n = 1; n <= total_vfs; n++)
+ pf_release_vf_config(gt, n);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+}
+
+/**
+ * xe_gt_sriov_pf_config_init - Initialize SR-IOV configuration data.
+ * @gt: the &xe_gt
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int err;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ pf_prepare_self_config(gt);
+ err = pf_push_self_config(gt);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (err)
+ return err;
+
+ return devm_add_action_or_reset(xe->drm.dev, fini_config, gt);
+}
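
devm_add_action_or_reset() ties fini_config() to the device lifetime: the
release of all VF configs runs automatically at driver unbind, and if
registering the action fails the callback is invoked immediately and the
error returned, so no explicit teardown pairing is required for this init.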
+
/**
* xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset.
* @gt: the &xe_gt
@@ -2260,6 +2737,10 @@ void xe_gt_sriov_pf_config_restart(struct xe_gt *gt)
unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
unsigned int fail = 0, skip = 0;
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ pf_push_self_config(gt);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
for (n = 1; n <= total_vfs; n++) {
if (xe_gt_sriov_pf_config_is_empty(gt, n))
skip++;
@@ -2403,10 +2884,10 @@ int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p)
if (!config->lmem_obj)
continue;
- string_get_size(config->lmem_obj->size, 1, STRING_UNITS_2,
+ string_get_size(xe_bo_size(config->lmem_obj), 1, STRING_UNITS_2,
buf, sizeof(buf));
drm_printf(p, "VF%u:\t%zu\t(%s)\n",
- n, config->lmem_obj->size, buf);
+ n, xe_bo_size(config->lmem_obj), buf);
}
mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
@@ -2451,3 +2932,7 @@ int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_prin
return 0;
}
+
+#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_gt_sriov_pf_config_kunit.c"
+#endif