Diffstat (limited to 'drivers/gpu/drm/xe/xe_query.c')
-rw-r--r--	drivers/gpu/drm/xe/xe_query.c	143
1 file changed, 113 insertions, 30 deletions
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 170ae72d1a7b..1c0915e2cc16 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -16,14 +16,19 @@
 #include "regs/xe_gt_regs.h"
 #include "xe_bo.h"
 #include "xe_device.h"
+#include "xe_eu_stall.h"
 #include "xe_exec_queue.h"
 #include "xe_force_wake.h"
 #include "xe_ggtt.h"
 #include "xe_gt.h"
+#include "xe_gt_topology.h"
 #include "xe_guc_hwconfig.h"
 #include "xe_macros.h"
 #include "xe_mmio.h"
+#include "xe_oa.h"
+#include "xe_pxp.h"
 #include "xe_ttm_vram_mgr.h"
+#include "xe_vram_types.h"
 #include "xe_wa.h"
 
 static const u16 xe_to_user_engine_class[] = {
@@ -119,6 +124,9 @@ query_engine_cycles(struct xe_device *xe,
 	struct xe_gt *gt;
 	unsigned int fw_ref;
 
+	if (IS_SRIOV_VF(xe))
+		return -EOPNOTSUPP;
+
 	if (query->size == 0) {
 		query->size = size;
 		return 0;
@@ -135,7 +143,7 @@ query_engine_cycles(struct xe_device *xe,
 		return -EINVAL;
 
 	eci = &resp.eci;
-	if (eci->gt_id >= XE_MAX_GT_PER_TILE)
+	if (eci->gt_id >= xe->info.max_gt_per_tile)
 		return -EINVAL;
 
 	gt = xe_device_get_gt(xe, eci->gt_id);
@@ -268,8 +276,7 @@ static int query_mem_regions(struct xe_device *xe,
 	mem_regions->mem_regions[0].instance = 0;
 	mem_regions->mem_regions[0].min_page_size = PAGE_SIZE;
 	mem_regions->mem_regions[0].total_size = man->size << PAGE_SHIFT;
-	if (perfmon_capable())
-		mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man);
+	mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man);
 	mem_regions->num_mem_regions = 1;
 
 	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
@@ -285,13 +292,11 @@ static int query_mem_regions(struct xe_device *xe,
 			mem_regions->mem_regions[mem_regions->num_mem_regions].total_size =
 				man->size;
 
-			if (perfmon_capable()) {
-				xe_ttm_vram_get_used(man,
-						     &mem_regions->mem_regions
-						     [mem_regions->num_mem_regions].used,
-						     &mem_regions->mem_regions
-						     [mem_regions->num_mem_regions].cpu_visible_used);
-			}
+			xe_ttm_vram_get_used(man,
+					     &mem_regions->mem_regions
+					     [mem_regions->num_mem_regions].used,
+					     &mem_regions->mem_regions
+					     [mem_regions->num_mem_regions].cpu_visible_used);
 
 			mem_regions->mem_regions[mem_regions->num_mem_regions].cpu_visible_size =
 				xe_ttm_vram_get_cpu_visible_size(man);
@@ -331,9 +336,14 @@ static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
 	config->num_params = num_params;
 	config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] =
 		xe->info.devid | (xe->info.revid << 16);
-	if (xe_device_get_root_tile(xe)->mem.vram.usable_size)
-		config->info[DRM_XE_QUERY_CONFIG_FLAGS] =
+	if (xe->mem.vram)
+		config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
 			DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
+	if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_XE_GPUSVM))
+		config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
+			DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR;
+	config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
+		DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY;
 	config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT] =
 		xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
 	config->info[DRM_XE_QUERY_CONFIG_VA_BITS] = xe->info.va_bits;
@@ -357,6 +367,7 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query)
 	struct drm_xe_query_gt_list __user *query_ptr =
 		u64_to_user_ptr(query->data);
 	struct drm_xe_query_gt_list *gt_list;
+	int iter = 0;
 	u8 id;
 
 	if (query->size == 0) {
@@ -374,12 +385,12 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query)
 
 	for_each_gt(gt, xe, id) {
 		if (xe_gt_is_media_type(gt))
-			gt_list->gt_list[id].type = DRM_XE_QUERY_GT_TYPE_MEDIA;
+			gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MEDIA;
 		else
-			gt_list->gt_list[id].type = DRM_XE_QUERY_GT_TYPE_MAIN;
-		gt_list->gt_list[id].tile_id = gt_to_tile(gt)->id;
-		gt_list->gt_list[id].gt_id = gt->info.id;
-		gt_list->gt_list[id].reference_clock = gt->info.reference_clock;
+			gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MAIN;
+		gt_list->gt_list[iter].tile_id = gt_to_tile(gt)->id;
+		gt_list->gt_list[iter].gt_id = gt->info.id;
+		gt_list->gt_list[iter].reference_clock = gt->info.reference_clock;
 		/*
 		 * The mem_regions indexes in the mask below need to
 		 * directly identify the struct
@@ -395,19 +406,21 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query)
 		 * assumption.
 		 */
 		if (!IS_DGFX(xe))
-			gt_list->gt_list[id].near_mem_regions = 0x1;
+			gt_list->gt_list[iter].near_mem_regions = 0x1;
 		else
-			gt_list->gt_list[id].near_mem_regions =
-				BIT(gt_to_tile(gt)->id) << 1;
-		gt_list->gt_list[id].far_mem_regions = xe->info.mem_region_mask ^
-			gt_list->gt_list[id].near_mem_regions;
+			gt_list->gt_list[iter].near_mem_regions =
+				BIT(gt_to_tile(gt)->mem.vram->id) << 1;
+		gt_list->gt_list[iter].far_mem_regions = xe->info.mem_region_mask ^
+			gt_list->gt_list[iter].near_mem_regions;
 
-		gt_list->gt_list[id].ip_ver_major =
+		gt_list->gt_list[iter].ip_ver_major =
 			REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid);
-		gt_list->gt_list[id].ip_ver_minor =
+		gt_list->gt_list[iter].ip_ver_minor =
 			REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid);
-		gt_list->gt_list[id].ip_ver_rev =
+		gt_list->gt_list[iter].ip_ver_rev =
 			REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid);
+
+		iter++;
 	}
 
 	if (copy_to_user(query_ptr, gt_list, size)) {
@@ -423,7 +436,7 @@ static int query_hwconfig(struct xe_device *xe,
 			  struct drm_xe_device_query *query)
 {
 	struct xe_gt *gt = xe_root_mmio_gt(xe);
-	size_t size = xe_guc_hwconfig_size(&gt->uc.guc);
+	size_t size = gt ? xe_guc_hwconfig_size(&gt->uc.guc) : 0;
 	void __user *query_ptr = u64_to_user_ptr(query->data);
 	void *hwconfig;
 
@@ -462,7 +475,7 @@ static size_t calc_topo_query_size(struct xe_device *xe)
 			sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss);
 
 		/* L3bank mask may not be available for some GTs */
-		if (!XE_WA(gt, no_media_l3))
+		if (xe_gt_topology_report_l3(gt))
 			query_size += sizeof(struct drm_xe_query_topology_mask) +
 				sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask);
 	}
@@ -525,7 +538,7 @@ static int query_gt_topology(struct xe_device *xe,
 	 * mask, then it's better to omit L3 from the query rather than
 	 * reporting bogus or zeroed information to userspace.
 	 */
-	if (!XE_WA(gt, no_media_l3)) {
+	if (xe_gt_topology_report_l3(gt)) {
 		topo.type = DRM_XE_TOPO_L3_BANK;
 		err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask,
 				sizeof(gt->fuse_topo.l3_bank_mask));
@@ -670,8 +683,10 @@ static int query_oa_units(struct xe_device *xe,
 		du->oa_unit_id = u->oa_unit_id;
 		du->oa_unit_type = u->type;
 		du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt);
-		du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS;
-
+		du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS |
+				   DRM_XE_OA_CAPS_OA_BUFFER_SIZE |
+				   DRM_XE_OA_CAPS_WAIT_NUM_REPORTS |
+				   DRM_XE_OA_CAPS_OAM;
 		j = 0;
 		for_each_hw_engine(hwe, gt, hwe_id) {
 			if (!xe_hw_engine_is_reserved(hwe) &&
@@ -695,6 +710,72 @@ static int query_oa_units(struct xe_device *xe,
 	return ret ? -EFAULT : 0;
 }
 
+static int query_pxp_status(struct xe_device *xe, struct drm_xe_device_query *query)
+{
+	struct drm_xe_query_pxp_status __user *query_ptr = u64_to_user_ptr(query->data);
+	size_t size = sizeof(struct drm_xe_query_pxp_status);
+	struct drm_xe_query_pxp_status resp = { 0 };
+	int ret;
+
+	if (query->size == 0) {
+		query->size = size;
+		return 0;
+	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
+		return -EINVAL;
+	}
+
+	ret = xe_pxp_get_readiness_status(xe->pxp);
+	if (ret < 0)
+		return ret;
+
+	resp.status = ret;
+	resp.supported_session_types = BIT(DRM_XE_PXP_TYPE_HWDRM);
+
+	if (copy_to_user(query_ptr, &resp, size))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int query_eu_stall(struct xe_device *xe,
+			  struct drm_xe_device_query *query)
+{
+	void __user *query_ptr = u64_to_user_ptr(query->data);
+	struct drm_xe_query_eu_stall *info;
+	size_t size, array_size;
+	const u64 *rates;
+	u32 num_rates;
+	int ret;
+
+	if (!xe_eu_stall_supported_on_platform(xe))
+		return -ENODEV;
+
+	array_size = xe_eu_stall_get_sampling_rates(&num_rates, &rates);
+	size = sizeof(struct drm_xe_query_eu_stall) + array_size;
+
+	if (query->size == 0) {
+		query->size = size;
+		return 0;
+	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
+		return -EINVAL;
+	}
+
+	info = kzalloc(size, GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->num_sampling_rates = num_rates;
+	info->capabilities = DRM_XE_EU_STALL_CAPS_BASE;
+	info->record_size = xe_eu_stall_data_record_size(xe);
+	info->per_xecore_buf_size = xe_eu_stall_get_per_xecore_buf_size();
+	memcpy(info->sampling_rates, rates, array_size);
+
+	ret = copy_to_user(query_ptr, info, size);
+	kfree(info);
+
+	return ret ? -EFAULT : 0;
+}
+
 static int (* const xe_query_funcs[])(struct xe_device *xe,
 				      struct drm_xe_device_query *query) = {
 	query_engines,
@@ -706,6 +787,8 @@ static int (* const xe_query_funcs[])(struct xe_device *xe,
 	query_engine_cycles,
 	query_uc_fw_version,
 	query_oa_units,
+	query_pxp_status,
+	query_eu_stall,
 };
 
 int xe_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
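
Both new queries follow the same two-call size handshake as the existing ones, visible in the added query_pxp_status() and query_eu_stall() above: a first ioctl with query->size == 0 makes the kernel report the required buffer size, and a second call with a buffer of exactly that size fetches the data. Below is a minimal userspace sketch of that handshake for the EU stall query. It is illustrative only: it assumes the uAPI definitions from include/uapi/drm/xe_drm.h (DRM_XE_DEVICE_QUERY_EU_STALL, struct drm_xe_query_eu_stall), an already-open render node fd, and abbreviates error handling; the helper name is made up for the example.

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>

#include <drm/xe_drm.h>

/*
 * Hypothetical helper: fetch EU stall sampling parameters using the
 * two-call handshake implemented by query_eu_stall() in the kernel.
 */
static struct drm_xe_query_eu_stall *xe_query_eu_stall_info(int fd)
{
	struct drm_xe_device_query q = {
		.query = DRM_XE_DEVICE_QUERY_EU_STALL,
		.size = 0,	/* first call: ask the kernel for the size */
	};
	struct drm_xe_query_eu_stall *info;

	if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q))
		return NULL;	/* e.g. ENODEV if EU stall is unsupported */

	info = calloc(1, q.size);
	if (!info)
		return NULL;

	/* Second call: the kernel fills the exactly-sized buffer. */
	q.data = (uintptr_t)info;
	if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q)) {
		free(info);
		return NULL;
	}

	return info;	/* sampling_rates[] holds num_sampling_rates entries */
}

Note that the kernel side rejects any non-zero size that differs from the one it computed (the XE_IOCTL_DBG(xe, query->size != size) check), so the buffer must be allocated to exactly q.size bytes before the second call.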

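The query_config hunk above also changes DRM_XE_QUERY_CONFIG_FLAGS from a plain assignment to |=, so the VRAM, CPU-address-mirror, and low-latency capability bits accumulate rather than overwrite each other. A short sketch of decoding them in userspace follows; it assumes the flag macros from include/uapi/drm/xe_drm.h and a struct drm_xe_query_config buffer already fetched via DRM_XE_DEVICE_QUERY_CONFIG using the same two-call pattern shown above.

#include <stdio.h>

#include <drm/xe_drm.h>

/* Decode the capability bits from an already-fetched config query. */
static void xe_print_config_flags(const struct drm_xe_query_config *config)
{
	__u64 flags = config->info[DRM_XE_QUERY_CONFIG_FLAGS];

	printf("has VRAM:           %d\n",
	       !!(flags & DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM));
	printf("CPU address mirror: %d\n",
	       !!(flags & DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR));
	printf("low-latency hint:   %d\n",
	       !!(flags & DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY));
}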