Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c | 35
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/Makefile | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/Makefile | 36
-rw-r--r--  drivers/gpu/drm/amd/include/atomfirmware.h | 45
-rw-r--r--  drivers/gpu/drm/amd/include/pptable.h | 91
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 20
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 1
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/Makefile | 4
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c | 5
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/sii902x.c | 2
-rw-r--r--  drivers/gpu/drm/ci/arm64.config | 4
-rw-r--r--  drivers/gpu/drm/display/drm_dp_mst_topology.c | 4
-rw-r--r--  drivers/gpu/drm/drm_buddy.c | 11
-rw-r--r--  drivers/gpu/drm/drm_gem_shmem_helper.c | 5
-rw-r--r--  drivers/gpu/drm/gma500/mmu.c | 1
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.c | 32
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_driver.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_trace.h | 56
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 18
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pages.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_types.h | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/shmem_utils.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h | 6
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c | 27
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c | 1
-rw-r--r--  drivers/gpu/drm/imagination/Makefile | 2
-rw-r--r--  drivers/gpu/drm/imagination/pvr_vm_mips.c | 1
-rw-r--r--  drivers/gpu/drm/lima/lima_gem.c | 2
-rw-r--r--  drivers/gpu/drm/lima/lima_trace.h | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_gem.c | 1
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 12
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h | 12
-rw-r--r--  drivers/gpu/drm/msm/registers/gen_header.py | 5
-rw-r--r--  drivers/gpu/drm/nouveau/Kbuild | 10
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 44
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/object.c | 24
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 1
-rw-r--r--  drivers/gpu/drm/panel/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/panel/panel-lg-sw43408.c | 2
-rw-r--r--  drivers/gpu/drm/panel/panel-sitronix-st7789v.c | 20
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem.c | 2
-rw-r--r--  drivers/gpu/drm/panthor/panthor_device.c | 8
-rw-r--r--  drivers/gpu/drm/panthor/panthor_device.h | 1
-rw-r--r--  drivers/gpu/drm/panthor/panthor_fw.c | 5
-rw-r--r--  drivers/gpu/drm/panthor/panthor_gem.c | 8
-rw-r--r--  drivers/gpu/drm/panthor/panthor_gem.h | 8
-rw-r--r--  drivers/gpu/drm/panthor/panthor_heap.c | 36
-rw-r--r--  drivers/gpu/drm/panthor/panthor_sched.c | 48
-rw-r--r--  drivers/gpu/drm/panthor/panthor_sched.h | 2
-rw-r--r--  drivers/gpu/drm/scheduler/gpu_scheduler_trace.h | 4
-rw-r--r--  drivers/gpu/drm/tests/drm_buddy_test.c | 78
-rw-r--r--  drivers/gpu/drm/v3d/v3d_bo.c | 1
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c | 1
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_trace.h | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_binding.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 20
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 28
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 60
-rw-r--r--  drivers/gpu/drm/xe/Makefile | 10
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit.c | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_migrate.c | 12
-rw-r--r--  drivers/gpu/drm/xe/xe_pcode.c | 2
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front_gem.c | 1
-rw-r--r--  drivers/gpu/drm/xlnx/zynqmp_disp.c | 6
105 files changed, 631 insertions, 487 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 1f6b56ec99f6..078f588e99eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -23,7 +23,7 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-FULL_AMD_PATH=$(srctree)/$(src)/..
+FULL_AMD_PATH=$(src)/..
DISPLAY_FOLDER_NAME=display
FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 7ba05f030dd1..e3738d417245 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -455,6 +455,9 @@ void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
else
mem_info->local_mem_size_private =
KFD_XCP_MEMORY_SIZE(adev, xcp->id);
+ } else if (adev->flags & AMD_IS_APU) {
+ mem_info->local_mem_size_public = (ttm_tt_pages_limit() << PAGE_SHIFT);
+ mem_info->local_mem_size_private = 0;
} else {
mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
mem_info->local_mem_size_private = adev->gmc.real_vram_size -
@@ -824,6 +827,8 @@ u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id)
}
do_div(tmp, adev->xcp_mgr->num_xcp_per_mem_partition);
return ALIGN_DOWN(tmp, PAGE_SIZE);
+ } else if (adev->flags & AMD_IS_APU) {
+ return (ttm_tt_pages_limit() << PAGE_SHIFT);
} else {
return adev->gmc.real_vram_size;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index e4d4e55c08ad..48ad0c04aa72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -196,7 +196,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
return -EINVAL;
vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
- if (adev->gmc.is_app_apu) {
+ if (adev->flags & AMD_IS_APU) {
system_mem_needed = size;
ttm_mem_needed = size;
}
@@ -232,7 +232,8 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
"adev reference can't be null when vram is used");
if (adev && xcp_id >= 0) {
adev->kfd.vram_used[xcp_id] += vram_needed;
- adev->kfd.vram_used_aligned[xcp_id] += adev->gmc.is_app_apu ?
+ adev->kfd.vram_used_aligned[xcp_id] +=
+ (adev->flags & AMD_IS_APU) ?
vram_needed :
ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
}
@@ -260,7 +261,7 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
if (adev) {
adev->kfd.vram_used[xcp_id] -= size;
- if (adev->gmc.is_app_apu) {
+ if (adev->flags & AMD_IS_APU) {
adev->kfd.vram_used_aligned[xcp_id] -= size;
kfd_mem_limit.system_mem_used -= size;
kfd_mem_limit.ttm_mem_used -= size;
@@ -889,7 +890,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
* if peer device has large BAR. In contrast, access over xGMI is
* allowed for both small and large BAR configurations of peer device
*/
- if ((adev != bo_adev && !adev->gmc.is_app_apu) &&
+ if ((adev != bo_adev && !(adev->flags & AMD_IS_APU)) &&
((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
@@ -1188,7 +1189,8 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
int ret;
ctx->sync = &mem->sync;
- drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&ctx->exec) {
ctx->n_vms = 0;
list_for_each_entry(entry, &mem->attachments, list) {
@@ -1656,7 +1658,7 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
- atomic64_read(&adev->vram_pin_size)
- reserved_for_pt;
- if (adev->gmc.is_app_apu) {
+ if (adev->flags & AMD_IS_APU) {
system_mem_available = no_system_mem_limit ?
kfd_mem_limit.max_system_mem_limit :
kfd_mem_limit.max_system_mem_limit -
@@ -1704,7 +1706,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
- if (adev->gmc.is_app_apu) {
+ if (adev->flags & AMD_IS_APU) {
domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_flags = 0;
@@ -1951,7 +1953,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
if (size) {
if (!is_imported &&
(mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
- (adev->gmc.is_app_apu &&
+ ((adev->flags & AMD_IS_APU) &&
mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
*size = bo_size;
else
@@ -2373,8 +2375,9 @@ static int import_obj_create(struct amdgpu_device *adev,
(*mem)->dmabuf = dma_buf;
(*mem)->bo = bo;
(*mem)->va = va;
- (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) && !adev->gmc.is_app_apu ?
- AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
+ (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) &&
+ !(adev->flags & AMD_IS_APU) ?
+ AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
(*mem)->mapped_to_gpu_memory = 0;
(*mem)->process_info = avm->process_info;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index a6d64bdbbb14..108003bdf1e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -212,6 +212,7 @@ union igp_info {
struct atom_integrated_system_info_v1_11 v11;
struct atom_integrated_system_info_v1_12 v12;
struct atom_integrated_system_info_v2_1 v21;
+ struct atom_integrated_system_info_v2_3 v23;
};
union umc_info {
@@ -360,6 +361,20 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
if (vram_type)
*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
break;
+ case 3:
+ mem_channel_number = igp_info->v23.umachannelnumber;
+ if (!mem_channel_number)
+ mem_channel_number = 1;
+ mem_type = igp_info->v23.memorytype;
+ if (mem_type == LpDdr5MemType)
+ mem_channel_width = 32;
+ else
+ mem_channel_width = 64;
+ if (vram_width)
+ *vram_width = mem_channel_number * mem_channel_width;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 861ccff78af9..932dc93b2e63 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -5944,13 +5944,18 @@ static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
*speed = PCI_SPEED_UNKNOWN;
*width = PCIE_LNK_WIDTH_UNKNOWN;
- while ((parent = pci_upstream_bridge(parent))) {
- /* skip upstream/downstream switches internal to dGPU*/
- if (parent->vendor == PCI_VENDOR_ID_ATI)
- continue;
- *speed = pcie_get_speed_cap(parent);
- *width = pcie_get_width_cap(parent);
- break;
+ if (amdgpu_device_pcie_dynamic_switching_supported(adev)) {
+ while ((parent = pci_upstream_bridge(parent))) {
+ /* skip upstream/downstream switches internal to dGPU*/
+ if (parent->vendor == PCI_VENDOR_ID_ATI)
+ continue;
+ *speed = pcie_get_speed_cap(parent);
+ *width = pcie_get_width_cap(parent);
+ break;
+ }
+ } else {
+ /* use the current speeds rather than max if switching is not supported */
+ pcie_bandwidth_available(adev->pdev, NULL, speed, width);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 665c63f55278..013ff373e067 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -279,7 +279,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
adev->irq.msi_enabled = false;
if (!amdgpu_msi_ok(adev))
- flags = PCI_IRQ_LEGACY;
+ flags = PCI_IRQ_INTX;
else
flags = PCI_IRQ_ALL_TYPES;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index c8980d5f6540..7021c4a66fb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -46,7 +46,7 @@ struct amdgpu_iv_entry;
#define AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(x) AMDGPU_GET_REG_FIELD(x, 7, 7)
#define AMDGPU_RAS_GPU_ERR_SOCKET_ID(x) AMDGPU_GET_REG_FIELD(x, 10, 8)
#define AMDGPU_RAS_GPU_ERR_AID_ID(x) AMDGPU_GET_REG_FIELD(x, 12, 11)
-#define AMDGPU_RAS_GPU_ERR_HBM_ID(x) AMDGPU_GET_REG_FIELD(x, 13, 13)
+#define AMDGPU_RAS_GPU_ERR_HBM_ID(x) AMDGPU_GET_REG_FIELD(x, 14, 13)
#define AMDGPU_RAS_GPU_ERR_BOOT_STATUS(x) AMDGPU_GET_REG_FIELD(x, 31, 31)
#define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT 1000
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index f539b1d00234..7aafeb763e5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -178,10 +178,10 @@ TRACE_EVENT(amdgpu_cs_ioctl,
TP_fast_assign(
__entry->sched_job_id = job->base.id;
- __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job));
+ __assign_str(timeline);
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
- __assign_str(ring, to_amdgpu_ring(job->base.sched)->name);
+ __assign_str(ring);
__entry->num_ibs = job->num_ibs;
),
TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
@@ -203,10 +203,10 @@ TRACE_EVENT(amdgpu_sched_run_job,
TP_fast_assign(
__entry->sched_job_id = job->base.id;
- __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job));
+ __assign_str(timeline);
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
- __assign_str(ring, to_amdgpu_ring(job->base.sched)->name);
+ __assign_str(ring);
__entry->num_ibs = job->num_ibs;
),
TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
@@ -231,7 +231,7 @@ TRACE_EVENT(amdgpu_vm_grab_id,
TP_fast_assign(
__entry->pasid = vm->pasid;
- __assign_str(ring, ring->name);
+ __assign_str(ring);
__entry->vmid = job->vmid;
__entry->vm_hub = ring->vm_hub,
__entry->pd_addr = job->vm_pd_addr;
@@ -425,7 +425,7 @@ TRACE_EVENT(amdgpu_vm_flush,
),
TP_fast_assign(
- __assign_str(ring, ring->name);
+ __assign_str(ring);
__entry->vmid = vmid;
__entry->vm_hub = ring->vm_hub;
__entry->pd_addr = pd_addr;
@@ -526,7 +526,7 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
),
TP_fast_assign(
- __assign_str(ring, sched_job->base.sched->name);
+ __assign_str(ring);
__entry->id = sched_job->base.id;
__entry->fence = fence;
__entry->ctx = fence->context;
@@ -563,7 +563,7 @@ TRACE_EVENT(amdgpu_runpm_reference_dumps,
),
TP_fast_assign(
__entry->index = index;
- __assign_str(func, func);
+ __assign_str(func);
),
TP_printk("amdgpu runpm reference dump 0x%x: 0x%s\n",
__entry->index,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 7fdd306a48a0..f07647a9a9d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -706,11 +706,15 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
struct amdgpu_vm_bo_base *entry)
{
struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
- struct amdgpu_bo *bo = parent->bo, *pbo;
+ struct amdgpu_bo *bo, *pbo;
struct amdgpu_vm *vm = params->vm;
uint64_t pde, pt, flags;
unsigned int level;
+ if (WARN_ON(!parent))
+ return -EINVAL;
+
+ bo = parent->bo;
for (level = 0, pbo = bo->parent; pbo; ++level)
pbo = pbo->parent;
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index 414ea3f560a7..d4e2aed2efa3 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -422,7 +422,7 @@ __aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
if (adev->gmc.num_mem_partitions == num_xcc / 2)
return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
- AMDGPU_QPX_PARTITION_MODE;
+ AMDGPU_CPX_PARTITION_MODE;
if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
return AMDGPU_DPX_PARTITION_MODE;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 9596bca57212..afc57df421cd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -408,15 +408,8 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &gfx_v11_kfd2kgd;
break;
case IP_VERSION(11, 0, 3):
- if ((adev->pdev->device == 0x7460 &&
- adev->pdev->revision == 0x00) ||
- (adev->pdev->device == 0x7461 &&
- adev->pdev->revision == 0x00))
- /* Note: Compiler version is 11.0.5 while HW version is 11.0.3 */
- gfx_target_version = 110005;
- else
- /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
- gfx_target_version = 110001;
+ /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
+ gfx_target_version = 110001;
f2g = &gfx_v11_kfd2kgd;
break;
case IP_VERSION(11, 5, 0):
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 4bcfbeac48fb..8ee3d07ffbdf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -1023,7 +1023,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1))
return -EINVAL;
- if (adev->gmc.is_app_apu)
+ if (adev->flags & AMD_IS_APU)
return 0;
pgmap = &kfddev->pgmap;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 386875e6eb96..31e500859ab0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -2619,7 +2619,7 @@ svm_range_best_restore_location(struct svm_range *prange,
return -1;
}
- if (node->adev->gmc.is_app_apu)
+ if (node->adev->flags & AMD_IS_APU)
return 0;
if (prange->preferred_loc == gpuid ||
@@ -3337,7 +3337,7 @@ svm_range_best_prefetch_location(struct svm_range *prange)
goto out;
}
- if (bo_node->adev->gmc.is_app_apu) {
+ if (bo_node->adev->flags & AMD_IS_APU) {
best_loc = 0;
goto out;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 026863a0abcd..70c1776611c4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -201,7 +201,7 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s
* is initialized to not 0 when page migration register device memory.
*/
#define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\
- (adev)->gmc.is_app_apu)
+ ((adev)->flags & AMD_IS_APU))
void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 901d1961b739..5fcd4f778dc3 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -8,7 +8,7 @@ config DRM_AMD_DC
depends on BROKEN || !CC_IS_CLANG || ARM64 || RISCV || SPARC64 || X86_64
select SND_HDA_COMPONENT if SND_HDA_CORE
# !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752
- select DRM_AMD_DC_FP if (X86 || LOONGARCH || (PPC64 && ALTIVEC) || (ARM64 && KERNEL_MODE_NEON && !CC_IS_CLANG))
+ select DRM_AMD_DC_FP if ARCH_HAS_KERNEL_FPU_SUPPORT && (!ARM64 || !CC_IS_CLANG)
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index c27063305a13..2c36f3d00ca2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -363,7 +363,7 @@ void dm_helpers_dp_mst_send_payload_allocation(
mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
- ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, new_payload);
+ ret = drm_dp_add_payload_part2(mst_mgr, new_payload);
if (ret) {
amdgpu_dm_set_mst_status(&aconnector->mst_status,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 35733d5c5193..a5e1a93ddaea 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -613,6 +613,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
&connector->base,
dev->mode_config.tile_property,
0);
+ connector->colorspace_property = master->base.colorspace_property;
+ if (connector->colorspace_property)
+ drm_connector_attach_colorspace_property(connector);
drm_connector_set_path_property(connector, pathprop);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
index 133af994a08c..4686d4b0cbad 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
@@ -87,7 +87,7 @@ TRACE_EVENT(amdgpu_dc_performance,
__entry->writes = write_count;
__entry->read_delta = read_count - *last_read;
__entry->write_delta = write_count - *last_write;
- __assign_str(func, func);
+ __assign_str(func);
__entry->line = line;
*last_read = read_count;
*last_write = write_count;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
index 4ae4720535a5..e46f8ce41d87 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
@@ -26,16 +26,7 @@
#include "dc_trace.h"
-#if defined(CONFIG_X86)
-#include <asm/fpu/api.h>
-#elif defined(CONFIG_PPC64)
-#include <asm/switch_to.h>
-#include <asm/cputable.h>
-#elif defined(CONFIG_ARM64)
-#include <asm/neon.h>
-#elif defined(CONFIG_LOONGARCH)
-#include <asm/fpu.h>
-#endif
+#include <linux/fpu.h>
/**
* DOC: DC FPU manipulation overview
@@ -87,20 +78,9 @@ void dc_fpu_begin(const char *function_name, const int line)
WARN_ON_ONCE(!in_task());
preempt_disable();
depth = __this_cpu_inc_return(fpu_recursion_depth);
-
if (depth == 1) {
-#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
+ BUG_ON(!kernel_fpu_available());
kernel_fpu_begin();
-#elif defined(CONFIG_PPC64)
- if (cpu_has_feature(CPU_FTR_VSX_COMP))
- enable_kernel_vsx();
- else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP))
- enable_kernel_altivec();
- else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE))
- enable_kernel_fp();
-#elif defined(CONFIG_ARM64)
- kernel_neon_begin();
-#endif
}
TRACE_DCN_FPU(true, function_name, line, depth);
@@ -122,18 +102,7 @@ void dc_fpu_end(const char *function_name, const int line)
depth = __this_cpu_dec_return(fpu_recursion_depth);
if (depth == 0) {
-#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
kernel_fpu_end();
-#elif defined(CONFIG_PPC64)
- if (cpu_has_feature(CPU_FTR_VSX_COMP))
- disable_kernel_vsx();
- else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP))
- disable_kernel_altivec();
- else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE))
- disable_kernel_fp();
-#elif defined(CONFIG_ARM64)
- kernel_neon_end();
-#endif
} else {
WARN_ON_ONCE(depth < 0);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index c4a5efd2dda5..a94b6d546cd1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -25,40 +25,8 @@
# It provides the general basic services required by other DAL
# subcomponents.
-ifdef CONFIG_X86
-dml_ccflags-$(CONFIG_CC_IS_GCC) := -mhard-float
-dml_ccflags := $(dml_ccflags-y) -msse
-endif
-
-ifdef CONFIG_PPC64
-dml_ccflags := -mhard-float -maltivec
-endif
-
-ifdef CONFIG_ARM64
-dml_rcflags := -mgeneral-regs-only
-endif
-
-ifdef CONFIG_LOONGARCH
-dml_ccflags := -mfpu=64
-dml_rcflags := -msoft-float
-endif
-
-ifdef CONFIG_CC_IS_GCC
-ifneq ($(call gcc-min-version, 70100),y)
-IS_OLD_GCC = 1
-endif
-endif
-
-ifdef CONFIG_X86
-ifdef IS_OLD_GCC
-# Stack alignment mismatch, proceed with caution.
-# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
-# (8B stack alignment).
-dml_ccflags += -mpreferred-stack-boundary=4
-else
-dml_ccflags += -msse2
-endif
-endif
+dml_ccflags := $(CC_FLAGS_FPU)
+dml_rcflags := $(CC_FLAGS_NO_FPU)
ifneq ($(CONFIG_FRAME_WARN),0)
ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
index 1c9498a72520..c576bb0c780f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
@@ -24,40 +24,8 @@
#
# Makefile for dml2.
-ifdef CONFIG_X86
-dml2_ccflags-$(CONFIG_CC_IS_GCC) := -mhard-float
-dml2_ccflags := $(dml2_ccflags-y) -msse
-endif
-
-ifdef CONFIG_PPC64
-dml2_ccflags := -mhard-float -maltivec
-endif
-
-ifdef CONFIG_ARM64
-dml2_rcflags := -mgeneral-regs-only
-endif
-
-ifdef CONFIG_LOONGARCH
-dml2_ccflags := -mfpu=64
-dml2_rcflags := -msoft-float
-endif
-
-ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
-IS_OLD_GCC = 1
-endif
-endif
-
-ifdef CONFIG_X86
-ifdef IS_OLD_GCC
-# Stack alignment mismatch, proceed with caution.
-# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
-# (8B stack alignment).
-dml2_ccflags += -mpreferred-stack-boundary=4
-else
-dml2_ccflags += -msse2
-endif
-endif
+dml2_ccflags := $(CC_FLAGS_FPU)
+dml2_rcflags := $(CC_FLAGS_NO_FPU)
ifneq ($(CONFIG_FRAME_WARN),0)
ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index af3eebb4c9bc..571691837200 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1657,6 +1657,49 @@ struct atom_integrated_system_info_v2_2
uint32_t reserved4[189];
};
+struct uma_carveout_option {
+ char optionName[29]; //max length of string is 28chars + '\0'. Current design is for "minimum", "Medium", "High". This makes entire struct size 64bits
+ uint8_t memoryCarvedGb; //memory carved out with setting
+ uint8_t memoryRemainingGb; //memory remaining on system
+ union {
+ struct _flags {
+ uint8_t Auto : 1;
+ uint8_t Custom : 1;
+ uint8_t Reserved : 6;
+ } flags;
+ uint8_t all8;
+ } uma_carveout_option_flags;
+};
+
+struct atom_integrated_system_info_v2_3 {
+ struct atom_common_table_header table_header;
+ uint32_t vbios_misc; // enum of atom_system_vbiosmisc_def
+ uint32_t gpucapinfo; // enum of atom_system_gpucapinf_def
+ uint32_t system_config;
+ uint32_t cpucapinfo;
+ uint16_t gpuclk_ss_percentage; // unit of 0.001%, 1000 mean 1%
+ uint16_t gpuclk_ss_type;
+ uint16_t dpphy_override; // bit vector, enum of atom_sysinfo_dpphy_override_def
+ uint8_t memorytype; // enum of atom_dmi_t17_mem_type_def, APU memory type indication.
+ uint8_t umachannelnumber; // number of memory channels
+ uint8_t htc_hyst_limit;
+ uint8_t htc_tmp_limit;
+ uint8_t reserved1; // dp_ss_control
+ uint8_t gpu_package_id;
+ struct edp_info_table edp1_info;
+ struct edp_info_table edp2_info;
+ uint32_t reserved2[8];
+ struct atom_external_display_connection_info extdispconninfo;
+ uint8_t UMACarveoutVersion;
+ uint8_t UMACarveoutIndexMax;
+ uint8_t UMACarveoutTypeDefault;
+ uint8_t UMACarveoutIndexDefault;
+ uint8_t UMACarveoutType; //Auto or Custom
+ uint8_t UMACarveoutIndex;
+ struct uma_carveout_option UMASizeControlOption[20];
+ uint8_t reserved3[110];
+};
+
// system_config
enum atom_system_vbiosmisc_def{
INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT = 0x01,
@@ -3540,7 +3583,7 @@ struct atom_gpio_voltage_object_v4
uint8_t phase_delay_us; // phase delay in unit of micro second
uint8_t reserved;
uint32_t gpio_mask_val; // GPIO Mask value
- struct atom_voltage_gpio_map_lut voltage_gpio_lut[1];
+ struct atom_voltage_gpio_map_lut voltage_gpio_lut[] __counted_by(gpio_entry_num);
};
struct atom_svid2_voltage_object_v4
diff --git a/drivers/gpu/drm/amd/include/pptable.h b/drivers/gpu/drm/amd/include/pptable.h
index 2e8e6c9875f6..f83ace2d7ec3 100644
--- a/drivers/gpu/drm/amd/include/pptable.h
+++ b/drivers/gpu/drm/amd/include/pptable.h
@@ -477,31 +477,30 @@ typedef struct _ATOM_PPLIB_STATE_V2
} ATOM_PPLIB_STATE_V2;
typedef struct _StateArray{
- //how many states we have
- UCHAR ucNumEntries;
-
- ATOM_PPLIB_STATE_V2 states[1];
+ //how many states we have
+ UCHAR ucNumEntries;
+
+ ATOM_PPLIB_STATE_V2 states[] /* __counted_by(ucNumEntries) */;
}StateArray;
typedef struct _ClockInfoArray{
- //how many clock levels we have
- UCHAR ucNumEntries;
-
- //sizeof(ATOM_PPLIB_CLOCK_INFO)
- UCHAR ucEntrySize;
-
- UCHAR clockInfo[];
+ //how many clock levels we have
+ UCHAR ucNumEntries;
+
+ //sizeof(ATOM_PPLIB_CLOCK_INFO)
+ UCHAR ucEntrySize;
+
+ UCHAR clockInfo[];
}ClockInfoArray;
typedef struct _NonClockInfoArray{
+ //how many non-clock levels we have. normally should be same as number of states
+ UCHAR ucNumEntries;
+ //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
+ UCHAR ucEntrySize;
- //how many non-clock levels we have. normally should be same as number of states
- UCHAR ucNumEntries;
- //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
- UCHAR ucEntrySize;
-
- ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[];
+ ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[] __counted_by(ucNumEntries);
}NonClockInfoArray;
typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
@@ -513,8 +512,10 @@ typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
{
- UCHAR ucNumEntries; // Number of entries.
- ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries.
+ // Number of entries.
+ UCHAR ucNumEntries;
+ // Dynamically allocate entries.
+ ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[] __counted_by(ucNumEntries);
}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
@@ -529,8 +530,10 @@ typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
{
- UCHAR ucNumEntries; // Number of entries.
- ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries.
+ // Number of entries.
+ UCHAR ucNumEntries;
+ // Dynamically allocate entries.
+ ATOM_PPLIB_Clock_Voltage_Limit_Record entries[] __counted_by(ucNumEntries);
}ATOM_PPLIB_Clock_Voltage_Limit_Table;
union _ATOM_PPLIB_CAC_Leakage_Record
@@ -553,8 +556,10 @@ typedef union _ATOM_PPLIB_CAC_Leakage_Record ATOM_PPLIB_CAC_Leakage_Record;
typedef struct _ATOM_PPLIB_CAC_Leakage_Table
{
- UCHAR ucNumEntries; // Number of entries.
- ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries.
+ // Number of entries.
+ UCHAR ucNumEntries;
+ // Dynamically allocate entries.
+ ATOM_PPLIB_CAC_Leakage_Record entries[] __counted_by(ucNumEntries);
}ATOM_PPLIB_CAC_Leakage_Table;
typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
@@ -568,8 +573,10 @@ typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
{
- UCHAR ucNumEntries; // Number of entries.
- ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries.
+ // Number of entries.
+ UCHAR ucNumEntries;
+ // Dynamically allocate entries.
+ ATOM_PPLIB_PhaseSheddingLimits_Record entries[] __counted_by(ucNumEntries);
}ATOM_PPLIB_PhaseSheddingLimits_Table;
typedef struct _VCEClockInfo{
@@ -580,8 +587,8 @@ typedef struct _VCEClockInfo{
}VCEClockInfo;
typedef struct _VCEClockInfoArray{
- UCHAR ucNumEntries;
- VCEClockInfo entries[1];
+ UCHAR ucNumEntries;
+ VCEClockInfo entries[] __counted_by(ucNumEntries);
}VCEClockInfoArray;
typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
@@ -592,8 +599,8 @@ typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
{
- UCHAR numEntries;
- ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
+ UCHAR numEntries;
+ ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[] __counted_by(numEntries);
}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
typedef struct _ATOM_PPLIB_VCE_State_Record
@@ -604,8 +611,8 @@ typedef struct _ATOM_PPLIB_VCE_State_Record
typedef struct _ATOM_PPLIB_VCE_State_Table
{
- UCHAR numEntries;
- ATOM_PPLIB_VCE_State_Record entries[1];
+ UCHAR numEntries;
+ ATOM_PPLIB_VCE_State_Record entries[] __counted_by(numEntries);
}ATOM_PPLIB_VCE_State_Table;
@@ -626,8 +633,8 @@ typedef struct _UVDClockInfo{
}UVDClockInfo;
typedef struct _UVDClockInfoArray{
- UCHAR ucNumEntries;
- UVDClockInfo entries[1];
+ UCHAR ucNumEntries;
+ UVDClockInfo entries[] __counted_by(ucNumEntries);
}UVDClockInfoArray;
typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
@@ -638,8 +645,8 @@ typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
{
- UCHAR numEntries;
- ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
+ UCHAR numEntries;
+ ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[] __counted_by(numEntries);
}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
typedef struct _ATOM_PPLIB_UVD_Table
@@ -657,8 +664,8 @@ typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
}ATOM_PPLIB_SAMClk_Voltage_Limit_Record;
typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
- UCHAR numEntries;
- ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[];
+ UCHAR numEntries;
+ ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[] __counted_by(numEntries);
}ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
typedef struct _ATOM_PPLIB_SAMU_Table
@@ -675,8 +682,8 @@ typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Record
}ATOM_PPLIB_ACPClk_Voltage_Limit_Record;
typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Table{
- UCHAR numEntries;
- ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[1];
+ UCHAR numEntries;
+ ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[] __counted_by(numEntries);
}ATOM_PPLIB_ACPClk_Voltage_Limit_Table;
typedef struct _ATOM_PPLIB_ACP_Table
@@ -743,9 +750,9 @@ typedef struct ATOM_PPLIB_VQ_Budgeting_Record{
} ATOM_PPLIB_VQ_Budgeting_Record;
typedef struct ATOM_PPLIB_VQ_Budgeting_Table {
- UCHAR revid;
- UCHAR numEntries;
- ATOM_PPLIB_VQ_Budgeting_Record entries[1];
+ UCHAR revid;
+ UCHAR numEntries;
+ ATOM_PPLIB_VQ_Budgeting_Record entries[] __counted_by(numEntries);
} ATOM_PPLIB_VQ_Budgeting_Table;
#pragma pack()
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index bc241b593db1..b6257f34a7c6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -226,15 +226,17 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!en && adev->in_s4) {
- /* Adds a GFX reset as workaround just before sending the
- * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
- * an invalid state.
- */
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
- SMU_RESET_MODE_2, NULL);
- if (ret)
- return ret;
+ if (!en && !adev->in_s0ix) {
+ if (adev->in_s4) {
+ /* Adds a GFX reset as workaround just before sending the
+ * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
+ * an invalid state.
+ */
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+ SMU_RESET_MODE_2, NULL);
+ if (ret)
+ return ret;
+ }
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 706265220292..90703f4542ab 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -1562,7 +1562,6 @@ static int smu_v14_0_2_i2c_control_init(struct smu_context *smu)
smu_i2c->port = i;
mutex_init(&smu_i2c->mutex);
control->owner = THIS_MODULE;
- control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
control->algo = &smu_v14_0_2_i2c_algo;
snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
diff --git a/drivers/gpu/drm/arm/display/komeda/Makefile b/drivers/gpu/drm/arm/display/komeda/Makefile
index 1931a7fa1a14..cf5287fcbbc2 100644
--- a/drivers/gpu/drm/arm/display/komeda/Makefile
+++ b/drivers/gpu/drm/arm/display/komeda/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
ccflags-y := \
- -I $(srctree)/$(src)/../include \
- -I $(srctree)/$(src)
+ -I $(src)/../include \
+ -I $(src)
komeda-y := \
komeda_drv.o \
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c
index d8e449e6ebda..50cb8f7ee6b2 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c
@@ -72,11 +72,6 @@ struct gamma_curve_sector {
u32 segment_width;
};
-struct gamma_curve_segment {
- u32 start;
- u32 end;
-};
-
static struct gamma_curve_sector sector_tbl[] = {
{ 0, 4, 4 },
{ 16, 4, 4 },
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index dd21b81bd28f..66ccb61e2a66 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -953,7 +953,8 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge,
int ret = 0;
if (adv->next_bridge) {
- ret = drm_bridge_attach(bridge->encoder, adv->next_bridge, bridge, flags);
+ ret = drm_bridge_attach(bridge->encoder, adv->next_bridge, bridge,
+ flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 8f84e98249c7..2fbeda9025bf 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -1092,7 +1092,7 @@ static int sii902x_init(struct sii902x *sii902x)
}
sii902x->i2cmux->priv = sii902x;
- ret = i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
+ ret = i2c_mux_add_adapter(sii902x->i2cmux, 0, 0);
if (ret)
goto err_unreg_audio;
diff --git a/drivers/gpu/drm/ci/arm64.config b/drivers/gpu/drm/ci/arm64.config
index 8dbce9919a57..4140303d6260 100644
--- a/drivers/gpu/drm/ci/arm64.config
+++ b/drivers/gpu/drm/ci/arm64.config
@@ -87,7 +87,7 @@ CONFIG_DRM_PARADE_PS8640=y
CONFIG_DRM_LONTIUM_LT9611UXC=y
CONFIG_PHY_QCOM_USB_HS=y
CONFIG_QCOM_GPI_DMA=y
-CONFIG_USB_ONBOARD_HUB=y
+CONFIG_USB_ONBOARD_DEV=y
CONFIG_NVMEM_QCOM_QFPROM=y
CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2=y
@@ -97,7 +97,7 @@ CONFIG_USB_RTL8152=y
# db820c ethernet
CONFIG_ATL1C=y
# Chromebooks ethernet
-CONFIG_USB_ONBOARD_HUB=y
+CONFIG_USB_ONBOARD_DEV=y
# 888 HDK ethernet
CONFIG_USB_LAN78XX=y
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 3577786b5db2..7f8e1cfbe19d 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -3421,7 +3421,6 @@ EXPORT_SYMBOL(drm_dp_remove_payload_part2);
/**
* drm_dp_add_payload_part2() - Execute payload update part 2
* @mgr: Manager to use.
- * @state: The global atomic state
* @payload: The payload to update
*
* If @payload was successfully assigned a starting time slot by drm_dp_add_payload_part1(), this
@@ -3430,14 +3429,13 @@ EXPORT_SYMBOL(drm_dp_remove_payload_part2);
* Returns: 0 on success, negative error code on failure.
*/
int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_atomic_state *state,
struct drm_dp_mst_atomic_payload *payload)
{
int ret = 0;
/* Skip failed payloads */
if (payload->payload_allocation_status != DRM_DP_MST_PAYLOAD_ALLOCATION_DFP) {
- drm_dbg_kms(state->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
+ drm_dbg_kms(mgr->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
payload->port->connector->name);
return -EIO;
}
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 284ebae71cc4..6a8e45e9d0ec 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -239,7 +239,7 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
if (size < chunk_size)
return -EINVAL;
- if (chunk_size < PAGE_SIZE)
+ if (chunk_size < SZ_4K)
return -EINVAL;
if (!is_power_of_2(chunk_size))
@@ -249,6 +249,7 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
mm->size = size;
mm->avail = size;
+ mm->clear_avail = 0;
mm->chunk_size = chunk_size;
mm->max_order = ilog2(size) - ilog2(chunk_size);
@@ -523,11 +524,11 @@ __alloc_range_bias(struct drm_buddy *mm,
continue;
}
+ if (!fallback && block_incompatible(block, flags))
+ continue;
+
if (contains(start, end, block_start, block_end) &&
order == drm_buddy_block_order(block)) {
- if (!fallback && block_incompatible(block, flags))
- continue;
-
/*
* Find the free block within the range.
*/
@@ -574,7 +575,7 @@ __drm_buddy_alloc_range_bias(struct drm_buddy *mm,
block = __alloc_range_bias(mm, start, end, order,
flags, fallback);
- if (IS_ERR(block) && mm->clear_avail)
+ if (IS_ERR(block))
return __alloc_range_bias(mm, start, end, order,
flags, !fallback);
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 177773bcdbfd..53c003983ad1 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -233,6 +233,8 @@ int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
dma_resv_assert_held(shmem->base.resv);
+ drm_WARN_ON(shmem->base.dev, shmem->base.import_attach);
+
ret = drm_gem_shmem_get_pages(shmem);
return ret;
@@ -611,6 +613,9 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
return ret;
}
+ if (is_cow_mapping(vma->vm_flags))
+ return -EINVAL;
+
dma_resv_lock(shmem->base.resv, NULL);
ret = drm_gem_shmem_get_pages(shmem);
dma_resv_unlock(shmem->base.resv);
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index a70b01ccdf70..4d78b33eaa82 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -5,6 +5,7 @@
**************************************************************************/
#include <linux/highmem.h>
+#include <linux/vmalloc.h>
#include "mmu.h"
#include "psb_drv.h"
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 7cad944b825c..c8c8b31da4fb 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -36,7 +36,7 @@ subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
# drivers. Define I915 when building i915.
subdir-ccflags-y += -DI915
-subdir-ccflags-y += -I$(srctree)/$(src)
+subdir-ccflags-y += -I$(src)
# Please keep these build lists sorted!
@@ -430,7 +430,7 @@ no-header-test := \
always-$(CONFIG_DRM_I915_WERROR) += \
$(patsubst %.h,%.hdrtest, $(filter-out $(no-header-test), \
- $(shell cd $(srctree)/$(src) && find * -name '*.h')))
+ $(shell cd $(src) && find * -name '*.h')))
quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
cmd_hdrtest = $(CC) $(filter-out $(CFLAGS_GCOV), $(c_flags)) -S -o /dev/null -x c /dev/null -include $<; \
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index ed81e1466c4b..40e7d862675e 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -1252,17 +1252,6 @@ static const struct component_ops i915_audio_component_bind_ops = {
static void i915_audio_component_init(struct drm_i915_private *i915)
{
u32 aud_freq, aud_freq_init;
- int ret;
-
- ret = component_add_typed(i915->drm.dev,
- &i915_audio_component_bind_ops,
- I915_COMPONENT_AUDIO);
- if (ret < 0) {
- drm_err(&i915->drm,
- "failed to add audio component (%d)\n", ret);
- /* continue with reduced functionality */
- return;
- }
if (DISPLAY_VER(i915) >= 9) {
aud_freq_init = intel_de_read(i915, AUD_FREQ_CNTRL);
@@ -1285,6 +1274,21 @@ static void i915_audio_component_init(struct drm_i915_private *i915)
/* init with current cdclk */
intel_audio_cdclk_change_post(i915);
+}
+
+static void i915_audio_component_register(struct drm_i915_private *i915)
+{
+ int ret;
+
+ ret = component_add_typed(i915->drm.dev,
+ &i915_audio_component_bind_ops,
+ I915_COMPONENT_AUDIO);
+ if (ret < 0) {
+ drm_err(&i915->drm,
+ "failed to add audio component (%d)\n", ret);
+ /* continue with reduced functionality */
+ return;
+ }
i915->display.audio.component_registered = true;
}
@@ -1317,6 +1321,12 @@ void intel_audio_init(struct drm_i915_private *i915)
i915_audio_component_init(i915);
}
+void intel_audio_register(struct drm_i915_private *i915)
+{
+ if (!i915->display.audio.lpe.platdev)
+ i915_audio_component_register(i915);
+}
+
/**
* intel_audio_deinit() - deinitialize the audio driver
* @i915: the i915 drm device private data
diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h
index 9327954b801e..576c061d72a4 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_audio.h
@@ -28,6 +28,7 @@ void intel_audio_codec_get_config(struct intel_encoder *encoder,
void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv);
void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv);
void intel_audio_init(struct drm_i915_private *dev_priv);
+void intel_audio_register(struct drm_i915_private *i915);
void intel_audio_deinit(struct drm_i915_private *dev_priv);
void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 89bd032ed995..794b4af38055 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -540,6 +540,8 @@ void intel_display_driver_register(struct drm_i915_private *i915)
intel_display_driver_enable_user_access(i915);
+ intel_audio_register(i915);
+
intel_display_debugfs_register(i915);
/*
diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h
index 7862e7cefe02..49a5e6d9dc0d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_trace.h
+++ b/drivers/gpu/drm/i915/display/intel_display_trace.h
@@ -34,7 +34,7 @@ TRACE_EVENT(intel_pipe_enable,
TP_fast_assign(
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc *it__;
- __assign_str(dev, __dev_name_kms(crtc));
+ __assign_str(dev);
for_each_intel_crtc(&dev_priv->drm, it__) {
__entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
__entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
@@ -63,7 +63,7 @@ TRACE_EVENT(intel_pipe_disable,
TP_fast_assign(
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc *it__;
- __assign_str(dev, __dev_name_kms(crtc));
+ __assign_str(dev);
for_each_intel_crtc(&dev_priv->drm, it__) {
__entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
__entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
@@ -91,7 +91,7 @@ TRACE_EVENT(intel_pipe_crc,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_kms(crtc));
+ __assign_str(dev);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -119,7 +119,7 @@ TRACE_EVENT(intel_cpu_fifo_underrun,
TP_fast_assign(
struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
- __assign_str(dev, __dev_name_kms(crtc));
+ __assign_str(dev);
__entry->pipe = pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -144,7 +144,7 @@ TRACE_EVENT(intel_pch_fifo_underrun,
TP_fast_assign(
enum pipe pipe = pch_transcoder;
struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
- __assign_str(dev, __dev_name_i915(dev_priv));
+ __assign_str(dev);
__entry->pipe = pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -169,7 +169,7 @@ TRACE_EVENT(intel_memory_cxsr,
TP_fast_assign(
struct intel_crtc *crtc;
- __assign_str(dev, __dev_name_i915(dev_priv));
+ __assign_str(dev);
for_each_intel_crtc(&dev_priv->drm, crtc) {
__entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc);
__entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc);
@@ -209,7 +209,7 @@ TRACE_EVENT(g4x_wm,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_kms(crtc));
+ __assign_str(dev);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -256,7 +256,7 @@ TRACE_EVENT(vlv_wm,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_kms(crtc));
+ __assign_str(dev);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -293,7 +293,7 @@ TRACE_EVENT(vlv_fifo_size,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_kms(crtc));
+ __assign_str(dev);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -323,8 +323,8 @@ TRACE_EVENT(intel_plane_update_noarm,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_kms(plane));
- __assign_str(name, plane->base.name);
+ __assign_str(dev);
+ __assign_str(name);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -354,8 +354,8 @@ TRACE_EVENT(intel_plane_update_arm,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_kms(plane));
- __assign_str(name, plane->base.name);
+ __assign_str(dev);
+ __assign_str(name);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -383,8 +383,8 @@ TRACE_EVENT(intel_plane_disable_arm,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_kms(plane));
- __assign_str(name, plane->base.name);
+ __assign_str(dev);
+ __assign_str(name);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -410,8 +410,8 @@ TRACE_EVENT(intel_fbc_activate,
TP_fast_assign(
struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
plane->pipe);
- __assign_str(dev, __dev_name_kms(plane));
- __assign_str(name, plane->base.name);
+ __assign_str(dev);
+ __assign_str(name);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -437,8 +437,8 @@ TRACE_EVENT(intel_fbc_deactivate,
TP_fast_assign(
struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
plane->pipe);
- __assign_str(dev, __dev_name_kms(plane));
- __assign_str(name, plane->base.name);
+ __assign_str(dev);
+ __assign_str(name);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -464,8 +464,8 @@ TRACE_EVENT(intel_fbc_nuke,
TP_fast_assign(
struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
plane->pipe);
- __assign_str(dev, __dev_name_kms(plane));
- __assign_str(name, plane->base.name);
+ __assign_str(dev);
+ __assign_str(name);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -488,7 +488,7 @@ TRACE_EVENT(intel_crtc_vblank_work_start,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_kms(crtc));
+ __assign_str(dev);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -511,7 +511,7 @@ TRACE_EVENT(intel_crtc_vblank_work_end,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_kms(crtc));
+ __assign_str(dev);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -536,7 +536,7 @@ TRACE_EVENT(intel_pipe_update_start,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_kms(crtc));
+ __assign_str(dev);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -564,7 +564,7 @@ TRACE_EVENT(intel_pipe_update_vblank_evaded,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_kms(crtc));
+ __assign_str(dev);
__entry->pipe = crtc->pipe;
__entry->frame = crtc->debug.start_vbl_count;
__entry->scanline = crtc->debug.scanline_start;
@@ -590,7 +590,7 @@ TRACE_EVENT(intel_pipe_update_end,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_kms(crtc));
+ __assign_str(dev);
__entry->pipe = crtc->pipe;
__entry->frame = frame;
__entry->scanline = scanline_end;
@@ -613,7 +613,7 @@ TRACE_EVENT(intel_frontbuffer_invalidate,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_i915(i915));
+ __assign_str(dev);
__entry->frontbuffer_bits = frontbuffer_bits;
__entry->origin = origin;
),
@@ -634,7 +634,7 @@ TRACE_EVENT(intel_frontbuffer_flush,
),
TP_fast_assign(
- __assign_str(dev, __dev_name_i915(i915));
+ __assign_str(dev);
__entry->frontbuffer_bits = frontbuffer_bits;
__entry->origin = origin;
),
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index c772ba19c547..715d2f59f565 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -1241,7 +1241,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
if (first_mst_stream)
intel_ddi_wait_for_fec_status(encoder, pipe_config, true);
- drm_dp_add_payload_part2(&intel_dp->mst_mgr, &state->base,
+ drm_dp_add_payload_part2(&intel_dp->mst_mgr,
drm_atomic_get_mst_payload_state(mst_state, connector->port));
if (DISPLAY_VER(dev_priv) >= 12)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 42619fc05de4..090724fa766c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -255,6 +255,7 @@ struct i915_execbuffer {
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
intel_wakeref_t wakeref;
+ intel_wakeref_t wakeref_gt0;
/** our requests to build */
struct i915_request *requests[MAX_ENGINE_INSTANCE + 1];
@@ -2685,6 +2686,7 @@ static int
eb_select_engine(struct i915_execbuffer *eb)
{
struct intel_context *ce, *child;
+ struct intel_gt *gt;
unsigned int idx;
int err;
@@ -2708,10 +2710,17 @@ eb_select_engine(struct i915_execbuffer *eb)
}
}
eb->num_batches = ce->parallel.number_children + 1;
+ gt = ce->engine->gt;
for_each_child(ce, child)
intel_context_get(child);
eb->wakeref = intel_gt_pm_get(ce->engine->gt);
+ /*
+ * Keep GT0 active on MTL so that i915_vma_parked() doesn't
+ * free VMAs while execbuf ioctl is validating VMAs.
+ */
+ if (gt->info.id)
+ eb->wakeref_gt0 = intel_gt_pm_get(to_gt(gt->i915));
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
err = intel_context_alloc_state(ce);
@@ -2750,6 +2759,9 @@ eb_select_engine(struct i915_execbuffer *eb)
return err;
err:
+ if (gt->info.id)
+ intel_gt_pm_put(to_gt(gt->i915), eb->wakeref_gt0);
+
intel_gt_pm_put(ce->engine->gt, eb->wakeref);
for_each_child(ce, child)
intel_context_put(child);
@@ -2763,6 +2775,12 @@ eb_put_engine(struct i915_execbuffer *eb)
struct intel_context *child;
i915_vm_put(eb->context->vm);
+ /*
+ * This works in conjunction with eb_select_engine() to prevent
+ * i915_vma_parked() from interfering while execbuf validates vmas.
+ */
+ if (eb->gt->info.id)
+ intel_gt_pm_put(to_gt(eb->gt->i915), eb->wakeref_gt0);
intel_gt_pm_put(eb->context->engine->gt, eb->wakeref);
for_each_child(eb->context, child)
intel_context_put(child);
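The wakeref_gt0 logic above only triggers on a remote GT (gt->info.id != 0, e.g. the media GT on MTL), taking an extra runtime-PM reference on GT0 so the primary GT cannot park and have i915_vma_parked() reap VMAs while the ioctl is still validating them. A condensed sketch of the acquire/release pairing this relies on, using only the helpers visible in the hunk:

	intel_wakeref_t wf, wf_gt0 = 0;

	wf = intel_gt_pm_get(engine->gt);
	if (engine->gt->info.id)			/* remote GT: also pin GT0 */
		wf_gt0 = intel_gt_pm_get(to_gt(engine->gt->i915));

	/* ... look up, validate and bind VMAs, build the request ... */

	if (engine->gt->info.id)
		intel_gt_pm_put(to_gt(engine->gt->i915), wf_gt0);
	intel_gt_pm_put(engine->gt, wf);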
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 3560a062d287..5d7446a48ae7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -284,7 +284,9 @@ bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);
static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
- return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
+ /* TODO: make DPT shrinkable when it has no bound vmas */
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE) &&
+ !obj->is_dpt;
}
static inline bool
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 0ba955611dfb..8780aa243105 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -5,6 +5,7 @@
*/
#include <drm/drm_cache.h>
+#include <linux/vmalloc.h>
#include "gt/intel_gt.h"
#include "gt/intel_tlb.h"
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 65a931ea80e9..3527b8f446fe 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -196,7 +196,7 @@ static int verify_access(struct drm_i915_private *i915,
if (err)
goto out_file;
- mode = intel_gt_coherent_map_type(to_gt(i915), native_obj, true);
+ mode = intel_gt_coherent_map_type(to_gt(i915), native_obj, false);
vaddr = i915_gem_object_pin_map_unlocked(native_obj, mode);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
index b2a5882b8f81..075657018739 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
@@ -4,6 +4,7 @@
* Copyright © 2016 Intel Corporation
*/
+#include <linux/vmalloc.h>
#include "mock_dmabuf.h"
static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment,
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index d650beb8ed22..20b9b04ec1e0 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -263,8 +263,13 @@ static void signal_irq_work(struct irq_work *work)
i915_request_put(rq);
}
+ /* Lazy irq enabling after HW submission */
if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers))
intel_breadcrumbs_arm_irq(b);
+
+ /* And confirm that we still want irqs enabled before we yield */
+ if (READ_ONCE(b->irq_armed) && !atomic_read(&b->active))
+ intel_breadcrumbs_disarm_irq(b);
}
struct intel_breadcrumbs *
@@ -315,13 +320,7 @@ void __intel_breadcrumbs_park(struct intel_breadcrumbs *b)
return;
/* Kick the work once more to drain the signalers, and disarm the irq */
- irq_work_sync(&b->irq_work);
- while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
- local_irq_disable();
- signal_irq_work(&b->irq_work);
- local_irq_enable();
- cond_resched();
- }
+ irq_work_queue(&b->irq_work);
}
void intel_breadcrumbs_free(struct kref *kref)
@@ -404,7 +403,7 @@ static void insert_breadcrumb(struct i915_request *rq)
* the request as it may have completed and raised the interrupt as
* we were attaching it into the lists.
*/
- if (!b->irq_armed || __i915_request_is_complete(rq))
+ if (!READ_ONCE(b->irq_armed) || __i915_request_is_complete(rq))
irq_work_queue(&b->irq_work);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 5c8e9ee3b008..3b740ca25000 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -885,6 +885,12 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
if (IS_DG2(gt->i915)) {
u8 first_ccs = __ffs(CCS_MASK(gt));
+ /*
+ * Store the number of active cslices before
+ * changing the CCS engine configuration
+ */
+ gt->ccs.cslices = CCS_MASK(gt);
+
/* Mask off all the CCS engine */
info->engine_mask &= ~GENMASK(CCS3, CCS0);
/* Put back in the first CCS engine */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
index 99b71bb7da0a..3c62a44e9106 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
@@ -19,7 +19,7 @@ unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt)
/* Build the value for the fixed CCS load balancing */
for (cslice = 0; cslice < I915_MAX_CCS; cslice++) {
- if (CCS_MASK(gt) & BIT(cslice))
+ if (gt->ccs.cslices & BIT(cslice))
/*
* If available, assign the cslice
* to the first available engine...
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index def7dd0eb6f1..cfdd2ad5e954 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -207,6 +207,14 @@ struct intel_gt {
[MAX_ENGINE_INSTANCE + 1];
enum intel_submission_method submission_method;
+ struct {
+ /*
+ * Mask of the non-fused CCS slices
+ * to be used for load balancing
+ */
+ intel_engine_mask_t cslices;
+ } ccs;
+
/*
* Default address space (either GGTT or ppGTT depending on arch).
*
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index bccc3a1200bc..1fb6ff77fd89 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -7,6 +7,7 @@
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
+#include <linux/vmalloc.h>
#include "i915_drv.h"
#include "gem/i915_gem_object.h"
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
index bebf28e3c479..525587cfe1af 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
@@ -29,9 +29,9 @@
*/
#define GUC_KLV_LEN_MIN 1u
-#define GUC_KLV_0_KEY (0xffff << 16)
-#define GUC_KLV_0_LEN (0xffff << 0)
-#define GUC_KLV_n_VALUE (0xffffffff << 0)
+#define GUC_KLV_0_KEY (0xffffu << 16)
+#define GUC_KLV_0_LEN (0xffffu << 0)
+#define GUC_KLV_n_VALUE (0xffffffffu << 0)
/**
* DOC: GuC Self Config KLVs
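The 'u' suffixes matter because 0xffff << 16 is a signed-int shift whose result exceeds INT_MAX; once the value is widened to a 64-bit KLV word it is, in practice, sign-extended and sets all of the upper 32 bits, corrupting the mask. 0xffffffff was already unsigned, so that define only changes for consistency. A small standalone C program (userspace, illustration only, not kernel code) showing the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Signed overflow is undefined; compilers typically yield -65536 here,
	 * which sign-extends to 0xffffffffffff0000 when widened to 64 bits. */
	uint64_t bad  = (uint64_t)(0xffff  << 16);
	/* Unsigned literal: the result stays 0x00000000ffff0000 as intended. */
	uint64_t good = (uint64_t)(0xffffu << 16);

	printf("bad=%#llx good=%#llx\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}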
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 4dd52ac2043e..221a3ae81baf 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -30,6 +30,7 @@
#include <linux/firmware.h>
#include <linux/crc32.h>
+#include <linux/vmalloc.h>
#include "i915_drv.h"
#include "gvt.h"
@@ -50,21 +51,7 @@ struct gvt_firmware_header {
#define dev_to_drm_minor(d) dev_get_drvdata((d))
-static ssize_t
-gvt_firmware_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t offset, size_t count)
-{
- memcpy(buf, attr->private + offset, count);
- return count;
-}
-
-static struct bin_attribute firmware_attr = {
- .attr = {.name = "gvt_firmware", .mode = (S_IRUSR)},
- .read = gvt_firmware_read,
- .write = NULL,
- .mmap = NULL,
-};
+static BIN_ATTR_SIMPLE_ADMIN_RO(gvt_firmware);
static int expose_firmware_sysfs(struct intel_gvt *gvt)
{
@@ -107,10 +94,10 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
crc32_start = offsetof(struct gvt_firmware_header, version);
h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start);
- firmware_attr.size = size;
- firmware_attr.private = firmware;
+ bin_attr_gvt_firmware.size = size;
+ bin_attr_gvt_firmware.private = firmware;
- ret = device_create_bin_file(&pdev->dev, &firmware_attr);
+ ret = device_create_bin_file(&pdev->dev, &bin_attr_gvt_firmware);
if (ret) {
vfree(firmware);
return ret;
@@ -122,8 +109,8 @@ static void clean_firmware_sysfs(struct intel_gvt *gvt)
{
struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
- device_remove_bin_file(&pdev->dev, &firmware_attr);
- vfree(firmware_attr.private);
+ device_remove_bin_file(&pdev->dev, &bin_attr_gvt_firmware);
+ vfree(bin_attr_gvt_firmware.private);
}
/**
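BIN_ATTR_SIMPLE_ADMIN_RO() (from <linux/sysfs.h>) declares a root-readable bin_attribute named bin_attr_<name> whose read callback simply copies out of ->private, which is exactly what the hand-rolled gvt_firmware_read() did. A hedged sketch of the resulting usage pattern, with a made-up attribute name and helpers:

/* Sketch only; "my_blob" and the functions around it are illustrative. */
static BIN_ATTR_SIMPLE_ADMIN_RO(my_blob);	/* defines bin_attr_my_blob */

static int expose_blob(struct device *dev, void *blob, size_t size)
{
	bin_attr_my_blob.size = size;
	bin_attr_my_blob.private = blob;
	return device_create_bin_file(dev, &bin_attr_my_blob);
}

static void remove_blob(struct device *dev)
{
	device_remove_bin_file(dev, &bin_attr_my_blob);
	vfree(bin_attr_my_blob.private);	/* blob came from vmalloc() here */
}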
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 094fca9b0e73..58cca4906f41 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -39,6 +39,7 @@
#include "trace.h"
#include "gt/intel_gt_regs.h"
+#include <linux/vmalloc.h>
#if defined(VERBOSE_DEBUG)
#define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 102eb354fed6..22fbddbe3e23 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -54,6 +54,7 @@
#include "display/skl_watermark_regs.h"
#include "display/vlv_dsi_pll_regs.h"
#include "gt/intel_gt_regs.h"
+#include <linux/vmalloc.h>
/* XXX FIXME i915 has changed PP_XXX definition */
#define PCH_PP_STATUS _MMIO(0xc7200)
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 922711e0e30b..e16e0d4c9534 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -33,6 +33,7 @@
*
*/
+#include <linux/vmalloc.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "gvt.h"
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 08ad1bd651f1..63c751ca4119 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -34,6 +34,7 @@
#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
+#include <linux/vmalloc.h>
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 9b6d87c8b583..5a01d60e5186 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -28,6 +28,7 @@
#include "gt/intel_context.h"
#include "gt/intel_ring.h"
#include "gt/shmem_utils.h"
+#include <linux/vmalloc.h>
/**
* DOC: Intel GVT-g host support
diff --git a/drivers/gpu/drm/imagination/Makefile b/drivers/gpu/drm/imagination/Makefile
index ec6db8e9b403..9bc6a3884c22 100644
--- a/drivers/gpu/drm/imagination/Makefile
+++ b/drivers/gpu/drm/imagination/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only OR MIT
# Copyright (c) 2023 Imagination Technologies Ltd.
-subdir-ccflags-y := -I$(srctree)/$(src)
+subdir-ccflags-y := -I$(src)
powervr-y := \
pvr_ccb.o \
diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.c b/drivers/gpu/drm/imagination/pvr_vm_mips.c
index 4f99b4af871c..94af854547d6 100644
--- a/drivers/gpu/drm/imagination/pvr_vm_mips.c
+++ b/drivers/gpu/drm/imagination/pvr_vm_mips.c
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/vmalloc.h>
/**
* pvr_vm_mips_init() - Initialise MIPS FW pagetable
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 7ea244d876ca..9bb997dbb4b9 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -185,7 +185,7 @@ static int lima_gem_pin(struct drm_gem_object *obj)
if (bo->heap_size)
return -EINVAL;
- return drm_gem_shmem_pin(&bo->base);
+ return drm_gem_shmem_pin_locked(&bo->base);
}
static int lima_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
diff --git a/drivers/gpu/drm/lima/lima_trace.h b/drivers/gpu/drm/lima/lima_trace.h
index 494b9790b1da..3a349d10304e 100644
--- a/drivers/gpu/drm/lima/lima_trace.h
+++ b/drivers/gpu/drm/lima/lima_trace.h
@@ -24,7 +24,7 @@ DECLARE_EVENT_CLASS(lima_task,
__entry->task_id = task->base.id;
__entry->context = task->base.s_fence->finished.context;
__entry->seqno = task->base.s_fence->finished.seqno;
- __assign_str(pipe, task->base.sched->name);
+ __assign_str(pipe);
),
TP_printk("task=%llu, context=%u seqno=%u pipe=%s",
diff --git a/drivers/gpu/drm/mediatek/mtk_gem.c b/drivers/gpu/drm/mediatek/mtk_gem.c
index 5a82d7cf3ed0..a172456d1d7b 100644
--- a/drivers/gpu/drm/mediatek/mtk_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_gem.c
@@ -4,6 +4,7 @@
*/
#include <linux/dma-buf.h>
+#include <linux/vmalloc.h>
#include <drm/drm.h>
#include <drm/drm_device.h>
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 718968717ad5..eb788921ff4f 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
-ccflags-y := -I $(srctree)/$(src)
+ccflags-y := -I $(src)
ccflags-y += -I $(obj)/generated
-ccflags-y += -I $(srctree)/$(src)/disp/dpu1
-ccflags-$(CONFIG_DRM_MSM_DSI) += -I $(srctree)/$(src)/dsi
-ccflags-$(CONFIG_DRM_MSM_DP) += -I $(srctree)/$(src)/dp
+ccflags-y += -I $(src)/disp/dpu1
+ccflags-$(CONFIG_DRM_MSM_DSI) += -I $(src)/dsi
+ccflags-$(CONFIG_DRM_MSM_DP) += -I $(src)/dp
adreno-y := \
adreno/adreno_device.o \
@@ -163,8 +163,8 @@ else
endif
quiet_cmd_headergen = GENHDR $@
- cmd_headergen = mkdir -p $(obj)/generated && $(PYTHON3) $(srctree)/$(src)/registers/gen_header.py \
- $(headergen-opts) --rnn $(srctree)/$(src)/registers --xml $< c-defines > $@
+ cmd_headergen = mkdir -p $(obj)/generated && $(PYTHON3) $(src)/registers/gen_header.py \
+ $(headergen-opts) --rnn $(src)/registers --xml $< c-defines > $@
$(obj)/generated/%.xml.h: $(src)/registers/adreno/%.xml \
$(src)/registers/adreno/adreno_common.xml \
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index bd92fb2979aa..0fdd41162e4b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -113,7 +113,7 @@ TRACE_EVENT(tracing_mark_write,
),
TP_fast_assign(
__entry->pid = pid;
- __assign_str(trace_name, name);
+ __assign_str(trace_name);
__entry->trace_begin = trace_begin;
),
TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
@@ -130,7 +130,7 @@ TRACE_EVENT(dpu_trace_counter,
),
TP_fast_assign(
__entry->pid = current->tgid;
- __assign_str(counter_name, name);
+ __assign_str(counter_name);
__entry->value = value;
),
TP_printk("%d|%s|%d", __entry->pid,
@@ -379,7 +379,7 @@ TRACE_EVENT(dpu_enc_rc,
__entry->sw_event = sw_event;
__entry->idle_pc_supported = idle_pc_supported;
__entry->rc_state = rc_state;
- __assign_str(stage_str, stage);
+ __assign_str(stage_str);
),
TP_printk("%s: id:%u, sw_event:%d, idle_pc_supported:%s, rc_state:%d",
__get_str(stage_str), __entry->drm_id, __entry->sw_event,
@@ -401,7 +401,7 @@ TRACE_EVENT(dpu_enc_frame_done_cb_not_busy,
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->event = event;
- __assign_str(intf_mode_str, intf_mode);
+ __assign_str(intf_mode_str);
__entry->intf_idx = intf_idx;
__entry->wb_idx = wb_idx;
),
@@ -446,7 +446,7 @@ TRACE_EVENT(dpu_enc_trigger_flush,
),
TP_fast_assign(
__entry->drm_id = drm_id;
- __assign_str(intf_mode_str, intf_mode);
+ __assign_str(intf_mode_str);
__entry->intf_idx = intf_idx;
__entry->wb_idx = wb_idx;
__entry->pending_kickoff_cnt = pending_kickoff_cnt;
@@ -946,7 +946,7 @@ TRACE_EVENT(dpu_core_perf_update_clk,
__field( u64, clk_rate )
),
TP_fast_assign(
- __assign_str(dev_name, dev->unique);
+ __assign_str(dev_name);
__entry->stop_req = stop_req;
__entry->clk_rate = clk_rate;
),
diff --git a/drivers/gpu/drm/msm/registers/gen_header.py b/drivers/gpu/drm/msm/registers/gen_header.py
index fc3bfdc991d2..3926485bb197 100644
--- a/drivers/gpu/drm/msm/registers/gen_header.py
+++ b/drivers/gpu/drm/msm/registers/gen_header.py
@@ -538,7 +538,7 @@ class Parser(object):
self.variants.add(reg.domain)
def do_validate(self, schemafile):
- if self.validate == False:
+ if not self.validate:
return
try:
@@ -948,7 +948,8 @@ def main():
parser = argparse.ArgumentParser()
parser.add_argument('--rnn', type=str, required=True)
parser.add_argument('--xml', type=str, required=True)
- parser.add_argument('--validate', action=argparse.BooleanOptionalAction)
+ parser.add_argument('--validate', default=False, action='store_true')
+ parser.add_argument('--no-validate', dest='validate', action='store_false')
subparsers = parser.add_subparsers()
subparsers.required = True
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index cf6b3a80c0c8..c32c01827c1d 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -1,10 +1,8 @@
-NOUVEAU_PATH ?= $(srctree)
-
# SPDX-License-Identifier: MIT
-ccflags-y += -I $(NOUVEAU_PATH)/$(src)/include
-ccflags-y += -I $(NOUVEAU_PATH)/$(src)/include/nvkm
-ccflags-y += -I $(NOUVEAU_PATH)/$(src)/nvkm
-ccflags-y += -I $(NOUVEAU_PATH)/$(src)
+ccflags-y += -I $(src)/include
+ccflags-y += -I $(src)/include/nvkm
+ccflags-y += -I $(src)/nvkm
+ccflags-y += -I $(src)
# NVKM - HW resource manager
#- code also used by various userspace tools/tests
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 0c3d88ad0b0e..88728a0b2c25 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -915,7 +915,7 @@ nv50_msto_cleanup(struct drm_atomic_state *state,
msto->disabled = false;
drm_dp_remove_payload_part2(mgr, new_mst_state, old_payload, new_payload);
} else if (msto->enabled) {
- drm_dp_add_payload_part2(mgr, state, new_payload);
+ drm_dp_add_payload_part2(mgr, new_payload);
msto->enabled = false;
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index f465fe93b1f7..d56909071de6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -272,6 +272,9 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
getparam->value = (u64)ttm_resource_manager_usage(vram_mgr);
break;
}
+ case NOUVEAU_GETPARAM_HAS_VMA_TILEMODE:
+ getparam->value = 1;
+ break;
default:
NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
return -EINVAL;
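NOUVEAU_GETPARAM_HAS_VMA_TILEMODE lets userspace detect that the kernel now honours tile_mode/kind flags on VM_BIND-managed buffers (see the nouveau_bo.c change that follows). A userspace-side sketch, assuming libdrm and an updated uapi nouveau_drm.h that exposes the new define; the header path and the libdrm call are the usual ones, not part of this patch:

#include <stdbool.h>
#include <xf86drm.h>
#include <nouveau_drm.h>	/* install path may vary (drm/ or libdrm/) */

static bool has_vma_tilemode(int fd)
{
	struct drm_nouveau_getparam gp = {
		.param = NOUVEAU_GETPARAM_HAS_VMA_TILEMODE,
	};

	/* Older kernels reject the unknown parameter with -EINVAL. */
	if (drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &gp, sizeof(gp)))
		return false;

	return gp.value == 1;
}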
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 1e2d28fd10dc..70fb003a6666 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -241,28 +241,28 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
}
nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
- if (!nouveau_cli_uvmm(cli) || internal) {
- /* for BO noVM allocs, don't assign kinds */
- if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
- nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
- if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
- kfree(nvbo);
- return ERR_PTR(-EINVAL);
- }
- nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
- } else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- nvbo->kind = (tile_flags & 0x00007f00) >> 8;
- nvbo->comp = (tile_flags & 0x00030000) >> 16;
- if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
- kfree(nvbo);
- return ERR_PTR(-EINVAL);
- }
- } else {
- nvbo->zeta = (tile_flags & 0x00000007);
+ if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
+ nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
+ if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+ kfree(nvbo);
+ return ERR_PTR(-EINVAL);
+ }
+
+ nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
+ } else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ nvbo->kind = (tile_flags & 0x00007f00) >> 8;
+ nvbo->comp = (tile_flags & 0x00030000) >> 16;
+ if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+ kfree(nvbo);
+ return ERR_PTR(-EINVAL);
}
- nvbo->mode = tile_mode;
+ } else {
+ nvbo->zeta = (tile_flags & 0x00000007);
+ }
+ nvbo->mode = tile_mode;
+ if (!nouveau_cli_uvmm(cli) || internal) {
/* Determine the desirable target GPU page size for the buffer. */
for (i = 0; i < vmm->page_nr; i++) {
/* Because we cannot currently allow VMM maps to fail
@@ -304,12 +304,6 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
}
nvbo->page = vmm->page[pi].shift;
} else {
- /* reject other tile flags when in VM mode. */
- if (tile_mode)
- return ERR_PTR(-EINVAL);
- if (tile_flags & ~NOUVEAU_GEM_TILE_NONCONTIG)
- return ERR_PTR(-EINVAL);
-
/* Determine the desirable target GPU page size for the buffer. */
for (i = 0; i < vmm->page_nr; i++) {
/* Because we cannot currently allow VMM maps to fail
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index 4d1aaee8fe15..1d19c87eaec1 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -142,11 +142,16 @@ nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
struct nvif_ioctl_v0 ioctl;
struct nvif_ioctl_mthd_v0 mthd;
} *args;
+ u32 args_size;
u8 stack[128];
int ret;
- if (sizeof(*args) + size > sizeof(stack)) {
- if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
+ if (check_add_overflow(sizeof(*args), size, &args_size))
+ return -ENOMEM;
+
+ if (args_size > sizeof(stack)) {
+ args = kmalloc(args_size, GFP_KERNEL);
+ if (!args)
return -ENOMEM;
} else {
args = (void *)stack;
@@ -157,7 +162,7 @@ nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
args->mthd.method = mthd;
memcpy(args->mthd.data, data, size);
- ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
+ ret = nvif_object_ioctl(object, args, args_size, NULL);
memcpy(data, args->mthd.data, size);
if (args != (void *)stack)
kfree(args);
@@ -276,7 +281,15 @@ nvif_object_ctor(struct nvif_object *parent, const char *name, u32 handle,
object->map.size = 0;
if (parent) {
- if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL))) {
+ u32 args_size;
+
+ if (check_add_overflow(sizeof(*args), size, &args_size)) {
+ nvif_object_dtor(object);
+ return -ENOMEM;
+ }
+
+ args = kmalloc(args_size, GFP_KERNEL);
+ if (!args) {
nvif_object_dtor(object);
return -ENOMEM;
}
@@ -293,8 +306,7 @@ nvif_object_ctor(struct nvif_object *parent, const char *name, u32 handle,
args->new.oclass = oclass;
memcpy(args->new.data, data, size);
- ret = nvif_object_ioctl(parent, args, sizeof(*args) + size,
- &object->priv);
+ ret = nvif_object_ioctl(parent, args, args_size, &object->priv);
memcpy(data, args->new.data, size);
kfree(args);
if (ret == 0)
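check_add_overflow() (from <linux/overflow.h>) is what turns the previously unchecked sizeof(*args) + size into a hard failure when the u32 sum would wrap, instead of under-allocating and then copying past the buffer. The essential shape of the pattern, as used in both call sites above:

	u32 args_size;

	/* Fail the allocation rather than letting the 32-bit sum wrap. */
	if (check_add_overflow(sizeof(*args), size, &args_size))
		return -ENOMEM;

	args = kmalloc(args_size, GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	/* args_size is now the exact, overflow-checked length to pass on. */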
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 3421e8389222..9ea0c64c26b5 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -9,6 +9,7 @@
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>
+#include <linux/vmalloc.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 982324ef5a41..2ae0eb0638f3 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -340,6 +340,8 @@ config DRM_PANEL_LG_SW43408
depends on OF
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HELPER
help
Say Y here if you want to enable support for LG sw43408 panel.
The panel has a 1080x2160@60Hz resolution and uses 24 bit RGB per
diff --git a/drivers/gpu/drm/panel/panel-lg-sw43408.c b/drivers/gpu/drm/panel/panel-lg-sw43408.c
index 115f4702d59f..2b3a73696dce 100644
--- a/drivers/gpu/drm/panel/panel-lg-sw43408.c
+++ b/drivers/gpu/drm/panel/panel-lg-sw43408.c
@@ -182,7 +182,7 @@ static int sw43408_backlight_update_status(struct backlight_device *bl)
return mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
}
-const struct backlight_ops sw43408_backlight_ops = {
+static const struct backlight_ops sw43408_backlight_ops = {
.update_status = sw43408_backlight_update_status,
};
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 88e80fe98112..28bfc48a9127 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -282,15 +282,15 @@ static const struct drm_display_mode et028013dma_mode = {
static const struct drm_display_mode jt240mhqs_hwt_ek_e3_mode = {
.clock = 6000,
.hdisplay = 240,
- .hsync_start = 240 + 28,
- .hsync_end = 240 + 28 + 10,
- .htotal = 240 + 28 + 10 + 10,
+ .hsync_start = 240 + 38,
+ .hsync_end = 240 + 38 + 10,
+ .htotal = 240 + 38 + 10 + 10,
.vdisplay = 280,
- .vsync_start = 280 + 8,
- .vsync_end = 280 + 8 + 4,
- .vtotal = 280 + 8 + 4 + 4,
- .width_mm = 43,
- .height_mm = 37,
+ .vsync_start = 280 + 48,
+ .vsync_end = 280 + 48 + 4,
+ .vtotal = 280 + 48 + 4 + 4,
+ .width_mm = 37,
+ .height_mm = 43,
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
@@ -643,7 +643,9 @@ static int st7789v_probe(struct spi_device *spi)
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
- of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation);
+ ret = of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation);
+ if (ret)
+ return dev_err_probe(&spi->dev, ret, "Failed to get orientation\n");
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index d47b40b82b0b..8e0ff3efede7 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -192,7 +192,7 @@ static int panfrost_gem_pin(struct drm_gem_object *obj)
if (bo->is_heap)
return -EINVAL;
- return drm_gem_shmem_pin(&bo->base);
+ return drm_gem_shmem_pin_locked(&bo->base);
}
static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c
index 75276cbeba20..4082c8f2951d 100644
--- a/drivers/gpu/drm/panthor/panthor_device.c
+++ b/drivers/gpu/drm/panthor/panthor_device.c
@@ -129,13 +129,8 @@ static void panthor_device_reset_work(struct work_struct *work)
panthor_gpu_l2_power_on(ptdev);
panthor_mmu_post_reset(ptdev);
ret = panthor_fw_post_reset(ptdev);
- if (ret)
- goto out_dev_exit;
-
atomic_set(&ptdev->reset.pending, 0);
- panthor_sched_post_reset(ptdev);
-
-out_dev_exit:
+ panthor_sched_post_reset(ptdev, ret != 0);
drm_dev_exit(cookie);
if (ret) {
@@ -293,6 +288,7 @@ static const struct panthor_exception_info panthor_exception_infos[] = {
PANTHOR_EXCEPTION(ACTIVE),
PANTHOR_EXCEPTION(CS_RES_TERM),
PANTHOR_EXCEPTION(CS_CONFIG_FAULT),
+ PANTHOR_EXCEPTION(CS_UNRECOVERABLE),
PANTHOR_EXCEPTION(CS_ENDPOINT_FAULT),
PANTHOR_EXCEPTION(CS_BUS_FAULT),
PANTHOR_EXCEPTION(CS_INSTR_INVALID),
diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h
index 2fdd671b38fd..e388c0472ba7 100644
--- a/drivers/gpu/drm/panthor/panthor_device.h
+++ b/drivers/gpu/drm/panthor/panthor_device.h
@@ -216,6 +216,7 @@ enum drm_panthor_exception_type {
DRM_PANTHOR_EXCEPTION_CS_RES_TERM = 0x0f,
DRM_PANTHOR_EXCEPTION_MAX_NON_FAULT = 0x3f,
DRM_PANTHOR_EXCEPTION_CS_CONFIG_FAULT = 0x40,
+ DRM_PANTHOR_EXCEPTION_CS_UNRECOVERABLE = 0x41,
DRM_PANTHOR_EXCEPTION_CS_ENDPOINT_FAULT = 0x44,
DRM_PANTHOR_EXCEPTION_CS_BUS_FAULT = 0x48,
DRM_PANTHOR_EXCEPTION_CS_INSTR_INVALID = 0x49,
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
index fedf9627453f..857f3f11258a 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -453,7 +453,7 @@ panthor_fw_alloc_queue_iface_mem(struct panthor_device *ptdev,
ret = panthor_kernel_bo_vmap(mem);
if (ret) {
- panthor_kernel_bo_destroy(panthor_fw_vm(ptdev), mem);
+ panthor_kernel_bo_destroy(mem);
return ERR_PTR(ret);
}
@@ -1134,7 +1134,7 @@ void panthor_fw_unplug(struct panthor_device *ptdev)
panthor_fw_stop(ptdev);
list_for_each_entry(section, &ptdev->fw->sections, node)
- panthor_kernel_bo_destroy(panthor_fw_vm(ptdev), section->mem);
+ panthor_kernel_bo_destroy(section->mem);
/* We intentionally don't call panthor_vm_idle() and let
* panthor_mmu_unplug() release the AS we acquired with
@@ -1142,6 +1142,7 @@ void panthor_fw_unplug(struct panthor_device *ptdev)
* state to keep the active_refcnt balanced.
*/
panthor_vm_put(ptdev->fw->vm);
+ ptdev->fw->vm = NULL;
panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000);
}
diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
index d6483266d0c2..38f560864879 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.c
+++ b/drivers/gpu/drm/panthor/panthor_gem.c
@@ -26,18 +26,18 @@ static void panthor_gem_free_object(struct drm_gem_object *obj)
/**
* panthor_kernel_bo_destroy() - Destroy a kernel buffer object
- * @vm: The VM this BO was mapped to.
* @bo: Kernel buffer object to destroy. If NULL or an ERR_PTR(), the destruction
* is skipped.
*/
-void panthor_kernel_bo_destroy(struct panthor_vm *vm,
- struct panthor_kernel_bo *bo)
+void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo)
{
+ struct panthor_vm *vm;
int ret;
if (IS_ERR_OR_NULL(bo))
return;
+ vm = bo->vm;
panthor_kernel_bo_vunmap(bo);
if (drm_WARN_ON(bo->obj->dev,
@@ -53,6 +53,7 @@ void panthor_kernel_bo_destroy(struct panthor_vm *vm,
drm_gem_object_put(bo->obj);
out_free_bo:
+ panthor_vm_put(vm);
kfree(bo);
}
@@ -106,6 +107,7 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
if (ret)
goto err_free_va;
+ kbo->vm = panthor_vm_get(vm);
bo->exclusive_vm_root_gem = panthor_vm_root_gem(vm);
drm_gem_object_get(bo->exclusive_vm_root_gem);
bo->base.base.resv = bo->exclusive_vm_root_gem->resv;
diff --git a/drivers/gpu/drm/panthor/panthor_gem.h b/drivers/gpu/drm/panthor/panthor_gem.h
index 3bccba394d00..e43021cf6d45 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.h
+++ b/drivers/gpu/drm/panthor/panthor_gem.h
@@ -62,6 +62,11 @@ struct panthor_kernel_bo {
struct drm_gem_object *obj;
/**
+ * @vm: VM this private buffer is attached to.
+ */
+ struct panthor_vm *vm;
+
+ /**
* @va_node: VA space allocated to this GEM.
*/
struct drm_mm_node va_node;
@@ -136,7 +141,6 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
size_t size, u32 bo_flags, u32 vm_map_flags,
u64 gpu_va);
-void panthor_kernel_bo_destroy(struct panthor_vm *vm,
- struct panthor_kernel_bo *bo);
+void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo);
#endif /* __PANTHOR_GEM_H__ */
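The net effect of the panthor_gem changes is an ownership rule: a kernel BO takes a reference on the VM it is mapped into at creation time and releases it on destruction, so panthor_kernel_bo_destroy() no longer needs the VM passed in, and BOs allocated on the FW VM can still be torn down after panthor_fw_unplug() has dropped and cleared ptdev->fw->vm. A short sketch of the resulting lifetime pairing, using only names from the hunks above:

	struct panthor_kernel_bo *bo;

	bo = panthor_kernel_bo_create(ptdev, vm, size, bo_flags, vm_map_flags, gpu_va);
	/* internally: kbo->vm = panthor_vm_get(vm); */

	/* ... vmap and use the buffer ... */

	panthor_kernel_bo_destroy(bo);	/* unmaps, puts the GEM object, then panthor_vm_put(bo->vm) */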
diff --git a/drivers/gpu/drm/panthor/panthor_heap.c b/drivers/gpu/drm/panthor/panthor_heap.c
index 143fa35f2e74..3796a9eb22af 100644
--- a/drivers/gpu/drm/panthor/panthor_heap.c
+++ b/drivers/gpu/drm/panthor/panthor_heap.c
@@ -127,7 +127,7 @@ static void panthor_free_heap_chunk(struct panthor_vm *vm,
heap->chunk_count--;
mutex_unlock(&heap->lock);
- panthor_kernel_bo_destroy(vm, chunk->bo);
+ panthor_kernel_bo_destroy(chunk->bo);
kfree(chunk);
}
@@ -183,7 +183,7 @@ static int panthor_alloc_heap_chunk(struct panthor_device *ptdev,
return 0;
err_destroy_bo:
- panthor_kernel_bo_destroy(vm, chunk->bo);
+ panthor_kernel_bo_destroy(chunk->bo);
err_free_chunk:
kfree(chunk);
@@ -253,8 +253,8 @@ int panthor_heap_destroy(struct panthor_heap_pool *pool, u32 handle)
* @pool: Pool to instantiate the heap context from.
* @initial_chunk_count: Number of chunk allocated at initialization time.
* Must be at least 1.
- * @chunk_size: The size of each chunk. Must be a power of two between 256k
- * and 2M.
+ * @chunk_size: The size of each chunk. Must be page-aligned and lie in the
+ * [128k:8M] range.
* @max_chunks: Maximum number of chunks that can be allocated.
* @target_in_flight: Maximum number of in-flight render passes.
* @heap_ctx_gpu_va: Pointer holding the GPU address of the allocated heap
@@ -281,8 +281,11 @@ int panthor_heap_create(struct panthor_heap_pool *pool,
if (initial_chunk_count == 0)
return -EINVAL;
- if (hweight32(chunk_size) != 1 ||
- chunk_size < SZ_256K || chunk_size > SZ_2M)
+ if (initial_chunk_count > max_chunks)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(chunk_size, PAGE_SIZE) ||
+ chunk_size < SZ_128K || chunk_size > SZ_8M)
return -EINVAL;
down_read(&pool->lock);
@@ -320,7 +323,8 @@ int panthor_heap_create(struct panthor_heap_pool *pool,
if (!pool->vm) {
ret = -EINVAL;
} else {
- ret = xa_alloc(&pool->xa, &id, heap, XA_LIMIT(1, MAX_HEAPS_PER_POOL), GFP_KERNEL);
+ ret = xa_alloc(&pool->xa, &id, heap,
+ XA_LIMIT(0, MAX_HEAPS_PER_POOL - 1), GFP_KERNEL);
if (!ret) {
void *gpu_ctx = panthor_get_heap_ctx(pool, id);
@@ -391,7 +395,7 @@ int panthor_heap_return_chunk(struct panthor_heap_pool *pool,
mutex_unlock(&heap->lock);
if (removed) {
- panthor_kernel_bo_destroy(pool->vm, chunk->bo);
+ panthor_kernel_bo_destroy(chunk->bo);
kfree(chunk);
ret = 0;
} else {
@@ -410,6 +414,13 @@ out_unlock:
* @renderpasses_in_flight: Number of render passes currently in-flight.
* @pending_frag_count: Number of fragment jobs waiting for execution/completion.
* @new_chunk_gpu_va: Pointer used to return the chunk VA.
+ *
+ * Return:
+ * - 0 if a new heap was allocated
+ * - -ENOMEM if the tiler context reached the maximum number of chunks
+ * or if too many render passes are in-flight
+ * or if the allocation failed
+ * - -EINVAL if any of the arguments passed to panthor_heap_grow() is invalid
*/
int panthor_heap_grow(struct panthor_heap_pool *pool,
u64 heap_gpu_va,
@@ -439,10 +450,7 @@ int panthor_heap_grow(struct panthor_heap_pool *pool,
* handler provided by the userspace driver, if any).
*/
if (renderpasses_in_flight > heap->target_in_flight ||
- (pending_frag_count > 0 && heap->chunk_count >= heap->max_chunks)) {
- ret = -EBUSY;
- goto out_unlock;
- } else if (heap->chunk_count >= heap->max_chunks) {
+ heap->chunk_count >= heap->max_chunks) {
ret = -ENOMEM;
goto out_unlock;
}
@@ -536,7 +544,7 @@ panthor_heap_pool_create(struct panthor_device *ptdev, struct panthor_vm *vm)
pool->vm = vm;
pool->ptdev = ptdev;
init_rwsem(&pool->lock);
- xa_init_flags(&pool->xa, XA_FLAGS_ALLOC1);
+ xa_init_flags(&pool->xa, XA_FLAGS_ALLOC);
kref_init(&pool->refcount);
pool->gpu_contexts = panthor_kernel_bo_create(ptdev, vm, bosize,
@@ -587,7 +595,7 @@ void panthor_heap_pool_destroy(struct panthor_heap_pool *pool)
drm_WARN_ON(&pool->ptdev->base, panthor_heap_destroy_locked(pool, i));
if (!IS_ERR_OR_NULL(pool->gpu_contexts))
- panthor_kernel_bo_destroy(pool->vm, pool->gpu_contexts);
+ panthor_kernel_bo_destroy(pool->gpu_contexts);
/* Reflects the fact the pool has been destroyed. */
pool->vm = NULL;
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 7f16a4a14e9a..79ffcbc41d78 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -826,8 +826,8 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue *
panthor_queue_put_syncwait_obj(queue);
- panthor_kernel_bo_destroy(group->vm, queue->ringbuf);
- panthor_kernel_bo_destroy(panthor_fw_vm(group->ptdev), queue->iface.mem);
+ panthor_kernel_bo_destroy(queue->ringbuf);
+ panthor_kernel_bo_destroy(queue->iface.mem);
kfree(queue);
}
@@ -837,15 +837,14 @@ static void group_release_work(struct work_struct *work)
struct panthor_group *group = container_of(work,
struct panthor_group,
release_work);
- struct panthor_device *ptdev = group->ptdev;
u32 i;
for (i = 0; i < group->queue_count; i++)
group_free_queue(group, group->queues[i]);
- panthor_kernel_bo_destroy(panthor_fw_vm(ptdev), group->suspend_buf);
- panthor_kernel_bo_destroy(panthor_fw_vm(ptdev), group->protm_suspend_buf);
- panthor_kernel_bo_destroy(group->vm, group->syncobjs);
+ panthor_kernel_bo_destroy(group->suspend_buf);
+ panthor_kernel_bo_destroy(group->protm_suspend_buf);
+ panthor_kernel_bo_destroy(group->syncobjs);
panthor_vm_put(group->vm);
kfree(group);
@@ -1281,7 +1280,16 @@ cs_slot_process_fatal_event_locked(struct panthor_device *ptdev,
if (group)
group->fatal_queues |= BIT(cs_id);
- sched_queue_delayed_work(sched, tick, 0);
+ if (CS_EXCEPTION_TYPE(fatal) == DRM_PANTHOR_EXCEPTION_CS_UNRECOVERABLE) {
+ /* If this exception is unrecoverable, queue a reset, and make
+ * sure we stop scheduling groups until the reset has happened.
+ */
+ panthor_device_schedule_reset(ptdev);
+ cancel_delayed_work(&sched->tick_work);
+ } else {
+ sched_queue_delayed_work(sched, tick, 0);
+ }
+
drm_warn(&ptdev->base,
"CSG slot %d CS slot: %d\n"
"CS_FATAL.EXCEPTION_TYPE: 0x%x (%s)\n"
@@ -1385,7 +1393,12 @@ static int group_process_tiler_oom(struct panthor_group *group, u32 cs_id)
pending_frag_count, &new_chunk_va);
}
- if (ret && ret != -EBUSY) {
+ /* If the heap context doesn't have memory for us, we want to let the
+ * FW try to reclaim memory by waiting for fragment jobs to land or by
+ * executing the tiler OOM exception handler, which is supposed to
+ * implement incremental rendering.
+ */
+ if (ret && ret != -ENOMEM) {
drm_warn(&ptdev->base, "Failed to extend the tiler heap\n");
group->fatal_queues |= BIT(cs_id);
sched_queue_delayed_work(sched, tick, 0);
@@ -2720,15 +2733,22 @@ void panthor_sched_pre_reset(struct panthor_device *ptdev)
mutex_unlock(&sched->reset.lock);
}
-void panthor_sched_post_reset(struct panthor_device *ptdev)
+void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed)
{
struct panthor_scheduler *sched = ptdev->scheduler;
struct panthor_group *group, *group_tmp;
mutex_lock(&sched->reset.lock);
- list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node)
+ list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node) {
+ /* Consider all previously running group as terminated if the
+ * reset failed.
+ */
+ if (reset_failed)
+ group->state = PANTHOR_CS_GROUP_TERMINATED;
+
panthor_group_start(group);
+ }
/* We're done resetting the GPU, clear the reset.in_progress bit so we can
* kick the scheduler.
@@ -2736,9 +2756,11 @@ void panthor_sched_post_reset(struct panthor_device *ptdev)
atomic_set(&sched->reset.in_progress, false);
mutex_unlock(&sched->reset.lock);
- sched_queue_delayed_work(sched, tick, 0);
-
- sched_queue_work(sched, sync_upd);
+ /* No need to queue a tick and update syncs if the reset failed. */
+ if (!reset_failed) {
+ sched_queue_delayed_work(sched, tick, 0);
+ sched_queue_work(sched, sync_upd);
+ }
}
static void group_sync_upd_work(struct work_struct *work)
diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h
index 66438b1f331f..3a30d2328b30 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.h
+++ b/drivers/gpu/drm/panthor/panthor_sched.h
@@ -40,7 +40,7 @@ void panthor_group_pool_destroy(struct panthor_file *pfile);
int panthor_sched_init(struct panthor_device *ptdev);
void panthor_sched_unplug(struct panthor_device *ptdev);
void panthor_sched_pre_reset(struct panthor_device *ptdev);
-void panthor_sched_post_reset(struct panthor_device *ptdev);
+void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed);
void panthor_sched_suspend(struct panthor_device *ptdev);
void panthor_sched_resume(struct panthor_device *ptdev);
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index f8ed093b7356..c75302ca3427 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -48,7 +48,7 @@ DECLARE_EVENT_CLASS(drm_sched_job,
__entry->entity = entity;
__entry->id = sched_job->id;
__entry->fence = &sched_job->s_fence->finished;
- __assign_str(name, sched_job->sched->name);
+ __assign_str(name);
__entry->job_count = spsc_queue_count(&entity->job_queue);
__entry->hw_job_count = atomic_read(
&sched_job->sched->credit_count);
@@ -94,7 +94,7 @@ TRACE_EVENT(drm_sched_job_wait_dep,
),
TP_fast_assign(
- __assign_str(name, sched_job->sched->name);
+ __assign_str(name);
__entry->id = sched_job->id;
__entry->fence = fence;
__entry->ctx = fence->context;
diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
index e3b50e240d36..dd8fb9f8341a 100644
--- a/drivers/gpu/drm/tests/drm_buddy_test.c
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -23,9 +23,11 @@ static inline u64 get_size(int order, u64 chunk_size)
static void drm_test_buddy_alloc_range_bias(struct kunit *test)
{
- u32 mm_size, ps, bias_size, bias_start, bias_end, bias_rem;
+ u32 mm_size, size, ps, bias_size, bias_start, bias_end, bias_rem;
DRM_RND_STATE(prng, random_seed);
unsigned int i, count, *order;
+ struct drm_buddy_block *block;
+ unsigned long flags;
struct drm_buddy mm;
LIST_HEAD(allocated);
@@ -222,6 +224,38 @@ static void drm_test_buddy_alloc_range_bias(struct kunit *test)
drm_buddy_free_list(&mm, &allocated, 0);
drm_buddy_fini(&mm);
+
+ /*
+ * Allocate cleared blocks in the bias range when the DRM buddy's clear avail is
+ * zero. This will validate the bias range allocation in scenarios like system boot
+ * when no cleared blocks are available and exercise the fallback path too. The resulting
+ * blocks should always be dirty.
+ */
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
+ "buddy_init failed\n");
+
+ bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
+ bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
+ bias_end = max(bias_end, bias_start + ps);
+ bias_rem = bias_end - bias_start;
+
+ flags = DRM_BUDDY_CLEAR_ALLOCATION | DRM_BUDDY_RANGE_ALLOCATION;
+ size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
+
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, size, ps,
+ &allocated,
+ flags),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, size, ps);
+
+ list_for_each_entry(block, &allocated, link)
+ KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);
+
+ drm_buddy_free_list(&mm, &allocated, 0);
+ drm_buddy_fini(&mm);
}
static void drm_test_buddy_alloc_clear(struct kunit *test)
@@ -471,8 +505,8 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
* Eventually we will have a fully 50% fragmented mm.
*/
- mm_size = PAGE_SIZE << max_order;
- KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+ mm_size = SZ_4K << max_order;
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
"buddy_init failed\n");
KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
@@ -486,7 +520,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
}
for (order = top; order--;) {
- size = get_size(order, PAGE_SIZE);
+ size = get_size(order, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
mm_size, size, size,
&tmp, flags),
@@ -500,7 +534,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
}
/* There should be one final page for this sub-allocation */
- size = get_size(0, PAGE_SIZE);
+ size = get_size(0, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc hit -ENOMEM for hole\n");
@@ -510,7 +544,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
list_move_tail(&block->link, &holes);
- size = get_size(top, PAGE_SIZE);
+ size = get_size(top, mm.chunk_size);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
@@ -521,7 +555,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
/* Nothing larger than blocks of chunk_size now available */
for (order = 1; order <= max_order; order++) {
- size = get_size(order, PAGE_SIZE);
+ size = get_size(order, mm.chunk_size);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc unexpectedly succeeded at order %d, it should be full!",
@@ -550,14 +584,14 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
* page left.
*/
- mm_size = PAGE_SIZE << max_order;
- KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+ mm_size = SZ_4K << max_order;
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
"buddy_init failed\n");
KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
for (order = 0; order < max_order; order++) {
- size = get_size(order, PAGE_SIZE);
+ size = get_size(order, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc hit -ENOMEM with order=%d\n",
@@ -570,7 +604,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
}
/* And now the last remaining block available */
- size = get_size(0, PAGE_SIZE);
+ size = get_size(0, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc hit -ENOMEM on final alloc\n");
@@ -582,7 +616,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
/* Should be completely full! */
for (order = max_order; order--;) {
- size = get_size(order, PAGE_SIZE);
+ size = get_size(order, mm.chunk_size);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc unexpectedly succeeded, it should be full!");
@@ -598,7 +632,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
list_del(&block->link);
drm_buddy_free_block(&mm, block);
- size = get_size(order, PAGE_SIZE);
+ size = get_size(order, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc hit -ENOMEM with order=%d\n",
@@ -613,7 +647,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
}
/* To confirm, now the whole mm should be available */
- size = get_size(max_order, PAGE_SIZE);
+ size = get_size(max_order, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
@@ -644,15 +678,15 @@ static void drm_test_buddy_alloc_optimistic(struct kunit *test)
* try to allocate them all.
*/
- mm_size = PAGE_SIZE * ((1 << (max_order + 1)) - 1);
+ mm_size = SZ_4K * ((1 << (max_order + 1)) - 1);
- KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
"buddy_init failed\n");
KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
for (order = 0; order <= max_order; order++) {
- size = get_size(order, PAGE_SIZE);
+ size = get_size(order, mm.chunk_size);
KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc hit -ENOMEM with order=%d\n",
@@ -665,7 +699,7 @@ static void drm_test_buddy_alloc_optimistic(struct kunit *test)
}
/* Should be completely full! */
- size = get_size(0, PAGE_SIZE);
+ size = get_size(0, mm.chunk_size);
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
size, size, &tmp, flags),
"buddy_alloc unexpectedly succeeded, it should be full!");
@@ -682,7 +716,7 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
LIST_HEAD(allocated);
struct drm_buddy mm;
- KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, PAGE_SIZE));
+ KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, SZ_4K));
KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
"mm.max_order(%d) != %d\n", mm.max_order,
@@ -690,7 +724,7 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
size = mm.chunk_size << mm.max_order;
KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
- PAGE_SIZE, &allocated, flags));
+ mm.chunk_size, &allocated, flags));
block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
KUNIT_EXPECT_TRUE(test, block);
@@ -700,10 +734,10 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
drm_buddy_block_order(block), mm.max_order);
KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
- BIT_ULL(mm.max_order) * PAGE_SIZE,
+ BIT_ULL(mm.max_order) * mm.chunk_size,
"block size(%llu) != %llu\n",
drm_buddy_block_size(&mm, block),
- BIT_ULL(mm.max_order) * PAGE_SIZE);
+ BIT_ULL(mm.max_order) * mm.chunk_size);
drm_buddy_free_list(&mm, &allocated, 0);
drm_buddy_fini(&mm);
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index a07ede668cc1..a165cbcdd27b 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -21,6 +21,7 @@
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
+#include <linux/vmalloc.h>
#include "v3d_drv.h"
#include "uapi/drm/v3d_drm.h"
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 9539aa28937f..188e126383c2 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -154,7 +154,6 @@ static struct virtio_driver virtio_gpu_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtio_gpu_probe,
.remove = virtio_gpu_remove,
diff --git a/drivers/gpu/drm/virtio/virtgpu_trace.h b/drivers/gpu/drm/virtio/virtgpu_trace.h
index 031bc77689d5..227bf0ae7ed5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_trace.h
+++ b/drivers/gpu/drm/virtio/virtgpu_trace.h
@@ -25,7 +25,7 @@ DECLARE_EVENT_CLASS(virtio_gpu_cmd,
TP_fast_assign(
__entry->dev = vq->vdev->index;
__entry->vq = vq->index;
- __assign_str(name, vq->name);
+ __assign_str(name);
__entry->type = le32_to_cpu(hdr->type);
__entry->flags = le32_to_cpu(hdr->flags);
__entry->fence_id = le64_to_cpu(hdr->fence_id);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index ae2de914eb89..2731f6ded1c2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -54,6 +54,7 @@
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include "device_include/svga3d_reg.h"
+#include <linux/vmalloc.h>
#define VMW_BINDING_RT_BIT 0
#define VMW_BINDING_PS_BIT 1
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index 195ff8792e5a..dd4ca6a9c690 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -31,6 +31,7 @@
#include <drm/ttm/ttm_placement.h>
#include <linux/sched/signal.h>
+#include <linux/vmalloc.h>
bool vmw_supports_3d(struct vmw_private *dev_priv)
{
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c b/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c
index 829df395c2ed..6e6beff9e262 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c
@@ -25,6 +25,7 @@
*
**************************************************************************/
+#include <linux/vmalloc.h>
#include "vmwgfx_devcaps.h"
#include "vmwgfx_drv.h"
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index bdad93864b98..823d8d2da17c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -54,6 +54,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
+#include <linux/vmalloc.h>
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
@@ -745,7 +746,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
dev->vram_size = pci_resource_len(pdev, 2);
drm_info(&dev->drm,
- "Register MMIO at 0x%pa size is %llu kiB\n",
+ "Register MMIO at 0x%pa size is %llu KiB\n",
&rmmio_start, (uint64_t)rmmio_size / 1024);
dev->rmmio = devm_ioremap(dev->drm.dev,
rmmio_start,
@@ -764,7 +765,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
fifo_size = pci_resource_len(pdev, 2);
drm_info(&dev->drm,
- "FIFO at %pa size is %llu kiB\n",
+ "FIFO at %pa size is %llu KiB\n",
&fifo_start, (uint64_t)fifo_size / 1024);
dev->fifo_mem = devm_memremap(dev->drm.dev,
fifo_start,
@@ -789,7 +790,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
* SVGA_REG_VRAM_SIZE.
*/
drm_info(&dev->drm,
- "VRAM at %pa size is %llu kiB\n",
+ "VRAM at %pa size is %llu KiB\n",
&dev->vram_start, (uint64_t)dev->vram_size / 1024);
return 0;
@@ -959,13 +960,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
vmw_read(dev_priv,
SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
- /*
- * Workaround for low memory 2D VMs to compensate for the
- * allocation taken by fbdev
- */
- if (!(dev_priv->capabilities & SVGA_CAP_3D))
- mem_size *= 3;
-
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
dev_priv->max_primary_mem =
vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_MEM);
@@ -990,13 +984,13 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
dev_priv->max_primary_mem = dev_priv->vram_size;
}
drm_info(&dev_priv->drm,
- "Legacy memory limits: VRAM = %llu kB, FIFO = %llu kB, surface = %u kB\n",
+ "Legacy memory limits: VRAM = %llu KiB, FIFO = %llu KiB, surface = %u KiB\n",
(u64)dev_priv->vram_size / 1024,
(u64)dev_priv->fifo_mem_size / 1024,
dev_priv->memory_size / 1024);
drm_info(&dev_priv->drm,
- "MOB limits: max mob size = %u kB, max mob pages = %u\n",
+ "MOB limits: max mob size = %u KiB, max mob pages = %u\n",
dev_priv->max_mob_size / 1024, dev_priv->max_mob_pages);
ret = vmw_dma_masks(dev_priv);
@@ -1014,7 +1008,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
(unsigned)dev_priv->max_gmr_pages);
}
drm_info(&dev_priv->drm,
- "Maximum display memory size is %llu kiB\n",
+ "Maximum display memory size is %llu KiB\n",
(uint64_t)dev_priv->max_primary_mem / 1024);
/* Need mmio memory to check for fifo pitchlock cap. */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 4ecaea0026fc..a1ce41e1c468 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1043,9 +1043,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned width, unsigned height, unsigned pitch,
unsigned bpp, unsigned depth);
-bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
- uint32_t pitch,
- uint32_t height);
int vmw_kms_present(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index cc3086e649eb..2e52d73eba48 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -35,6 +35,7 @@
#include <linux/sync_file.h>
#include <linux/hashtable.h>
+#include <linux/vmalloc.h>
/*
* Helper macro to get dx_ctx_node if available otherwise print an error
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index a0b47c9b33f5..5bd967fbcf55 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -94,14 +94,14 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
} else
new_max_pages = gman->max_gmr_pages * 2;
if (new_max_pages > gman->max_gmr_pages && new_max_pages >= gman->used_gmr_pages) {
- DRM_WARN("vmwgfx: increasing guest mob limits to %u kB.\n",
+ DRM_WARN("vmwgfx: increasing guest mob limits to %u KiB.\n",
((new_max_pages) << (PAGE_SHIFT - 10)));
gman->max_gmr_pages = new_max_pages;
} else {
char buf[256];
snprintf(buf, sizeof(buf),
- "vmwgfx, error: guest graphics is out of memory (mob limit at: %ukB).\n",
+ "vmwgfx, error: guest graphics is out of memory (mob limit at: %u KiB).\n",
((gman->max_gmr_pages) << (PAGE_SHIFT - 10)));
vmw_host_printf(buf);
DRM_WARN("%s", buf);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index a1da5678c731..835d1eed8dd9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -31,6 +31,7 @@
#include <drm/vmwgfx_drm.h>
#include <linux/pci.h>
+#include <linux/vmalloc.h>
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 13b2820cae51..00c4ff684130 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -224,7 +224,7 @@ static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
new_image = vmw_du_cursor_plane_acquire_image(new_vps);
changed = false;
- if (old_image && new_image)
+ if (old_image && new_image && old_image != new_image)
changed = memcmp(old_image, new_image, size) != 0;
return changed;
@@ -2171,13 +2171,12 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
return 0;
}
+static
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
- uint32_t pitch,
- uint32_t height)
+ u64 pitch,
+ u64 height)
{
- return ((u64) pitch * (u64) height) < (u64)
- ((dev_priv->active_display_unit == vmw_du_screen_target) ?
- dev_priv->max_primary_mem : dev_priv->vram_size);
+ return (pitch * height) < (u64)dev_priv->vram_size;
}
/**
@@ -2873,25 +2872,18 @@ out_unref:
enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ enum drm_mode_status ret;
struct drm_device *dev = connector->dev;
struct vmw_private *dev_priv = vmw_priv(dev);
- u32 max_width = dev_priv->texture_max_width;
- u32 max_height = dev_priv->texture_max_height;
u32 assumed_cpp = 4;
if (dev_priv->assume_16bpp)
assumed_cpp = 2;
- if (dev_priv->active_display_unit == vmw_du_screen_target) {
- max_width = min(dev_priv->stdu_max_width, max_width);
- max_height = min(dev_priv->stdu_max_height, max_height);
- }
-
- if (max_width < mode->hdisplay)
- return MODE_BAD_HVALUE;
-
- if (max_height < mode->vdisplay)
- return MODE_BAD_VVALUE;
+ ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
+ dev_priv->texture_max_height);
+ if (ret != MODE_OK)
+ return ret;
if (!vmw_kms_validate_mode_vram(dev_priv,
mode->hdisplay * assumed_cpp,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 2041c4d48daa..a04e0736318d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -43,7 +43,14 @@
#define vmw_connector_to_stdu(x) \
container_of(x, struct vmw_screen_target_display_unit, base.connector)
-
+/*
+ * Some renderers such as llvmpipe will align the width and height of their
+ * buffers to match their tile size. We need to keep this in mind when exposing
+ * modes to userspace so that this possible over-allocation will not exceed
+ * graphics memory. 64x64 pixels seems to be a reasonable upper bound for the
+ * tile size of current renderers.
+ */
+#define GPU_TILE_SIZE 64
enum stdu_content_type {
SAME_AS_DISPLAY = 0,
@@ -85,11 +92,6 @@ struct vmw_stdu_update {
SVGA3dCmdUpdateGBScreenTarget body;
};
-struct vmw_stdu_dma {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceDMA body;
-};
-
struct vmw_stdu_surface_copy {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceCopy body;
@@ -414,6 +416,7 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct vmw_private *dev_priv;
struct vmw_screen_target_display_unit *stdu;
+ struct drm_crtc_state *new_crtc_state;
int ret;
if (!crtc) {
@@ -423,6 +426,7 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (dev_priv->vkms_enabled)
drm_crtc_vblank_off(crtc);
@@ -434,6 +438,14 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
(void) vmw_stdu_update_st(dev_priv, stdu);
+ /* Don't destroy the Screen Target if we are only setting the
+ * display as inactive
+ */
+ if (new_crtc_state->enable &&
+ !new_crtc_state->active &&
+ !new_crtc_state->mode_changed)
+ return;
+
ret = vmw_stdu_destroy_st(dev_priv, stdu);
if (ret)
DRM_ERROR("Failed to destroy Screen Target\n");
@@ -829,7 +841,41 @@ static void vmw_stdu_connector_destroy(struct drm_connector *connector)
vmw_stdu_destroy(vmw_connector_to_stdu(connector));
}
+static enum drm_mode_status
+vmw_stdu_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ enum drm_mode_status ret;
+ struct drm_device *dev = connector->dev;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ u64 assumed_cpp = dev_priv->assume_16bpp ? 2 : 4;
+ /* Align width and height to account for GPU tile over-alignment */
+ u64 required_mem = ALIGN(mode->hdisplay, GPU_TILE_SIZE) *
+ ALIGN(mode->vdisplay, GPU_TILE_SIZE) *
+ assumed_cpp;
+ required_mem = ALIGN(required_mem, PAGE_SIZE);
+
+ ret = drm_mode_validate_size(mode, dev_priv->stdu_max_width,
+ dev_priv->stdu_max_height);
+ if (ret != MODE_OK)
+ return ret;
+
+ ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
+ dev_priv->texture_max_height);
+ if (ret != MODE_OK)
+ return ret;
+ if (required_mem > dev_priv->max_primary_mem)
+ return MODE_MEM;
+
+ if (required_mem > dev_priv->max_mob_pages * PAGE_SIZE)
+ return MODE_MEM;
+
+ if (required_mem > dev_priv->max_mob_size)
+ return MODE_MEM;
+
+ return MODE_OK;
+}
static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
.dpms = vmw_du_connector_dpms,
@@ -845,7 +891,7 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
static const struct
drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = {
.get_modes = vmw_connector_get_modes,
- .mode_valid = vmw_connector_mode_valid
+ .mode_valid = vmw_stdu_connector_mode_valid
};
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 8321ec4f9b46..b165bbf52aef 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -32,7 +32,7 @@ endif
# Enable -Werror in CI and development
subdir-ccflags-$(CONFIG_DRM_XE_WERROR) += -Werror
-subdir-ccflags-y += -I$(obj) -I$(srctree)/$(src)
+subdir-ccflags-y += -I$(obj) -I$(src)
# generated sources
hostprogs := xe_gen_wa_oob
@@ -43,7 +43,7 @@ quiet_cmd_wa_oob = GEN $(notdir $(generated_oob))
cmd_wa_oob = mkdir -p $(@D); $^ $(generated_oob)
$(obj)/generated/%_wa_oob.c $(obj)/generated/%_wa_oob.h: $(obj)/xe_gen_wa_oob \
- $(srctree)/$(src)/xe_wa_oob.rules
+ $(src)/xe_wa_oob.rules
$(call cmd,wa_oob)
uses_generated_oob := \
@@ -176,8 +176,8 @@ endif
# i915 Display compat #defines and #includes
subdir-ccflags-$(CONFIG_DRM_XE_DISPLAY) += \
- -I$(srctree)/$(src)/display/ext \
- -I$(srctree)/$(src)/compat-i915-headers \
+ -I$(src)/display/ext \
+ -I$(src)/compat-i915-headers \
-I$(srctree)/drivers/gpu/drm/i915/display/ \
-Ddrm_i915_gem_object=xe_bo \
-Ddrm_i915_private=xe_device
@@ -319,7 +319,7 @@ ifneq ($(CONFIG_DRM_XE_DISPLAY),y)
endif
always-$(CONFIG_DRM_XE_WERROR) += \
- $(patsubst %.h,%.hdrtest, $(shell cd $(srctree)/$(src) && find * -name '*.h' $(hdrtest_find_args)))
+ $(patsubst %.h,%.hdrtest, $(shell cd $(src) && find * -name '*.h' $(hdrtest_find_args)))
quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
cmd_hdrtest = $(CC) -DHDRTEST $(filter-out $(CFLAGS_GCOV), $(c_flags)) -S -o /dev/null -x c /dev/null -include $<; touch $@
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 79116ad58620..476d613333a9 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -1749,6 +1749,7 @@ static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
if (!xe_gt_is_media_type(gt)) {
pf_release_vf_config_ggtt(gt, config);
pf_release_vf_config_lmem(gt, config);
+ pf_update_vf_lmtt(gt_to_xe(gt), vfid);
}
pf_release_config_ctxs(gt, config);
pf_release_config_dbs(gt, config);
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index c7d38469fb46..e4e3658e6a13 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1240,6 +1240,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
return 0;
err_entity:
+ mutex_unlock(&guc->submission_state.lock);
xe_sched_entity_fini(&ge->entity);
err_sched:
xe_sched_fini(&ge->sched);
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 9f6e9b7f11c8..65e5a3f4c340 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -34,7 +34,6 @@
#include "xe_sync.h"
#include "xe_trace.h"
#include "xe_vm.h"
-#include "xe_wa.h"
/**
* struct xe_migrate - migrate context.
@@ -300,10 +299,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
}
/*
- * Due to workaround 16017236439, odd instance hardware copy engines are
- * faster than even instance ones.
- * This function returns the mask involving all fast copy engines and the
- * reserved copy engine to be used as logical mask for migrate engine.
* Including the reserved copy engine is required to avoid deadlocks due to
* migrate jobs servicing the faults gets stuck behind the job that faulted.
*/
@@ -317,8 +312,7 @@ static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
if (hwe->class != XE_ENGINE_CLASS_COPY)
continue;
- if (!XE_WA(gt, 16017236439) ||
- xe_gt_is_usm_hwe(gt, hwe) || hwe->instance & 1)
+ if (xe_gt_is_usm_hwe(gt, hwe))
logical_mask |= BIT(hwe->logical_instance);
}
@@ -369,6 +363,10 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
if (!hwe || !logical_mask)
return ERR_PTR(-EINVAL);
+ /*
+ * XXX: Currently only reserving 1 (likely slow) BCS instance on
+ * PVC, may want to revisit if performance is needed.
+ */
m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
EXEC_QUEUE_FLAG_KERNEL |
EXEC_QUEUE_FLAG_PERMANENT |
diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
index c010ef16fbf5..a5e7da8cf944 100644
--- a/drivers/gpu/drm/xe/xe_pcode.c
+++ b/drivers/gpu/drm/xe/xe_pcode.c
@@ -191,7 +191,7 @@ int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
drm_WARN_ON_ONCE(&gt_to_xe(gt)->drm, timeout_base_ms > 1);
preempt_disable();
ret = pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
- true, timeout_base_ms * 1000, true);
+ true, 50 * 1000, true);
preempt_enable();
out:
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 3ad2b4cfd1f0..63112ed975c4 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -11,6 +11,7 @@
#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>
+#include <linux/vmalloc.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
index 13157da0089e..c9fb432d4cbd 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -940,7 +940,7 @@ zynqmp_disp_layer_find_format(struct zynqmp_disp_layer *layer,
* zynqmp_disp_layer_find_live_format - Find format information for given
* media bus format
* @layer: The layer
- * @drm_fmt: Media bus format to search
+ * @media_bus_format: Media bus format to search
*
* Search display subsystem format information corresponding to the given media
* bus format @media_bus_format for the @layer, and return a pointer to the
@@ -981,7 +981,7 @@ u32 *zynqmp_disp_layer_drm_formats(struct zynqmp_disp_layer *layer,
unsigned int i;
u32 *formats;
- if (WARN_ON(!layer->mode == ZYNQMP_DPSUB_LAYER_NONLIVE)) {
+ if (WARN_ON(layer->mode != ZYNQMP_DPSUB_LAYER_NONLIVE)) {
*num_formats = 0;
return NULL;
}
@@ -1117,7 +1117,7 @@ void zynqmp_disp_layer_set_format(struct zynqmp_disp_layer *layer,
/**
* zynqmp_disp_layer_set_live_format - Set the live video layer format
* @layer: The layer
- * @info: The format info
+ * @media_bus_format: Media bus format to set
*
* NOTE: This function should not be used to set format for non-live video
* layer. Use zynqmp_disp_layer_set_format() instead.