From 3f866f5f04d3645970662409d2bdff3dca58b1a3 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 18 Jan 2018 18:39:55 -0600 Subject: drm/amdkfd: Use ARRAY_SIZE macro in kfd_build_sysfs_node_entry Use ARRAY_SIZE instead of dividing sizeof array with sizeof an element. This issue was detected with the help of Coccinelle. Signed-off-by: Gustavo A. R. Silva Reviewed-by: Felix Kuehling Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdkfd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index c6a76090a725..7783250e1c6d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -677,7 +677,7 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, } /* All hardware blocks have the same number of attributes. */ - num_attrs = sizeof(perf_attr_iommu)/sizeof(struct kfd_perf_attr); + num_attrs = ARRAY_SIZE(perf_attr_iommu); list_for_each_entry(perf, &dev->perf_props, list) { perf->attr_group = kzalloc(sizeof(struct kfd_perf_attr) * num_attrs + sizeof(struct attribute_group), -- cgit From 3ee2d00cfb6c0b44aeb9575c20ad8d1abf09be0f Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Thu, 4 Jan 2018 17:17:41 -0500 Subject: drm/amdkfd: Conditionally enable PCIe atomics This will be needed for most dGPUs. CC: linux-pci@vger.kernel.org Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 17 +++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 1 + 2 files changed, 18 insertions(+) (limited to 'drivers/gpu/drm/amd/amdkfd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index a8fa33a08de3..fafe971d1d49 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -41,6 +41,7 @@ static const struct kfd_device_info kaveri_device_info = { .num_of_watch_points = 4, .mqd_size_aligned = MQD_SIZE_ALIGNED, .supports_cwsr = false, + .needs_pci_atomics = false, }; static const struct kfd_device_info carrizo_device_info = { @@ -53,6 +54,7 @@ static const struct kfd_device_info carrizo_device_info = { .num_of_watch_points = 4, .mqd_size_aligned = MQD_SIZE_ALIGNED, .supports_cwsr = true, + .needs_pci_atomics = false, }; struct kfd_deviceid { @@ -127,6 +129,21 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, return NULL; } + if (device_info->needs_pci_atomics) { + /* Allow BIF to recode atomics to PCIe 3.0 + * AtomicOps. 32 and 64-bit requests are possible and + * must be supported. 
+ */ + if (pci_enable_atomic_ops_to_root(pdev, + PCI_EXP_DEVCAP2_ATOMIC_COMP32 | + PCI_EXP_DEVCAP2_ATOMIC_COMP64) < 0) { + dev_info(kfd_device, + "skipped device %x:%x, PCI rejects atomics", + pdev->vendor, pdev->device); + return NULL; + } + } + kfd = kzalloc(sizeof(*kfd), GFP_KERNEL); if (!kfd) return NULL; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 0bedcf9cc08c..e5b16209f069 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -158,6 +158,7 @@ struct kfd_device_info { uint8_t num_of_watch_points; uint16_t mqd_size_aligned; bool supports_cwsr; + bool needs_pci_atomics; }; struct kfd_mem_obj { -- cgit From d146c5a7196b4c2c2586569971a55392b501b93b Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Thu, 4 Jan 2018 17:17:43 -0500 Subject: drm/amdkfd: Make sched_policy a per-device setting Some dGPUs don't support HWS. Allow them to use a per-device sched_policy that may be different from the global default. Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 3 ++- drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c | 3 ++- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 2 +- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 26 +++++++++++++++++----- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 1 + .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 3 ++- 6 files changed, 29 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 62c3d9cd6ef1..6fe24964540b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -901,7 +901,8 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep, mutex_unlock(&p->mutex); - if (sched_policy == KFD_SCHED_POLICY_NO_HWS && pdd->qpd.vmid != 0) + if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS && + pdd->qpd.vmid != 0) dev->kfd2kgd->set_scratch_backing_va( dev->kgd, args->va_addr, pdd->qpd.vmid); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c index 3da25f7bda6b..9d4af961c5d1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c @@ -33,6 +33,7 @@ #include "kfd_pm4_headers_diq.h" #include "kfd_dbgmgr.h" #include "kfd_dbgdev.h" +#include "kfd_device_queue_manager.h" static DEFINE_MUTEX(kfd_dbgmgr_mutex); @@ -83,7 +84,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev) } /* get actual type of DBGDevice cpsch or not */ - if (sched_policy == KFD_SCHED_POLICY_NO_HWS) + if (pdev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) type = DBGDEV_TYPE_NODIQ; kfd_dbgdev_init(new_buff->dbgdev, pdev, type); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index fafe971d1d49..e8ff4eab63e7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -340,7 +340,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd->pdev->device); pr_debug("Starting kfd with the following scheduling policy %d\n", - sched_policy); + kfd->dqm->sched_policy); goto out; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index b21285afa4ea..e57781d99664 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -385,7 
+385,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) prev_active = q->properties.is_active; /* Make sure the queue is unmapped before updating the MQD */ - if (sched_policy != KFD_SCHED_POLICY_NO_HWS) { + if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) { retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); if (retval) { @@ -417,7 +417,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) else if (!q->properties.is_active && prev_active) dqm->queue_count--; - if (sched_policy != KFD_SCHED_POLICY_NO_HWS) + if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) retval = map_queues_cpsch(dqm); else if (q->properties.is_active && (q->properties.type == KFD_QUEUE_TYPE_COMPUTE || @@ -1097,7 +1097,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm, alternate_aperture_base, alternate_aperture_size); - if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0)) + if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0)) program_sh_mem_settings(dqm, qpd); pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n", @@ -1242,8 +1242,24 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) if (!dqm) return NULL; + switch (dev->device_info->asic_family) { + /* HWS is not available on Hawaii. */ + case CHIP_HAWAII: + /* HWS depends on CWSR for timely dequeue. CWSR is not + * available on Tonga. + * + * FIXME: This argument also applies to Kaveri. + */ + case CHIP_TONGA: + dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS; + break; + default: + dqm->sched_policy = sched_policy; + break; + } + dqm->dev = dev; - switch (sched_policy) { + switch (dqm->sched_policy) { case KFD_SCHED_POLICY_HWS: case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: /* initialize dqm for cp scheduling */ @@ -1280,7 +1296,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) dqm->ops.process_termination = process_termination_nocpsch; break; default: - pr_err("Invalid scheduling policy %d\n", sched_policy); + pr_err("Invalid scheduling policy %d\n", dqm->sched_policy); goto out_free; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index c61b693bfa8c..9fdc9c2a107a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -180,6 +180,7 @@ struct device_queue_manager { unsigned int *fence_addr; struct kfd_mem_obj *fence_mem; bool active_runlist; + int sched_policy; }; void device_queue_manager_init_cik( diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 876380632668..7817e327ea6d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -208,7 +208,8 @@ int pqm_create_queue(struct process_queue_manager *pqm, case KFD_QUEUE_TYPE_COMPUTE: /* check if there is over subscription */ - if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) && + if ((dev->dqm->sched_policy == + KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) && ((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) || (dev->dqm->queue_count >= get_queues_num(dev->dqm)))) { pr_err("Over-subscription is not allowed in radeon_kfd.sched_policy == 1\n"); -- cgit From 97672cbe3de809ef8c4ea66cce675f5da3d3df44 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Thu, 4 Jan 2018 17:17:44 -0500 Subject: drm/amdkfd: 
Add dGPU support to the device queue manager GFXv7 and v8 dGPUs use a different addressing mode for KFD compared to APUs (GPUVM64 vs HSA64). And dGPUs don't support MTYPE_CC. They use MTYPE_UC instead for memory that requires coherency. Signed-off-by: Felix Kuehling Acked-by: Oded Gabbay Signed-off-by: Oded Gabbay --- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 11 +++ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 4 + .../drm/amd/amdkfd/kfd_device_queue_manager_cik.c | 56 +++++++++++++ .../drm/amd/amdkfd/kfd_device_queue_manager_vi.c | 93 ++++++++++++++++++++++ 4 files changed, 164 insertions(+) (limited to 'drivers/gpu/drm/amd/amdkfd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index e57781d99664..47d493ea8e4f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1308,6 +1308,17 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) case CHIP_KAVERI: device_queue_manager_init_cik(&dqm->asic_ops); break; + + case CHIP_HAWAII: + device_queue_manager_init_cik_hawaii(&dqm->asic_ops); + break; + + case CHIP_TONGA: + case CHIP_FIJI: + case CHIP_POLARIS10: + case CHIP_POLARIS11: + device_queue_manager_init_vi_tonga(&dqm->asic_ops); + break; default: WARN(1, "Unexpected ASIC family %u", dev->device_info->asic_family); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index 9fdc9c2a107a..68be0aaff3f4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -185,8 +185,12 @@ struct device_queue_manager { void device_queue_manager_init_cik( struct device_queue_manager_asic_ops *asic_ops); +void device_queue_manager_init_cik_hawaii( + struct device_queue_manager_asic_ops *asic_ops); void device_queue_manager_init_vi( struct device_queue_manager_asic_ops *asic_ops); +void device_queue_manager_init_vi_tonga( + struct device_queue_manager_asic_ops *asic_ops); void program_sh_mem_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd); unsigned int get_queues_num(struct device_queue_manager *dqm); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c index 28e48c90c596..aed4c21417bf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c @@ -34,8 +34,13 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm, uint64_t alternate_aperture_size); static int update_qpd_cik(struct device_queue_manager *dqm, struct qcm_process_device *qpd); +static int update_qpd_cik_hawaii(struct device_queue_manager *dqm, + struct qcm_process_device *qpd); static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, struct qcm_process_device *qpd); +static void init_sdma_vm_hawaii(struct device_queue_manager *dqm, + struct queue *q, + struct qcm_process_device *qpd); void device_queue_manager_init_cik( struct device_queue_manager_asic_ops *asic_ops) @@ -45,6 +50,14 @@ void device_queue_manager_init_cik( asic_ops->init_sdma_vm = init_sdma_vm; } +void device_queue_manager_init_cik_hawaii( + struct device_queue_manager_asic_ops *asic_ops) +{ + asic_ops->set_cache_memory_policy = set_cache_memory_policy_cik; + asic_ops->update_qpd = update_qpd_cik_hawaii; + asic_ops->init_sdma_vm = 
init_sdma_vm_hawaii; +} + static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble) { /* In 64-bit mode, we can only control the top 3 bits of the LDS, @@ -132,6 +145,36 @@ static int update_qpd_cik(struct device_queue_manager *dqm, return 0; } +static int update_qpd_cik_hawaii(struct device_queue_manager *dqm, + struct qcm_process_device *qpd) +{ + struct kfd_process_device *pdd; + unsigned int temp; + + pdd = qpd_to_pdd(qpd); + + /* check if sh_mem_config register already configured */ + if (qpd->sh_mem_config == 0) { + qpd->sh_mem_config = + ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) | + DEFAULT_MTYPE(MTYPE_NONCACHED) | + APE1_MTYPE(MTYPE_NONCACHED); + qpd->sh_mem_ape1_limit = 0; + qpd->sh_mem_ape1_base = 0; + } + + /* On dGPU we're always in GPUVM64 addressing mode with 64-bit + * aperture addresses. + */ + temp = get_sh_mem_bases_nybble_64(pdd); + qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp); + + pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n", + qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases); + + return 0; +} + static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, struct qcm_process_device *qpd) { @@ -147,3 +190,16 @@ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, q->properties.sdma_vm_addr = value; } + +static void init_sdma_vm_hawaii(struct device_queue_manager *dqm, + struct queue *q, + struct qcm_process_device *qpd) +{ + /* On dGPU we're always in GPUVM64 addressing mode with 64-bit + * aperture addresses. + */ + q->properties.sdma_vm_addr = + ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) << + SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) & + SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK; +} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c index 2fbce57a2f21..fd60a116be37 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c @@ -33,10 +33,21 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm, enum cache_policy alternate_policy, void __user *alternate_aperture_base, uint64_t alternate_aperture_size); +static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + enum cache_policy default_policy, + enum cache_policy alternate_policy, + void __user *alternate_aperture_base, + uint64_t alternate_aperture_size); static int update_qpd_vi(struct device_queue_manager *dqm, struct qcm_process_device *qpd); +static int update_qpd_vi_tonga(struct device_queue_manager *dqm, + struct qcm_process_device *qpd); static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, struct qcm_process_device *qpd); +static void init_sdma_vm_tonga(struct device_queue_manager *dqm, + struct queue *q, + struct qcm_process_device *qpd); void device_queue_manager_init_vi( struct device_queue_manager_asic_ops *asic_ops) @@ -46,6 +57,14 @@ void device_queue_manager_init_vi( asic_ops->init_sdma_vm = init_sdma_vm; } +void device_queue_manager_init_vi_tonga( + struct device_queue_manager_asic_ops *asic_ops) +{ + asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi_tonga; + asic_ops->update_qpd = update_qpd_vi_tonga; + asic_ops->init_sdma_vm = init_sdma_vm_tonga; +} + static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble) { /* In 64-bit mode, we can only control the top 3 bits of the LDS, @@ -103,6 +122,33 @@ static 
bool set_cache_memory_policy_vi(struct device_queue_manager *dqm, return true; } +static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + enum cache_policy default_policy, + enum cache_policy alternate_policy, + void __user *alternate_aperture_base, + uint64_t alternate_aperture_size) +{ + uint32_t default_mtype; + uint32_t ape1_mtype; + + default_mtype = (default_policy == cache_policy_coherent) ? + MTYPE_UC : + MTYPE_NC; + + ape1_mtype = (alternate_policy == cache_policy_coherent) ? + MTYPE_UC : + MTYPE_NC; + + qpd->sh_mem_config = + SH_MEM_ALIGNMENT_MODE_UNALIGNED << + SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT | + default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT | + ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT; + + return true; +} + static int update_qpd_vi(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { @@ -144,6 +190,40 @@ static int update_qpd_vi(struct device_queue_manager *dqm, return 0; } +static int update_qpd_vi_tonga(struct device_queue_manager *dqm, + struct qcm_process_device *qpd) +{ + struct kfd_process_device *pdd; + unsigned int temp; + + pdd = qpd_to_pdd(qpd); + + /* check if sh_mem_config register already configured */ + if (qpd->sh_mem_config == 0) { + qpd->sh_mem_config = + SH_MEM_ALIGNMENT_MODE_UNALIGNED << + SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT | + MTYPE_UC << + SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT | + MTYPE_UC << + SH_MEM_CONFIG__APE1_MTYPE__SHIFT; + + qpd->sh_mem_ape1_limit = 0; + qpd->sh_mem_ape1_base = 0; + } + + /* On dGPU we're always in GPUVM64 addressing mode with 64-bit + * aperture addresses. + */ + temp = get_sh_mem_bases_nybble_64(pdd); + qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp); + + pr_debug("sh_mem_bases nybble: 0x%X and register 0x%X\n", + temp, qpd->sh_mem_bases); + + return 0; +} + static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, struct qcm_process_device *qpd) { @@ -159,3 +239,16 @@ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, q->properties.sdma_vm_addr = value; } + +static void init_sdma_vm_tonga(struct device_queue_manager *dqm, + struct queue *q, + struct qcm_process_device *qpd) +{ + /* On dGPU we're always in GPUVM64 addressing mode with 64-bit + * aperture addresses. + */ + q->properties.sdma_vm_addr = + ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) << + SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) & + SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK; +} -- cgit From ee04955af6b851a4f133d2472bc65c5d8b9aa692 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Thu, 4 Jan 2018 17:17:45 -0500 Subject: drm/amdkfd: Add dGPU support to the MQD manager On dGPUs don't set ATC addressing bits and use MTYPE_UC for coherent memory. 
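[Editor's note, not part of the patch: for orientation while reading the flattened diffs below, the dGPU support follows a wrapper pattern. The Hawaii and Tonga initializers reuse the existing CIK/VI MQD managers and only override the update_mqd hook, so the ATC bit stays clear and MTYPE_UC is used for coherent memory. A condensed sketch of the Tonga variant, paraphrased from the diff that follows:

/* Condensed from the patch below: update_mqd_tonga selects MTYPE_UC and
 * clears the ATC bit by calling __update_mqd(mm, mqd, q, MTYPE_UC, 0).
 */
struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
					      struct kfd_dev *dev)
{
	struct mqd_manager *mqd = mqd_manager_init_vi(type, dev);

	if (!mqd)
		return NULL;
	/* Only compute/CP queues need the dGPU-specific MQD update */
	if (type == KFD_MQD_TYPE_CP || type == KFD_MQD_TYPE_COMPUTE)
		mqd->update_mqd = update_mqd_tonga;
	return mqd;
}

The Hawaii (CIK) variant in the same patch applies the identical idea via mqd_manager_init_cik plus update_mqd_hawaii.]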
Signed-off-by: Felix Kuehling Acked-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 7 +++++ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 35 ++++++++++++++++++++++-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 21 ++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 4 +++ 4 files changed, 64 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index dfd260ef81ff..ee7061e1c466 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -29,8 +29,15 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type, switch (dev->device_info->asic_family) { case CHIP_KAVERI: return mqd_manager_init_cik(type, dev); + case CHIP_HAWAII: + return mqd_manager_init_cik_hawaii(type, dev); case CHIP_CARRIZO: return mqd_manager_init_vi(type, dev); + case CHIP_TONGA: + case CHIP_FIJI: + case CHIP_POLARIS10: + case CHIP_POLARIS11: + return mqd_manager_init_vi_tonga(type, dev); default: WARN(1, "Unexpected ASIC family %u", dev->device_info->asic_family); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index f8ef4a051e08..fbe3f83ba685 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -170,14 +170,19 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, mms); } -static int update_mqd(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) +static int __update_mqd(struct mqd_manager *mm, void *mqd, + struct queue_properties *q, unsigned int atc_bit) { struct cik_mqd *m; m = get_mqd(mqd); m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE | - DEFAULT_MIN_AVAIL_SIZE | PQ_ATC_EN; + DEFAULT_MIN_AVAIL_SIZE; + m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE; + if (atc_bit) { + m->cp_hqd_pq_control |= PQ_ATC_EN; + m->cp_hqd_ib_control |= IB_ATC_EN; + } /* * Calculating queue size which is log base 2 of actual queue size -1 @@ -202,6 +207,18 @@ static int update_mqd(struct mqd_manager *mm, void *mqd, return 0; } +static int update_mqd(struct mqd_manager *mm, void *mqd, + struct queue_properties *q) +{ + return __update_mqd(mm, mqd, q, 1); +} + +static int update_mqd_hawaii(struct mqd_manager *mm, void *mqd, + struct queue_properties *q) +{ + return __update_mqd(mm, mqd, q, 0); +} + static int update_mqd_sdma(struct mqd_manager *mm, void *mqd, struct queue_properties *q) { @@ -441,3 +458,15 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, return mqd; } +struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type, + struct kfd_dev *dev) +{ + struct mqd_manager *mqd; + + mqd = mqd_manager_init_cik(type, dev); + if (!mqd) + return NULL; + if ((type == KFD_MQD_TYPE_CP) || (type == KFD_MQD_TYPE_COMPUTE)) + mqd->update_mqd = update_mqd_hawaii; + return mqd; +} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index 971aec0637dc..58221c1fc917 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -151,6 +151,8 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd, m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr); m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr); + m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr); + 
m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr); m->cp_hqd_pq_doorbell_control = q->doorbell_off << @@ -208,6 +210,12 @@ static int update_mqd(struct mqd_manager *mm, void *mqd, return __update_mqd(mm, mqd, q, MTYPE_CC, 1); } +static int update_mqd_tonga(struct mqd_manager *mm, void *mqd, + struct queue_properties *q) +{ + return __update_mqd(mm, mqd, q, MTYPE_UC, 0); +} + static int destroy_mqd(struct mqd_manager *mm, void *mqd, enum kfd_preempt_type type, unsigned int timeout, uint32_t pipe_id, @@ -432,3 +440,16 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, return mqd; } + +struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type, + struct kfd_dev *dev) +{ + struct mqd_manager *mqd; + + mqd = mqd_manager_init_vi(type, dev); + if (!mqd) + return NULL; + if ((type == KFD_MQD_TYPE_CP) || (type == KFD_MQD_TYPE_COMPUTE)) + mqd->update_mqd = update_mqd_tonga; + return mqd; +} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index e5b16209f069..594f85355397 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -706,8 +706,12 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type, struct kfd_dev *dev); struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, struct kfd_dev *dev); +struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type, + struct kfd_dev *dev); struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, struct kfd_dev *dev); +struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type, + struct kfd_dev *dev); struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev); void device_queue_manager_uninit(struct device_queue_manager *dqm); struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, -- cgit From 1d63669885ebc8fca13e44864f7199c3ff50cea3 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Thu, 4 Jan 2018 17:17:46 -0500 Subject: drm/amdkfd: Add dGPU support to kernel_queue_init Recognize dGPU ASIC families. 
Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd/amdkfd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 5dc6567d4a13..69f496485331 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -297,10 +297,15 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, switch (dev->device_info->asic_family) { case CHIP_CARRIZO: + case CHIP_TONGA: + case CHIP_FIJI: + case CHIP_POLARIS10: + case CHIP_POLARIS11: kernel_queue_init_vi(&kq->ops_asic_specific); break; case CHIP_KAVERI: + case CHIP_HAWAII: kernel_queue_init_cik(&kq->ops_asic_specific); break; default: -- cgit From a3084e6c522f94130c9dfc26fe6458c353dbc0c9 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Thu, 4 Jan 2018 17:17:47 -0500 Subject: drm/amdkfd: Add dGPU device IDs and device info v2: remove needs_iommu field as it doesn't exists CC: linux-pci@vger.kernel.org Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 145 +++++++++++++++++++++++++++++++- 1 file changed, 143 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index e8ff4eab63e7..83d6f410890e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -57,12 +57,110 @@ static const struct kfd_device_info carrizo_device_info = { .needs_pci_atomics = false, }; +static const struct kfd_device_info hawaii_device_info = { + .asic_family = CHIP_HAWAII, + .max_pasid_bits = 16, + /* max num of queues for KV.TODO should be a dynamic value */ + .max_no_of_hqd = 24, + .ih_ring_entry_size = 4 * sizeof(uint32_t), + .event_interrupt_class = &event_interrupt_class_cik, + .num_of_watch_points = 4, + .mqd_size_aligned = MQD_SIZE_ALIGNED, + .supports_cwsr = false, + .needs_pci_atomics = false, +}; + +static const struct kfd_device_info tonga_device_info = { + .asic_family = CHIP_TONGA, + .max_pasid_bits = 16, + .max_no_of_hqd = 24, + .ih_ring_entry_size = 4 * sizeof(uint32_t), + .event_interrupt_class = &event_interrupt_class_cik, + .num_of_watch_points = 4, + .mqd_size_aligned = MQD_SIZE_ALIGNED, + .supports_cwsr = false, + .needs_pci_atomics = true, +}; + +static const struct kfd_device_info tonga_vf_device_info = { + .asic_family = CHIP_TONGA, + .max_pasid_bits = 16, + .max_no_of_hqd = 24, + .ih_ring_entry_size = 4 * sizeof(uint32_t), + .event_interrupt_class = &event_interrupt_class_cik, + .num_of_watch_points = 4, + .mqd_size_aligned = MQD_SIZE_ALIGNED, + .supports_cwsr = false, + .needs_pci_atomics = false, +}; + +static const struct kfd_device_info fiji_device_info = { + .asic_family = CHIP_FIJI, + .max_pasid_bits = 16, + .max_no_of_hqd = 24, + .ih_ring_entry_size = 4 * sizeof(uint32_t), + .event_interrupt_class = &event_interrupt_class_cik, + .num_of_watch_points = 4, + .mqd_size_aligned = MQD_SIZE_ALIGNED, + .supports_cwsr = true, + .needs_pci_atomics = true, +}; + +static const struct kfd_device_info fiji_vf_device_info = { + .asic_family = CHIP_FIJI, + .max_pasid_bits = 16, + .max_no_of_hqd = 24, + .ih_ring_entry_size = 4 * sizeof(uint32_t), + .event_interrupt_class = &event_interrupt_class_cik, + .num_of_watch_points = 4, + .mqd_size_aligned = MQD_SIZE_ALIGNED, + 
.supports_cwsr = true, + .needs_pci_atomics = false, +}; + + +static const struct kfd_device_info polaris10_device_info = { + .asic_family = CHIP_POLARIS10, + .max_pasid_bits = 16, + .max_no_of_hqd = 24, + .ih_ring_entry_size = 4 * sizeof(uint32_t), + .event_interrupt_class = &event_interrupt_class_cik, + .num_of_watch_points = 4, + .mqd_size_aligned = MQD_SIZE_ALIGNED, + .supports_cwsr = true, + .needs_pci_atomics = true, +}; + +static const struct kfd_device_info polaris10_vf_device_info = { + .asic_family = CHIP_POLARIS10, + .max_pasid_bits = 16, + .max_no_of_hqd = 24, + .ih_ring_entry_size = 4 * sizeof(uint32_t), + .event_interrupt_class = &event_interrupt_class_cik, + .num_of_watch_points = 4, + .mqd_size_aligned = MQD_SIZE_ALIGNED, + .supports_cwsr = true, + .needs_pci_atomics = false, +}; + +static const struct kfd_device_info polaris11_device_info = { + .asic_family = CHIP_POLARIS11, + .max_pasid_bits = 16, + .max_no_of_hqd = 24, + .ih_ring_entry_size = 4 * sizeof(uint32_t), + .event_interrupt_class = &event_interrupt_class_cik, + .num_of_watch_points = 4, + .mqd_size_aligned = MQD_SIZE_ALIGNED, + .supports_cwsr = true, + .needs_pci_atomics = true, +}; + + struct kfd_deviceid { unsigned short did; const struct kfd_device_info *device_info; }; -/* Please keep this sorted by increasing device id. */ static const struct kfd_deviceid supported_devices[] = { { 0x1304, &kaveri_device_info }, /* Kaveri */ { 0x1305, &kaveri_device_info }, /* Kaveri */ @@ -90,7 +188,50 @@ static const struct kfd_deviceid supported_devices[] = { { 0x9874, &carrizo_device_info }, /* Carrizo */ { 0x9875, &carrizo_device_info }, /* Carrizo */ { 0x9876, &carrizo_device_info }, /* Carrizo */ - { 0x9877, &carrizo_device_info } /* Carrizo */ + { 0x9877, &carrizo_device_info }, /* Carrizo */ + { 0x67A0, &hawaii_device_info }, /* Hawaii */ + { 0x67A1, &hawaii_device_info }, /* Hawaii */ + { 0x67A2, &hawaii_device_info }, /* Hawaii */ + { 0x67A8, &hawaii_device_info }, /* Hawaii */ + { 0x67A9, &hawaii_device_info }, /* Hawaii */ + { 0x67AA, &hawaii_device_info }, /* Hawaii */ + { 0x67B0, &hawaii_device_info }, /* Hawaii */ + { 0x67B1, &hawaii_device_info }, /* Hawaii */ + { 0x67B8, &hawaii_device_info }, /* Hawaii */ + { 0x67B9, &hawaii_device_info }, /* Hawaii */ + { 0x67BA, &hawaii_device_info }, /* Hawaii */ + { 0x67BE, &hawaii_device_info }, /* Hawaii */ + { 0x6920, &tonga_device_info }, /* Tonga */ + { 0x6921, &tonga_device_info }, /* Tonga */ + { 0x6928, &tonga_device_info }, /* Tonga */ + { 0x6929, &tonga_device_info }, /* Tonga */ + { 0x692B, &tonga_device_info }, /* Tonga */ + { 0x692F, &tonga_vf_device_info }, /* Tonga vf */ + { 0x6938, &tonga_device_info }, /* Tonga */ + { 0x6939, &tonga_device_info }, /* Tonga */ + { 0x7300, &fiji_device_info }, /* Fiji */ + { 0x730F, &fiji_vf_device_info }, /* Fiji vf*/ + { 0x67C0, &polaris10_device_info }, /* Polaris10 */ + { 0x67C1, &polaris10_device_info }, /* Polaris10 */ + { 0x67C2, &polaris10_device_info }, /* Polaris10 */ + { 0x67C4, &polaris10_device_info }, /* Polaris10 */ + { 0x67C7, &polaris10_device_info }, /* Polaris10 */ + { 0x67C8, &polaris10_device_info }, /* Polaris10 */ + { 0x67C9, &polaris10_device_info }, /* Polaris10 */ + { 0x67CA, &polaris10_device_info }, /* Polaris10 */ + { 0x67CC, &polaris10_device_info }, /* Polaris10 */ + { 0x67CF, &polaris10_device_info }, /* Polaris10 */ + { 0x67D0, &polaris10_vf_device_info }, /* Polaris10 vf*/ + { 0x67DF, &polaris10_device_info }, /* Polaris10 */ + { 0x67E0, &polaris11_device_info }, /* Polaris11 */ + { 
0x67E1, &polaris11_device_info }, /* Polaris11 */ + { 0x67E3, &polaris11_device_info }, /* Polaris11 */ + { 0x67E7, &polaris11_device_info }, /* Polaris11 */ + { 0x67E8, &polaris11_device_info }, /* Polaris11 */ + { 0x67E9, &polaris11_device_info }, /* Polaris11 */ + { 0x67EB, &polaris11_device_info }, /* Polaris11 */ + { 0x67EF, &polaris11_device_info }, /* Polaris11 */ + { 0x67FF, &polaris11_device_info }, /* Polaris11 */ }; static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, -- cgit From 64d1c3a43a6fb5cef32a085bc17cbbe31945a651 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Fri, 8 Dec 2017 19:22:12 -0500 Subject: drm/amdkfd: Centralize IOMMUv2 code and make it conditional dGPUs work without IOMMUv2. Make IOMMUv2 initialization dependent on ASIC information. Also allow building KFD without IOMMUv2 support. This is still useful for dGPUs and prepares for enabling KFD on architectures that don't support AMD IOMMUv2. v2: * Centralize IOMMUv2 code to avoid #ifdefs in too many places v3: * Imply AMD_IOMMU_V2 in Kconfig Signed-off-by: Felix Kuehling Acked-by: Christian Konig Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/Kconfig | 3 +- drivers/gpu/drm/amd/amdkfd/Makefile | 4 + drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 14 +- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 127 +++-------- drivers/gpu/drm/amd/amdkfd/kfd_events.c | 3 + drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 357 ++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_iommu.h | 78 +++++++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 14 +- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 138 +----------- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 16 +- drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 6 +- 11 files changed, 495 insertions(+), 265 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_iommu.c create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_iommu.h (limited to 'drivers/gpu/drm/amd/amdkfd') diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig index bc5a2945bd2b..ed2f06c9f346 100644 --- a/drivers/gpu/drm/amd/amdkfd/Kconfig +++ b/drivers/gpu/drm/amd/amdkfd/Kconfig @@ -4,6 +4,7 @@ config HSA_AMD tristate "HSA kernel driver for AMD GPU devices" - depends on DRM_AMDGPU && AMD_IOMMU_V2 && X86_64 + depends on DRM_AMDGPU && X86_64 + imply AMD_IOMMU_V2 help Enable this if you want to use HSA features on AMD GPU devices. diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile index a317e76ffb5e..0d0242240c47 100644 --- a/drivers/gpu/drm/amd/amdkfd/Makefile +++ b/drivers/gpu/drm/amd/amdkfd/Makefile @@ -37,6 +37,10 @@ amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \ kfd_interrupt.o kfd_events.o cik_event_interrupt.o \ kfd_dbgdev.o kfd_dbgmgr.o kfd_crat.o +ifneq ($(CONFIG_AMD_IOMMU_V2),) +amdkfd-y += kfd_iommu.o +endif + amdkfd-$(CONFIG_DEBUG_FS) += kfd_debugfs.o obj-$(CONFIG_HSA_AMD) += amdkfd.o diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 2bc2816767a7..7493f47e7fe1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -22,10 +22,10 @@ #include #include -#include #include "kfd_crat.h" #include "kfd_priv.h" #include "kfd_topology.h" +#include "kfd_iommu.h" /* GPU Processor ID base for dGPUs for which VCRAT needs to be created. * GPU processor ID are expressed with Bit[31]=1. 
@@ -1037,15 +1037,11 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, struct crat_subtype_generic *sub_type_hdr; struct crat_subtype_computeunit *cu; struct kfd_cu_info cu_info; - struct amd_iommu_device_info iommu_info; int avail_size = *size; uint32_t total_num_of_cu; int num_of_cache_entries = 0; int cache_mem_filled = 0; int ret = 0; - const u32 required_iommu_flags = AMD_IOMMU_DEVICE_FLAG_ATS_SUP | - AMD_IOMMU_DEVICE_FLAG_PRI_SUP | - AMD_IOMMU_DEVICE_FLAG_PASID_SUP; struct kfd_local_mem_info local_mem_info; if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_GPU) @@ -1106,12 +1102,8 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, /* Check if this node supports IOMMU. During parsing this flag will * translate to HSA_CAP_ATS_PRESENT */ - iommu_info.flags = 0; - if (amd_iommu_device_info(kdev->pdev, &iommu_info) == 0) { - if ((iommu_info.flags & required_iommu_flags) == - required_iommu_flags) - cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT; - } + if (!kfd_iommu_check_device(kdev)) + cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT; crat_table->length += sub_type_hdr->length; crat_table->total_entries++; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 83d6f410890e..4ac2d61b65d5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -20,7 +20,9 @@ * OTHER DEALINGS IN THE SOFTWARE. */ +#if defined(CONFIG_AMD_IOMMU_V2_MODULE) || defined(CONFIG_AMD_IOMMU_V2) #include +#endif #include #include #include @@ -28,9 +30,11 @@ #include "kfd_device_queue_manager.h" #include "kfd_pm4_headers_vi.h" #include "cwsr_trap_handler_gfx8.asm" +#include "kfd_iommu.h" #define MQD_SIZE_ALIGNED 768 +#ifdef KFD_SUPPORT_IOMMU_V2 static const struct kfd_device_info kaveri_device_info = { .asic_family = CHIP_KAVERI, .max_pasid_bits = 16, @@ -41,6 +45,7 @@ static const struct kfd_device_info kaveri_device_info = { .num_of_watch_points = 4, .mqd_size_aligned = MQD_SIZE_ALIGNED, .supports_cwsr = false, + .needs_iommu_device = true, .needs_pci_atomics = false, }; @@ -54,8 +59,10 @@ static const struct kfd_device_info carrizo_device_info = { .num_of_watch_points = 4, .mqd_size_aligned = MQD_SIZE_ALIGNED, .supports_cwsr = true, + .needs_iommu_device = true, .needs_pci_atomics = false, }; +#endif static const struct kfd_device_info hawaii_device_info = { .asic_family = CHIP_HAWAII, @@ -67,6 +74,7 @@ static const struct kfd_device_info hawaii_device_info = { .num_of_watch_points = 4, .mqd_size_aligned = MQD_SIZE_ALIGNED, .supports_cwsr = false, + .needs_iommu_device = false, .needs_pci_atomics = false, }; @@ -79,6 +87,7 @@ static const struct kfd_device_info tonga_device_info = { .num_of_watch_points = 4, .mqd_size_aligned = MQD_SIZE_ALIGNED, .supports_cwsr = false, + .needs_iommu_device = false, .needs_pci_atomics = true, }; @@ -91,6 +100,7 @@ static const struct kfd_device_info tonga_vf_device_info = { .num_of_watch_points = 4, .mqd_size_aligned = MQD_SIZE_ALIGNED, .supports_cwsr = false, + .needs_iommu_device = false, .needs_pci_atomics = false, }; @@ -103,6 +113,7 @@ static const struct kfd_device_info fiji_device_info = { .num_of_watch_points = 4, .mqd_size_aligned = MQD_SIZE_ALIGNED, .supports_cwsr = true, + .needs_iommu_device = false, .needs_pci_atomics = true, }; @@ -115,6 +126,7 @@ static const struct kfd_device_info fiji_vf_device_info = { .num_of_watch_points = 4, .mqd_size_aligned = MQD_SIZE_ALIGNED, .supports_cwsr = true, + .needs_iommu_device = false, .needs_pci_atomics = false, }; @@ 
-128,6 +140,7 @@ static const struct kfd_device_info polaris10_device_info = { .num_of_watch_points = 4, .mqd_size_aligned = MQD_SIZE_ALIGNED, .supports_cwsr = true, + .needs_iommu_device = false, .needs_pci_atomics = true, }; @@ -140,6 +153,7 @@ static const struct kfd_device_info polaris10_vf_device_info = { .num_of_watch_points = 4, .mqd_size_aligned = MQD_SIZE_ALIGNED, .supports_cwsr = true, + .needs_iommu_device = false, .needs_pci_atomics = false, }; @@ -152,6 +166,7 @@ static const struct kfd_device_info polaris11_device_info = { .num_of_watch_points = 4, .mqd_size_aligned = MQD_SIZE_ALIGNED, .supports_cwsr = true, + .needs_iommu_device = false, .needs_pci_atomics = true, }; @@ -162,6 +177,7 @@ struct kfd_deviceid { }; static const struct kfd_deviceid supported_devices[] = { +#ifdef KFD_SUPPORT_IOMMU_V2 { 0x1304, &kaveri_device_info }, /* Kaveri */ { 0x1305, &kaveri_device_info }, /* Kaveri */ { 0x1306, &kaveri_device_info }, /* Kaveri */ @@ -189,6 +205,7 @@ static const struct kfd_deviceid supported_devices[] = { { 0x9875, &carrizo_device_info }, /* Carrizo */ { 0x9876, &carrizo_device_info }, /* Carrizo */ { 0x9877, &carrizo_device_info }, /* Carrizo */ +#endif { 0x67A0, &hawaii_device_info }, /* Hawaii */ { 0x67A1, &hawaii_device_info }, /* Hawaii */ { 0x67A2, &hawaii_device_info }, /* Hawaii */ @@ -302,77 +319,6 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, return kfd; } -static bool device_iommu_pasid_init(struct kfd_dev *kfd) -{ - const u32 required_iommu_flags = AMD_IOMMU_DEVICE_FLAG_ATS_SUP | - AMD_IOMMU_DEVICE_FLAG_PRI_SUP | - AMD_IOMMU_DEVICE_FLAG_PASID_SUP; - - struct amd_iommu_device_info iommu_info; - unsigned int pasid_limit; - int err; - - err = amd_iommu_device_info(kfd->pdev, &iommu_info); - if (err < 0) { - dev_err(kfd_device, - "error getting iommu info. 
is the iommu enabled?\n"); - return false; - } - - if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) { - dev_err(kfd_device, "error required iommu flags ats %i, pri %i, pasid %i\n", - (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0, - (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0, - (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) - != 0); - return false; - } - - pasid_limit = min_t(unsigned int, - (unsigned int)(1 << kfd->device_info->max_pasid_bits), - iommu_info.max_pasids); - - if (!kfd_set_pasid_limit(pasid_limit)) { - dev_err(kfd_device, "error setting pasid limit\n"); - return false; - } - - return true; -} - -static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid) -{ - struct kfd_dev *dev = kfd_device_by_pci_dev(pdev); - - if (dev) - kfd_process_iommu_unbind_callback(dev, pasid); -} - -/* - * This function called by IOMMU driver on PPR failure - */ -static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid, - unsigned long address, u16 flags) -{ - struct kfd_dev *dev; - - dev_warn(kfd_device, - "Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X", - PCI_BUS_NUM(pdev->devfn), - PCI_SLOT(pdev->devfn), - PCI_FUNC(pdev->devfn), - pasid, - address, - flags); - - dev = kfd_device_by_pci_dev(pdev); - if (!WARN_ON(!dev)) - kfd_signal_iommu_event(dev, pasid, address, - flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC); - - return AMD_IOMMU_INV_PRI_RSP_INVALID; -} - static void kfd_cwsr_init(struct kfd_dev *kfd) { if (cwsr_enable && kfd->device_info->supports_cwsr) { @@ -462,11 +408,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, goto device_queue_manager_error; } - if (!device_iommu_pasid_init(kfd)) { - dev_err(kfd_device, - "Error initializing iommuv2 for device %x:%x\n", - kfd->pdev->vendor, kfd->pdev->device); - goto device_iommu_pasid_error; + if (kfd_iommu_device_init(kfd)) { + dev_err(kfd_device, "Error initializing iommuv2\n"); + goto device_iommu_error; } kfd_cwsr_init(kfd); @@ -486,7 +430,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, goto out; kfd_resume_error: -device_iommu_pasid_error: +device_iommu_error: device_queue_manager_uninit(kfd->dqm); device_queue_manager_error: kfd_interrupt_exit(kfd); @@ -527,11 +471,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd) kfd->dqm->ops.stop(kfd->dqm); - kfd_unbind_processes_from_device(kfd); - - amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL); - amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL); - amd_iommu_free_device(kfd->pdev); + kfd_iommu_suspend(kfd); } int kgd2kfd_resume(struct kfd_dev *kfd) @@ -546,19 +486,14 @@ int kgd2kfd_resume(struct kfd_dev *kfd) static int kfd_resume(struct kfd_dev *kfd) { int err = 0; - unsigned int pasid_limit = kfd_get_pasid_limit(); - - err = amd_iommu_init_device(kfd->pdev, pasid_limit); - if (err) - return -ENXIO; - amd_iommu_set_invalidate_ctx_cb(kfd->pdev, - iommu_pasid_shutdown_callback); - amd_iommu_set_invalid_ppr_cb(kfd->pdev, - iommu_invalid_ppr_cb); - err = kfd_bind_processes_to_device(kfd); - if (err) - goto processes_bind_error; + err = kfd_iommu_resume(kfd); + if (err) { + dev_err(kfd_device, + "Failed to resume IOMMU for device %x:%x\n", + kfd->pdev->vendor, kfd->pdev->device); + return err; + } err = kfd->dqm->ops.start(kfd->dqm); if (err) { @@ -571,9 +506,7 @@ static int kfd_resume(struct kfd_dev *kfd) return err; dqm_start_error: -processes_bind_error: - amd_iommu_free_device(kfd->pdev); - + kfd_iommu_suspend(kfd); return err; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 93aae5c1e78b..6fb9c0d46d63 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -30,6 +30,7 @@ #include #include "kfd_priv.h" #include "kfd_events.h" +#include "kfd_iommu.h" #include /* @@ -837,6 +838,7 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p, } } +#ifdef KFD_SUPPORT_IOMMU_V2 void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid, unsigned long address, bool is_write_requested, bool is_execute_requested) @@ -905,6 +907,7 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid, mutex_unlock(&p->event_mutex); kfd_unref_process(p); } +#endif /* KFD_SUPPORT_IOMMU_V2 */ void kfd_signal_hw_exception_event(unsigned int pasid) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c new file mode 100644 index 000000000000..c71817963eea --- /dev/null +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c @@ -0,0 +1,357 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include "kfd_priv.h" +#include "kfd_dbgmgr.h" +#include "kfd_topology.h" +#include "kfd_iommu.h" + +static const u32 required_iommu_flags = AMD_IOMMU_DEVICE_FLAG_ATS_SUP | + AMD_IOMMU_DEVICE_FLAG_PRI_SUP | + AMD_IOMMU_DEVICE_FLAG_PASID_SUP; + +/** kfd_iommu_check_device - Check whether IOMMU is available for device + */ +int kfd_iommu_check_device(struct kfd_dev *kfd) +{ + struct amd_iommu_device_info iommu_info; + int err; + + if (!kfd->device_info->needs_iommu_device) + return -ENODEV; + + iommu_info.flags = 0; + err = amd_iommu_device_info(kfd->pdev, &iommu_info); + if (err) + return err; + + if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) + return -ENODEV; + + return 0; +} + +/** kfd_iommu_device_init - Initialize IOMMU for device + */ +int kfd_iommu_device_init(struct kfd_dev *kfd) +{ + struct amd_iommu_device_info iommu_info; + unsigned int pasid_limit; + int err; + + if (!kfd->device_info->needs_iommu_device) + return 0; + + iommu_info.flags = 0; + err = amd_iommu_device_info(kfd->pdev, &iommu_info); + if (err < 0) { + dev_err(kfd_device, + "error getting iommu info. 
is the iommu enabled?\n"); + return -ENODEV; + } + + if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) { + dev_err(kfd_device, + "error required iommu flags ats %i, pri %i, pasid %i\n", + (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0, + (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0, + (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) + != 0); + return -ENODEV; + } + + pasid_limit = min_t(unsigned int, + (unsigned int)(1 << kfd->device_info->max_pasid_bits), + iommu_info.max_pasids); + + if (!kfd_set_pasid_limit(pasid_limit)) { + dev_err(kfd_device, "error setting pasid limit\n"); + return -EBUSY; + } + + return 0; +} + +/** kfd_iommu_bind_process_to_device - Have the IOMMU bind a process + * + * Binds the given process to the given device using its PASID. This + * enables IOMMUv2 address translation for the process on the device. + * + * This function assumes that the process mutex is held. + */ +int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd) +{ + struct kfd_dev *dev = pdd->dev; + struct kfd_process *p = pdd->process; + int err; + + if (!dev->device_info->needs_iommu_device || pdd->bound == PDD_BOUND) + return 0; + + if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) { + pr_err("Binding PDD_BOUND_SUSPENDED pdd is unexpected!\n"); + return -EINVAL; + } + + err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread); + if (!err) + pdd->bound = PDD_BOUND; + + return err; +} + +/** kfd_iommu_unbind_process - Unbind process from all devices + * + * This removes all IOMMU device bindings of the process. To be used + * before process termination. + */ +void kfd_iommu_unbind_process(struct kfd_process *p) +{ + struct kfd_process_device *pdd; + + list_for_each_entry(pdd, &p->per_device_data, per_device_list) + if (pdd->bound == PDD_BOUND) + amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid); +} + +/* Callback for process shutdown invoked by the IOMMU driver */ +static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid) +{ + struct kfd_dev *dev = kfd_device_by_pci_dev(pdev); + struct kfd_process *p; + struct kfd_process_device *pdd; + + if (!dev) + return; + + /* + * Look for the process that matches the pasid. If there is no such + * process, we either released it in amdkfd's own notifier, or there + * is a bug. Unfortunately, there is no way to tell... + */ + p = kfd_lookup_process_by_pasid(pasid); + if (!p) + return; + + pr_debug("Unbinding process %d from IOMMU\n", pasid); + + mutex_lock(kfd_get_dbgmgr_mutex()); + + if (dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) { + if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) { + kfd_dbgmgr_destroy(dev->dbgmgr); + dev->dbgmgr = NULL; + } + } + + mutex_unlock(kfd_get_dbgmgr_mutex()); + + mutex_lock(&p->mutex); + + pdd = kfd_get_process_device_data(dev, p); + if (pdd) + /* For GPU relying on IOMMU, we need to dequeue here + * when PASID is still bound. 
+ */ + kfd_process_dequeue_from_device(pdd); + + mutex_unlock(&p->mutex); + + kfd_unref_process(p); +} + +/* This function called by IOMMU driver on PPR failure */ +static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid, + unsigned long address, u16 flags) +{ + struct kfd_dev *dev; + + dev_warn(kfd_device, + "Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X", + PCI_BUS_NUM(pdev->devfn), + PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn), + pasid, + address, + flags); + + dev = kfd_device_by_pci_dev(pdev); + if (!WARN_ON(!dev)) + kfd_signal_iommu_event(dev, pasid, address, + flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC); + + return AMD_IOMMU_INV_PRI_RSP_INVALID; +} + +/* + * Bind processes do the device that have been temporarily unbound + * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device. + */ +static int kfd_bind_processes_to_device(struct kfd_dev *kfd) +{ + struct kfd_process_device *pdd; + struct kfd_process *p; + unsigned int temp; + int err = 0; + + int idx = srcu_read_lock(&kfd_processes_srcu); + + hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { + mutex_lock(&p->mutex); + pdd = kfd_get_process_device_data(kfd, p); + + if (WARN_ON(!pdd) || pdd->bound != PDD_BOUND_SUSPENDED) { + mutex_unlock(&p->mutex); + continue; + } + + err = amd_iommu_bind_pasid(kfd->pdev, p->pasid, + p->lead_thread); + if (err < 0) { + pr_err("Unexpected pasid %d binding failure\n", + p->pasid); + mutex_unlock(&p->mutex); + break; + } + + pdd->bound = PDD_BOUND; + mutex_unlock(&p->mutex); + } + + srcu_read_unlock(&kfd_processes_srcu, idx); + + return err; +} + +/* + * Mark currently bound processes as PDD_BOUND_SUSPENDED. These + * processes will be restored to PDD_BOUND state in + * kfd_bind_processes_to_device. + */ +static void kfd_unbind_processes_from_device(struct kfd_dev *kfd) +{ + struct kfd_process_device *pdd; + struct kfd_process *p; + unsigned int temp; + + int idx = srcu_read_lock(&kfd_processes_srcu); + + hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { + mutex_lock(&p->mutex); + pdd = kfd_get_process_device_data(kfd, p); + + if (WARN_ON(!pdd)) { + mutex_unlock(&p->mutex); + continue; + } + + if (pdd->bound == PDD_BOUND) + pdd->bound = PDD_BOUND_SUSPENDED; + mutex_unlock(&p->mutex); + } + + srcu_read_unlock(&kfd_processes_srcu, idx); +} + +/** kfd_iommu_suspend - Prepare IOMMU for suspend + * + * This unbinds processes from the device and disables the IOMMU for + * the device. + */ +void kfd_iommu_suspend(struct kfd_dev *kfd) +{ + if (!kfd->device_info->needs_iommu_device) + return; + + kfd_unbind_processes_from_device(kfd); + + amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL); + amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL); + amd_iommu_free_device(kfd->pdev); +} + +/** kfd_iommu_resume - Restore IOMMU after resume + * + * This reinitializes the IOMMU for the device and re-binds previously + * suspended processes to the device. 
+ */ +int kfd_iommu_resume(struct kfd_dev *kfd) +{ + unsigned int pasid_limit; + int err; + + if (!kfd->device_info->needs_iommu_device) + return 0; + + pasid_limit = kfd_get_pasid_limit(); + + err = amd_iommu_init_device(kfd->pdev, pasid_limit); + if (err) + return -ENXIO; + + amd_iommu_set_invalidate_ctx_cb(kfd->pdev, + iommu_pasid_shutdown_callback); + amd_iommu_set_invalid_ppr_cb(kfd->pdev, + iommu_invalid_ppr_cb); + + err = kfd_bind_processes_to_device(kfd); + if (err) { + amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL); + amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL); + amd_iommu_free_device(kfd->pdev); + return err; + } + + return 0; +} + +extern bool amd_iommu_pc_supported(void); +extern u8 amd_iommu_pc_get_max_banks(u16 devid); +extern u8 amd_iommu_pc_get_max_counters(u16 devid); + +/** kfd_iommu_add_perf_counters - Add IOMMU performance counters to topology + */ +int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev) +{ + struct kfd_perf_properties *props; + + if (!(kdev->node_props.capability & HSA_CAP_ATS_PRESENT)) + return 0; + + if (!amd_iommu_pc_supported()) + return 0; + + props = kfd_alloc_struct(props); + if (!props) + return -ENOMEM; + strcpy(props->block_name, "iommu"); + props->max_concurrent = amd_iommu_pc_get_max_banks(0) * + amd_iommu_pc_get_max_counters(0); /* assume one iommu */ + list_add_tail(&props->list, &kdev->perf_props); + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h new file mode 100644 index 000000000000..dd23d9fdf6a8 --- /dev/null +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h @@ -0,0 +1,78 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __KFD_IOMMU_H__ +#define __KFD_IOMMU_H__ + +#if defined(CONFIG_AMD_IOMMU_V2_MODULE) || defined(CONFIG_AMD_IOMMU_V2) + +#define KFD_SUPPORT_IOMMU_V2 + +int kfd_iommu_check_device(struct kfd_dev *kfd); +int kfd_iommu_device_init(struct kfd_dev *kfd); + +int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd); +void kfd_iommu_unbind_process(struct kfd_process *p); + +void kfd_iommu_suspend(struct kfd_dev *kfd); +int kfd_iommu_resume(struct kfd_dev *kfd); + +int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev); + +#else + +static inline int kfd_iommu_check_device(struct kfd_dev *kfd) +{ + return -ENODEV; +} +static inline int kfd_iommu_device_init(struct kfd_dev *kfd) +{ + return 0; +} + +static inline int kfd_iommu_bind_process_to_device( + struct kfd_process_device *pdd) +{ + return 0; +} +static inline void kfd_iommu_unbind_process(struct kfd_process *p) +{ + /* empty */ +} + +static inline void kfd_iommu_suspend(struct kfd_dev *kfd) +{ + /* empty */ +} +static inline int kfd_iommu_resume(struct kfd_dev *kfd) +{ + return 0; +} + +static inline int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev) +{ + return 0; +} + +#endif /* defined(CONFIG_AMD_IOMMU_V2) */ + +#endif /* __KFD_IOMMU_H__ */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 594f85355397..f12eb5d98be8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -158,6 +158,7 @@ struct kfd_device_info { uint8_t num_of_watch_points; uint16_t mqd_size_aligned; bool supports_cwsr; + bool needs_iommu_device; bool needs_pci_atomics; }; @@ -517,15 +518,15 @@ struct kfd_process_device { uint64_t scratch_base; uint64_t scratch_limit; - /* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */ - enum kfd_pdd_bound bound; - /* Flag used to tell the pdd has dequeued from the dqm. * This is used to prevent dev->dqm->ops.process_termination() from * being called twice when it is already called in IOMMU callback * function. */ bool already_dequeued; + + /* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */ + enum kfd_pdd_bound bound; }; #define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd) @@ -590,6 +591,10 @@ struct kfd_process { bool signal_event_limit_reached; }; +#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */ +extern DECLARE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE); +extern struct srcu_struct kfd_processes_srcu; + /** * Ioctl function type. * @@ -617,9 +622,6 @@ void kfd_unref_process(struct kfd_process *p); struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev, struct kfd_process *p); -int kfd_bind_processes_to_device(struct kfd_dev *dev); -void kfd_unbind_processes_from_device(struct kfd_dev *dev); -void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid); struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev, struct kfd_process *p); struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 4ff5f0fe6db8..e9aee76ceba9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -35,16 +35,16 @@ struct mm_struct; #include "kfd_priv.h" #include "kfd_dbgmgr.h" +#include "kfd_iommu.h" /* * List of struct kfd_process (field kfd_process). 
* Unique/indexed by mm_struct* */ -#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */ -static DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE); +DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE); static DEFINE_MUTEX(kfd_processes_mutex); -DEFINE_STATIC_SRCU(kfd_processes_srcu); +DEFINE_SRCU(kfd_processes_srcu); static struct workqueue_struct *kfd_process_wq; @@ -173,14 +173,8 @@ static void kfd_process_wq_release(struct work_struct *work) { struct kfd_process *p = container_of(work, struct kfd_process, release_work); - struct kfd_process_device *pdd; - pr_debug("Releasing process (pasid %d) in workqueue\n", p->pasid); - - list_for_each_entry(pdd, &p->per_device_data, per_device_list) { - if (pdd->bound == PDD_BOUND) - amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid); - } + kfd_iommu_unbind_process(p); kfd_process_destroy_pdds(p); @@ -429,133 +423,13 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev, return ERR_PTR(-ENOMEM); } - if (pdd->bound == PDD_BOUND) { - return pdd; - } else if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) { - pr_err("Binding PDD_BOUND_SUSPENDED pdd is unexpected!\n"); - return ERR_PTR(-EINVAL); - } - - err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread); - if (err < 0) + err = kfd_iommu_bind_process_to_device(pdd); + if (err) return ERR_PTR(err); - pdd->bound = PDD_BOUND; - return pdd; } -/* - * Bind processes do the device that have been temporarily unbound - * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device. - */ -int kfd_bind_processes_to_device(struct kfd_dev *dev) -{ - struct kfd_process_device *pdd; - struct kfd_process *p; - unsigned int temp; - int err = 0; - - int idx = srcu_read_lock(&kfd_processes_srcu); - - hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { - mutex_lock(&p->mutex); - pdd = kfd_get_process_device_data(dev, p); - - if (WARN_ON(!pdd) || pdd->bound != PDD_BOUND_SUSPENDED) { - mutex_unlock(&p->mutex); - continue; - } - - err = amd_iommu_bind_pasid(dev->pdev, p->pasid, - p->lead_thread); - if (err < 0) { - pr_err("Unexpected pasid %d binding failure\n", - p->pasid); - mutex_unlock(&p->mutex); - break; - } - - pdd->bound = PDD_BOUND; - mutex_unlock(&p->mutex); - } - - srcu_read_unlock(&kfd_processes_srcu, idx); - - return err; -} - -/* - * Mark currently bound processes as PDD_BOUND_SUSPENDED. These - * processes will be restored to PDD_BOUND state in - * kfd_bind_processes_to_device. - */ -void kfd_unbind_processes_from_device(struct kfd_dev *dev) -{ - struct kfd_process_device *pdd; - struct kfd_process *p; - unsigned int temp; - - int idx = srcu_read_lock(&kfd_processes_srcu); - - hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { - mutex_lock(&p->mutex); - pdd = kfd_get_process_device_data(dev, p); - - if (WARN_ON(!pdd)) { - mutex_unlock(&p->mutex); - continue; - } - - if (pdd->bound == PDD_BOUND) - pdd->bound = PDD_BOUND_SUSPENDED; - mutex_unlock(&p->mutex); - } - - srcu_read_unlock(&kfd_processes_srcu, idx); -} - -void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid) -{ - struct kfd_process *p; - struct kfd_process_device *pdd; - - /* - * Look for the process that matches the pasid. If there is no such - * process, we either released it in amdkfd's own notifier, or there - * is a bug. Unfortunately, there is no way to tell... 
- */ - p = kfd_lookup_process_by_pasid(pasid); - if (!p) - return; - - pr_debug("Unbinding process %d from IOMMU\n", pasid); - - mutex_lock(kfd_get_dbgmgr_mutex()); - - if (dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) { - if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) { - kfd_dbgmgr_destroy(dev->dbgmgr); - dev->dbgmgr = NULL; - } - } - - mutex_unlock(kfd_get_dbgmgr_mutex()); - - mutex_lock(&p->mutex); - - pdd = kfd_get_process_device_data(dev, p); - if (pdd) - /* For GPU relying on IOMMU, we need to dequeue here - * when PASID is still bound. - */ - kfd_process_dequeue_from_device(pdd); - - mutex_unlock(&p->mutex); - - kfd_unref_process(p); -} - struct kfd_process_device *kfd_get_first_process_device_data( struct kfd_process *p) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 7783250e1c6d..250615535563 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -35,6 +35,7 @@ #include "kfd_crat.h" #include "kfd_topology.h" #include "kfd_device_queue_manager.h" +#include "kfd_iommu.h" /* topology_device_list - Master list of all topology devices */ static struct list_head topology_device_list; @@ -875,19 +876,8 @@ static void find_system_memory(const struct dmi_header *dm, */ static int kfd_add_perf_to_topology(struct kfd_topology_device *kdev) { - struct kfd_perf_properties *props; - - if (amd_iommu_pc_supported()) { - props = kfd_alloc_struct(props); - if (!props) - return -ENOMEM; - strcpy(props->block_name, "iommu"); - props->max_concurrent = amd_iommu_pc_get_max_banks(0) * - amd_iommu_pc_get_max_counters(0); /* assume one iommu */ - list_add_tail(&props->list, &kdev->perf_props); - } - - return 0; + /* These are the only counters supported so far */ + return kfd_iommu_add_perf_counters(kdev); } /* kfd_add_non_crat_information - Add information that is not currently diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h index 53fca1f45401..c0be2be6dca5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h @@ -25,7 +25,7 @@ #include #include -#include "kfd_priv.h" +#include "kfd_crat.h" #define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 128 @@ -183,8 +183,4 @@ struct kfd_topology_device *kfd_create_topology_device( struct list_head *device_list); void kfd_release_topology_device_list(struct list_head *device_list); -extern bool amd_iommu_pc_supported(void); -extern u8 amd_iommu_pc_get_max_banks(u16 devid); -extern u8 amd_iommu_pc_get_max_counters(u16 devid); - #endif /* __KFD_TOPOLOGY_H__ */ -- cgit From 4252bf686622f6c71958c4fabbcb6a64deba1cf7 Mon Sep 17 00:00:00 2001 From: Harish Kasiviswanathan Date: Tue, 6 Feb 2018 20:32:42 -0500 Subject: drm/amdkfd: Remove unaligned memory access Unaligned atomic operations can cause problems on some CPU architectures. Use simpler bitmask operations instead. Atomic bit manipulations are not necessary since dqm->lock is held during these operations. 
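
For readers who do not know the idiom, here is a stand-alone sketch (illustration only, not part of the patch) of the ffs()-based allocate/free pattern the driver switches to. It assumes a 32-bit bitmap of free slots protected by an external lock, mirroring how dqm->lock covers vmid_bitmap, allocated_queues and sdma_bitmap in the diff below; the user-space ffs() from strings.h stands in for the kernel helper, and the file-scope bitmap is just for brevity.

#include <stdio.h>
#include <strings.h>	/* user-space ffs(); the kernel provides its own */

static unsigned int bitmap = 0xf;	/* four free slots; caller holds the lock */

static int alloc_slot(void)
{
	int bit;

	if (bitmap == 0)
		return -1;		/* nothing left */

	bit = ffs(bitmap) - 1;		/* index of the lowest set bit */
	bitmap &= ~(1u << bit);		/* claim it: plain stores, no atomics */
	return bit;
}

static void free_slot(int bit)
{
	bitmap |= (1u << bit);		/* hand it back */
}

int main(void)
{
	int a = alloc_slot(), b = alloc_slot();

	printf("allocated %d and %d\n", a, b);
	free_slot(a);
	printf("reused %d\n", alloc_slot());
	return 0;
}

The point of the change is visible in the sketch: because the lock already serializes every reader and writer of the bitmap, ordinary loads, stores and bit masks are sufficient, and the word no longer has to satisfy the alignment requirements of atomic bitops.
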
Signed-off-by: Harish Kasiviswanathan Signed-off-by: Felix Kuehling Acked-by: Oded Gabbay Signed-off-by: Oded Gabbay --- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 25 ++++++++-------------- 1 file changed, 9 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 47d493ea8e4f..1a28dc2c661e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -118,9 +118,8 @@ static int allocate_vmid(struct device_queue_manager *dqm, if (dqm->vmid_bitmap == 0) return -ENOMEM; - bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap, - dqm->dev->vm_info.vmid_num_kfd); - clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap); + bit = ffs(dqm->vmid_bitmap) - 1; + dqm->vmid_bitmap &= ~(1 << bit); allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd; pr_debug("vmid allocation %d\n", allocated_vmid); @@ -142,7 +141,7 @@ static void deallocate_vmid(struct device_queue_manager *dqm, /* Release the vmid mapping */ set_pasid_vmid_mapping(dqm, 0, qpd->vmid); - set_bit(bit, (unsigned long *)&dqm->vmid_bitmap); + dqm->vmid_bitmap |= (1 << bit); qpd->vmid = 0; q->properties.vmid = 0; } @@ -223,12 +222,8 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q) continue; if (dqm->allocated_queues[pipe] != 0) { - bit = find_first_bit( - (unsigned long *)&dqm->allocated_queues[pipe], - get_queues_per_pipe(dqm)); - - clear_bit(bit, - (unsigned long *)&dqm->allocated_queues[pipe]); + bit = ffs(dqm->allocated_queues[pipe]) - 1; + dqm->allocated_queues[pipe] &= ~(1 << bit); q->pipe = pipe; q->queue = bit; set = true; @@ -249,7 +244,7 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q) static inline void deallocate_hqd(struct device_queue_manager *dqm, struct queue *q) { - set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]); + dqm->allocated_queues[q->pipe] |= (1 << q->queue); } static int create_compute_queue_nocpsch(struct device_queue_manager *dqm, @@ -589,10 +584,8 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, if (dqm->sdma_bitmap == 0) return -ENOMEM; - bit = find_first_bit((unsigned long *)&dqm->sdma_bitmap, - CIK_SDMA_QUEUES); - - clear_bit(bit, (unsigned long *)&dqm->sdma_bitmap); + bit = ffs(dqm->sdma_bitmap) - 1; + dqm->sdma_bitmap &= ~(1 << bit); *sdma_queue_id = bit; return 0; @@ -603,7 +596,7 @@ static void deallocate_sdma_queue(struct device_queue_manager *dqm, { if (sdma_queue_id >= CIK_SDMA_QUEUES) return; - set_bit(sdma_queue_id, (unsigned long *)&dqm->sdma_bitmap); + dqm->sdma_bitmap |= (1 << sdma_queue_id); } static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm, -- cgit From 403575c44e61722ae443b47df66e188b367d7324 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 6 Feb 2018 20:32:44 -0500 Subject: drm/amdkfd: Add GPUVM virtual address space to PDD Create/destroy the GPUVM context during PDD creation/destruction. Get VM page table base and program it during process registration (HWS) or VMID allocation (non-HWS). 
v2: * Used dev instead of pdd->dev in kfd_flush_tlb Signed-off-by: Felix Kuehling Acked-by: Oded Gabbay Signed-off-by: Oded Gabbay --- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 20 +++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 13 +++++++++ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 33 ++++++++++++++++++++++ 3 files changed, 66 insertions(+) (limited to 'drivers/gpu/drm/amd/amdkfd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 1a28dc2c661e..b7d06395d592 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -129,6 +129,15 @@ static int allocate_vmid(struct device_queue_manager *dqm, set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid); program_sh_mem_settings(dqm, qpd); + /* qpd->page_table_base is set earlier when register_process() + * is called, i.e. when the first queue is created. + */ + dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd, + qpd->vmid, + qpd->page_table_base); + /* invalidate the VM context after pasid and vmid mapping is set up */ + kfd_flush_tlb(qpd_to_pdd(qpd)); + return 0; } @@ -138,6 +147,8 @@ static void deallocate_vmid(struct device_queue_manager *dqm, { int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd; + kfd_flush_tlb(qpd_to_pdd(qpd)); + /* Release the vmid mapping */ set_pasid_vmid_mapping(dqm, 0, qpd->vmid); @@ -450,6 +461,8 @@ static int register_process(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { struct device_process_node *n; + struct kfd_process_device *pdd; + uint32_t pd_base; int retval; n = kzalloc(sizeof(*n), GFP_KERNEL); @@ -458,9 +471,16 @@ static int register_process(struct device_queue_manager *dqm, n->qpd = qpd; + pdd = qpd_to_pdd(qpd); + /* Retrieve PD base */ + pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm); + mutex_lock(&dqm->lock); list_add(&n->list, &dqm->queues); + /* Update PD Base in QPD */ + qpd->page_table_base = pd_base; + retval = dqm->asic_ops.update_qpd(dqm, qpd); dqm->processes_count++; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index f12eb5d98be8..56c2e368f702 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -518,6 +518,9 @@ struct kfd_process_device { uint64_t scratch_base; uint64_t scratch_limit; + /* VM context for GPUVM allocations */ + void *vm; + /* Flag used to tell the pdd has dequeued from the dqm. * This is used to prevent dev->dqm->ops.process_termination() from * being called twice when it is already called in IOMMU callback @@ -589,6 +592,14 @@ struct kfd_process { size_t signal_mapped_size; size_t signal_event_count; bool signal_event_limit_reached; + + /* Information used for memory eviction */ + void *kgd_process_info; + /* Eviction fence that is attached to all the BOs of this process. 
The + * fence will be triggered during eviction and new one will be created + * during restore + */ + struct dma_fence *ef; }; #define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */ @@ -802,6 +813,8 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p, uint64_t *event_page_offset, uint32_t *event_slot_index); int kfd_event_destroy(struct kfd_process *p, uint32_t event_id); +void kfd_flush_tlb(struct kfd_process_device *pdd); + int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p); /* Debugfs */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index e9aee76ceba9..cf4fa25cc430 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -34,6 +34,7 @@ struct mm_struct; #include "kfd_priv.h" +#include "kfd_device_queue_manager.h" #include "kfd_dbgmgr.h" #include "kfd_iommu.h" @@ -154,6 +155,10 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n", pdd->dev->id, p->pasid); + if (pdd->vm) + pdd->dev->kfd2kgd->destroy_process_vm( + pdd->dev->kgd, pdd->vm); + list_del(&pdd->per_device_list); if (pdd->qpd.cwsr_kaddr) @@ -177,6 +182,7 @@ static void kfd_process_wq_release(struct work_struct *work) kfd_iommu_unbind_process(p); kfd_process_destroy_pdds(p); + dma_fence_put(p->ef); kfd_event_free_process(p); @@ -401,7 +407,18 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, pdd->already_dequeued = false; list_add(&pdd->per_device_list, &p->per_device_data); + /* Create the GPUVM context for this specific device */ + if (dev->kfd2kgd->create_process_vm(dev->kgd, &pdd->vm, + &p->kgd_process_info, &p->ef)) { + pr_err("Failed to create process VM object\n"); + goto err_create_pdd; + } return pdd; + +err_create_pdd: + list_del(&pdd->per_device_list); + kfree(pdd); + return NULL; } /* @@ -507,6 +524,22 @@ int kfd_reserved_mem_mmap(struct kfd_process *process, KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot); } +void kfd_flush_tlb(struct kfd_process_device *pdd) +{ + struct kfd_dev *dev = pdd->dev; + const struct kfd2kgd_calls *f2g = dev->kfd2kgd; + + if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { + /* Nothing to flush until a VMID is assigned, which + * only happens when the first queue is created. + */ + if (pdd->qpd.vmid) + f2g->invalidate_tlbs_vmid(dev->kgd, pdd->qpd.vmid); + } else { + f2g->invalidate_tlbs(dev->kgd, pdd->process->pasid); + } +} + #if defined(CONFIG_DEBUG_FS) int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data) -- cgit From 26103436da003327017af325483b6150a3b855cc Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 6 Feb 2018 20:32:45 -0500 Subject: drm/amdkfd: Implement KFD process eviction/restore When the TTM memory manager in KGD evicts BOs, all user mode queues potentially accessing these BOs must be evicted temporarily. Once user mode queues are evicted, the eviction fence is signaled, allowing the migration of the BO to proceed. A delayed worker is scheduled to restore all the BOs belonging to the evicted process and restart its queues. During suspend/resume of the GPU we also evict all processes to allow KGD to save BOs in system memory, since VRAM will be lost. 
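
The core of the patch is the per-device eviction counter (qpd->evicted). The stand-alone sketch below (illustration only, with printf standing in for the real queue unmap/remap and TLB flush calls) shows the reference-counting rule the new evict_process_queues_* and restore_process_queues_* callbacks follow: only the first eviction and the matching final restore touch the hardware, so nested evictions from different sources stay balanced.

#include <stdio.h>

struct qpd_sketch {
	unsigned int evicted;	/* 0 = active, >0 = eviction nesting depth */
};

static void evict(struct qpd_sketch *qpd)
{
	if (qpd->evicted++ > 0)
		return;			/* already evicted, just bump the count */
	printf("unmapping all active queues\n");
}

static void restore(struct qpd_sketch *qpd)
{
	if (qpd->evicted == 0)
		return;			/* unbalanced restore, nothing to do */
	if (qpd->evicted > 1) {
		qpd->evicted--;		/* someone else still wants us evicted */
		return;
	}
	printf("remapping queues and flushing TLBs\n");
	qpd->evicted = 0;
}

int main(void)
{
	struct qpd_sketch qpd = { 0 };

	evict(&qpd);	/* first eviction does the work */
	evict(&qpd);	/* nested eviction is only counted */
	restore(&qpd);	/* still evicted */
	restore(&qpd);	/* last restore does the work */
	return 0;
}
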
v2: * Account for eviction when updating of q->is_active in MQD manager Signed-off-by: Harish Kasiviswanathan Signed-off-by: Felix Kuehling Acked-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 65 +++++- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 219 ++++++++++++++++++++- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 9 + drivers/gpu/drm/amd/amdkfd/kfd_module.c | 2 + drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 9 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 6 +- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 32 ++- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 213 ++++++++++++++++++++ 8 files changed, 547 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 4ac2d61b65d5..3346699960dd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -33,6 +33,7 @@ #include "kfd_iommu.h" #define MQD_SIZE_ALIGNED 768 +static atomic_t kfd_device_suspended = ATOMIC_INIT(0); #ifdef KFD_SUPPORT_IOMMU_V2 static const struct kfd_device_info kaveri_device_info = { @@ -469,6 +470,10 @@ void kgd2kfd_suspend(struct kfd_dev *kfd) if (!kfd->init_complete) return; + /* For first KFD device suspend all the KFD processes */ + if (atomic_inc_return(&kfd_device_suspended) == 1) + kfd_suspend_all_processes(); + kfd->dqm->ops.stop(kfd->dqm); kfd_iommu_suspend(kfd); @@ -476,11 +481,21 @@ void kgd2kfd_suspend(struct kfd_dev *kfd) int kgd2kfd_resume(struct kfd_dev *kfd) { + int ret, count; + if (!kfd->init_complete) return 0; - return kfd_resume(kfd); + ret = kfd_resume(kfd); + if (ret) + return ret; + + count = atomic_dec_return(&kfd_device_suspended); + WARN_ONCE(count < 0, "KFD suspend / resume ref. error"); + if (count == 0) + ret = kfd_resume_all_processes(); + return ret; } static int kfd_resume(struct kfd_dev *kfd) @@ -526,6 +541,54 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) spin_unlock(&kfd->interrupt_lock); } +/** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will + * prepare for safe eviction of KFD BOs that belong to the specified + * process. + * + * @mm: mm_struct that identifies the specified KFD process + * @fence: eviction fence attached to KFD process BOs + * + */ +int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm, + struct dma_fence *fence) +{ + struct kfd_process *p; + unsigned long active_time; + unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS); + + if (!fence) + return -EINVAL; + + if (dma_fence_is_signaled(fence)) + return 0; + + p = kfd_lookup_process_by_mm(mm); + if (!p) + return -ENODEV; + + if (fence->seqno == p->last_eviction_seqno) + goto out; + + p->last_eviction_seqno = fence->seqno; + + /* Avoid KFD process starvation. 
Wait for at least + * PROCESS_ACTIVE_TIME_MS before evicting the process again + */ + active_time = get_jiffies_64() - p->last_restore_timestamp; + if (delay_jiffies > active_time) + delay_jiffies -= active_time; + else + delay_jiffies = 0; + + /* During process initialization eviction_work.dwork is initialized + * to kfd_evict_bo_worker + */ + schedule_delayed_work(&p->eviction_work, delay_jiffies); +out: + kfd_unref_process(p); + return 0; +} + static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, unsigned int chunk_size) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index b7d06395d592..b3b6dab71638 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -21,10 +21,11 @@ * */ +#include +#include #include #include #include -#include #include #include #include "kfd_priv.h" @@ -180,6 +181,14 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm, goto out_unlock; } q->properties.vmid = qpd->vmid; + /* + * Eviction state logic: we only mark active queues as evicted + * to avoid the overhead of restoring inactive queues later + */ + if (qpd->evicted) + q->properties.is_evicted = (q->properties.queue_size > 0 && + q->properties.queue_percent > 0 && + q->properties.queue_address != 0); q->properties.tba_addr = qpd->tba_addr; q->properties.tma_addr = qpd->tma_addr; @@ -377,15 +386,29 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) { int retval; struct mqd_manager *mqd; + struct kfd_process_device *pdd; bool prev_active = false; mutex_lock(&dqm->lock); + pdd = kfd_get_process_device_data(q->device, q->process); + if (!pdd) { + retval = -ENODEV; + goto out_unlock; + } mqd = dqm->ops.get_mqd_manager(dqm, get_mqd_type_from_queue_type(q->properties.type)); if (!mqd) { retval = -ENOMEM; goto out_unlock; } + /* + * Eviction state logic: we only mark active queues as evicted + * to avoid the overhead of restoring inactive queues later + */ + if (pdd->qpd.evicted) + q->properties.is_evicted = (q->properties.queue_size > 0 && + q->properties.queue_percent > 0 && + q->properties.queue_address != 0); /* Save previous activity state for counters */ prev_active = q->properties.is_active; @@ -457,6 +480,187 @@ static struct mqd_manager *get_mqd_manager( return mqd; } +static int evict_process_queues_nocpsch(struct device_queue_manager *dqm, + struct qcm_process_device *qpd) +{ + struct queue *q; + struct mqd_manager *mqd; + struct kfd_process_device *pdd; + int retval = 0; + + mutex_lock(&dqm->lock); + if (qpd->evicted++ > 0) /* already evicted, do nothing */ + goto out; + + pdd = qpd_to_pdd(qpd); + pr_info_ratelimited("Evicting PASID %u queues\n", + pdd->process->pasid); + + /* unactivate all active queues on the qpd */ + list_for_each_entry(q, &qpd->queues_list, list) { + if (!q->properties.is_active) + continue; + mqd = dqm->ops.get_mqd_manager(dqm, + get_mqd_type_from_queue_type(q->properties.type)); + if (!mqd) { /* should not be here */ + pr_err("Cannot evict queue, mqd mgr is NULL\n"); + retval = -ENOMEM; + goto out; + } + q->properties.is_evicted = true; + q->properties.is_active = false; + retval = mqd->destroy_mqd(mqd, q->mqd, + KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN, + KFD_UNMAP_LATENCY_MS, q->pipe, q->queue); + if (retval) + goto out; + dqm->queue_count--; + } + +out: + mutex_unlock(&dqm->lock); + return retval; +} + +static int evict_process_queues_cpsch(struct device_queue_manager *dqm, + 
struct qcm_process_device *qpd) +{ + struct queue *q; + struct kfd_process_device *pdd; + int retval = 0; + + mutex_lock(&dqm->lock); + if (qpd->evicted++ > 0) /* already evicted, do nothing */ + goto out; + + pdd = qpd_to_pdd(qpd); + pr_info_ratelimited("Evicting PASID %u queues\n", + pdd->process->pasid); + + /* unactivate all active queues on the qpd */ + list_for_each_entry(q, &qpd->queues_list, list) { + if (!q->properties.is_active) + continue; + q->properties.is_evicted = true; + q->properties.is_active = false; + dqm->queue_count--; + } + retval = execute_queues_cpsch(dqm, + qpd->is_debug ? + KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES : + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); + +out: + mutex_unlock(&dqm->lock); + return retval; +} + +static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, + struct qcm_process_device *qpd) +{ + struct queue *q; + struct mqd_manager *mqd; + struct kfd_process_device *pdd; + uint32_t pd_base; + int retval = 0; + + pdd = qpd_to_pdd(qpd); + /* Retrieve PD base */ + pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm); + + mutex_lock(&dqm->lock); + if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */ + goto out; + if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */ + qpd->evicted--; + goto out; + } + + pr_info_ratelimited("Restoring PASID %u queues\n", + pdd->process->pasid); + + /* Update PD Base in QPD */ + qpd->page_table_base = pd_base; + pr_debug("Updated PD address to 0x%08x\n", pd_base); + + if (!list_empty(&qpd->queues_list)) { + dqm->dev->kfd2kgd->set_vm_context_page_table_base( + dqm->dev->kgd, + qpd->vmid, + qpd->page_table_base); + kfd_flush_tlb(pdd); + } + + /* activate all active queues on the qpd */ + list_for_each_entry(q, &qpd->queues_list, list) { + if (!q->properties.is_evicted) + continue; + mqd = dqm->ops.get_mqd_manager(dqm, + get_mqd_type_from_queue_type(q->properties.type)); + if (!mqd) { /* should not be here */ + pr_err("Cannot restore queue, mqd mgr is NULL\n"); + retval = -ENOMEM; + goto out; + } + q->properties.is_evicted = false; + q->properties.is_active = true; + retval = mqd->load_mqd(mqd, q->mqd, q->pipe, + q->queue, &q->properties, + q->process->mm); + if (retval) + goto out; + dqm->queue_count++; + } + qpd->evicted = 0; +out: + mutex_unlock(&dqm->lock); + return retval; +} + +static int restore_process_queues_cpsch(struct device_queue_manager *dqm, + struct qcm_process_device *qpd) +{ + struct queue *q; + struct kfd_process_device *pdd; + uint32_t pd_base; + int retval = 0; + + pdd = qpd_to_pdd(qpd); + /* Retrieve PD base */ + pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm); + + mutex_lock(&dqm->lock); + if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */ + goto out; + if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */ + qpd->evicted--; + goto out; + } + + pr_info_ratelimited("Restoring PASID %u queues\n", + pdd->process->pasid); + + /* Update PD Base in QPD */ + qpd->page_table_base = pd_base; + pr_debug("Updated PD address to 0x%08x\n", pd_base); + + /* activate all active queues on the qpd */ + list_for_each_entry(q, &qpd->queues_list, list) { + if (!q->properties.is_evicted) + continue; + q->properties.is_evicted = false; + q->properties.is_active = true; + dqm->queue_count++; + } + retval = execute_queues_cpsch(dqm, + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); + if (!retval) + qpd->evicted = 0; +out: + mutex_unlock(&dqm->lock); + return retval; +} + static int register_process(struct device_queue_manager *dqm, 
struct qcm_process_device *qpd) { @@ -853,6 +1057,14 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, retval = -ENOMEM; goto out; } + /* + * Eviction state logic: we only mark active queues as evicted + * to avoid the overhead of restoring inactive queues later + */ + if (qpd->evicted) + q->properties.is_evicted = (q->properties.queue_size > 0 && + q->properties.queue_percent > 0 && + q->properties.queue_address != 0); dqm->asic_ops.init_sdma_vm(dqm, q, qpd); @@ -1291,6 +1503,8 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) dqm->ops.set_cache_memory_policy = set_cache_memory_policy; dqm->ops.set_trap_handler = set_trap_handler; dqm->ops.process_termination = process_termination_cpsch; + dqm->ops.evict_process_queues = evict_process_queues_cpsch; + dqm->ops.restore_process_queues = restore_process_queues_cpsch; break; case KFD_SCHED_POLICY_NO_HWS: /* initialize dqm for no cp scheduling */ @@ -1307,6 +1521,9 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) dqm->ops.set_cache_memory_policy = set_cache_memory_policy; dqm->ops.set_trap_handler = set_trap_handler; dqm->ops.process_termination = process_termination_nocpsch; + dqm->ops.evict_process_queues = evict_process_queues_nocpsch; + dqm->ops.restore_process_queues = + restore_process_queues_nocpsch; break; default: pr_err("Invalid scheduling policy %d\n", dqm->sched_policy); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index 68be0aaff3f4..412beff3281d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -79,6 +79,10 @@ struct device_process_node { * * @process_termination: Clears all process queues belongs to that device. 
* + * @evict_process_queues: Evict all active queues of a process + * + * @restore_process_queues: Restore all evicted queues queues of a process + * */ struct device_queue_manager_ops { @@ -129,6 +133,11 @@ struct device_queue_manager_ops { int (*process_termination)(struct device_queue_manager *dqm, struct qcm_process_device *qpd); + + int (*evict_process_queues)(struct device_queue_manager *dqm, + struct qcm_process_device *qpd); + int (*restore_process_queues)(struct device_queue_manager *dqm, + struct qcm_process_device *qpd); }; struct device_queue_manager_asic_ops { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index 3ac72bed4f31..65574c6a10b3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c @@ -43,6 +43,8 @@ static const struct kgd2kfd_calls kgd2kfd = { .interrupt = kgd2kfd_interrupt, .suspend = kgd2kfd_suspend, .resume = kgd2kfd_resume, + .schedule_evict_and_restore_process = + kgd2kfd_schedule_evict_and_restore_process, }; int sched_policy = KFD_SCHED_POLICY_HWS; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index fbe3f83ba685..c00c325ed3c9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -202,7 +202,8 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd, q->is_active = (q->queue_size > 0 && q->queue_address != 0 && - q->queue_percent > 0); + q->queue_percent > 0 && + !q->is_evicted); return 0; } @@ -245,7 +246,8 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd, q->is_active = (q->queue_size > 0 && q->queue_address != 0 && - q->queue_percent > 0); + q->queue_percent > 0 && + !q->is_evicted); return 0; } @@ -377,7 +379,8 @@ static int update_mqd_hiq(struct mqd_manager *mm, void *mqd, q->is_active = (q->queue_size > 0 && q->queue_address != 0 && - q->queue_percent > 0); + q->queue_percent > 0 && + !q->is_evicted); return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index 58221c1fc917..89e4242e43e7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -198,7 +198,8 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd, q->is_active = (q->queue_size > 0 && q->queue_address != 0 && - q->queue_percent > 0); + q->queue_percent > 0 && + !q->is_evicted); return 0; } @@ -342,7 +343,8 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd, q->is_active = (q->queue_size > 0 && q->queue_address != 0 && - q->queue_percent > 0); + q->queue_percent > 0 && + !q->is_evicted); return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 56c2e368f702..cac7aa258162 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -335,7 +335,11 @@ enum kfd_queue_format { * @is_interop: Defines if this is a interop queue. Interop queue means that * the queue can access both graphics and compute resources. * - * @is_active: Defines if the queue is active or not. + * @is_evicted: Defines if the queue is evicted. Only active queues + * are evicted, rendering them inactive. + * + * @is_active: Defines if the queue is active or not. @is_active and + * @is_evicted are protected by the DQM lock. * * @vmid: If the scheduling mode is no cp scheduling the field defines the vmid * of the queue. 
@@ -357,6 +361,7 @@ struct queue_properties { uint32_t __iomem *doorbell_ptr; uint32_t doorbell_off; bool is_interop; + bool is_evicted; bool is_active; /* Not relevant for user mode queues in cp scheduling */ unsigned int vmid; @@ -460,6 +465,7 @@ struct qcm_process_device { unsigned int queue_count; unsigned int vmid; bool is_debug; + unsigned int evicted; /* eviction counter, 0=active */ /* This flag tells if we should reset all wavefronts on * process termination @@ -486,6 +492,17 @@ struct qcm_process_device { uint64_t tma_addr; }; +/* KFD Memory Eviction */ + +/* Approx. wait time before attempting to restore evicted BOs */ +#define PROCESS_RESTORE_TIME_MS 100 +/* Approx. back off time if restore fails due to lack of memory */ +#define PROCESS_BACK_OFF_TIME_MS 100 +/* Approx. time before evicting the process again */ +#define PROCESS_ACTIVE_TIME_MS 10 + +int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm, + struct dma_fence *fence); enum kfd_pdd_bound { PDD_UNBOUND = 0, @@ -600,6 +617,16 @@ struct kfd_process { * during restore */ struct dma_fence *ef; + + /* Work items for evicting and restoring BOs */ + struct delayed_work eviction_work; + struct delayed_work restore_work; + /* seqno of the last scheduled eviction */ + unsigned int last_eviction_seqno; + /* Approx. the last timestamp (in jiffies) when the process was + * restored after an eviction + */ + unsigned long last_restore_timestamp; }; #define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */ @@ -629,7 +656,10 @@ void kfd_process_destroy_wq(void); struct kfd_process *kfd_create_process(struct file *filep); struct kfd_process *kfd_get_process(const struct task_struct *); struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid); +struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm); void kfd_unref_process(struct kfd_process *p); +void kfd_suspend_all_processes(void); +int kfd_resume_all_processes(void); struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev, struct kfd_process *p); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index cf4fa25cc430..18b2b86ad503 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -55,6 +55,9 @@ static struct kfd_process *create_process(const struct task_struct *thread, struct file *filep); static int kfd_process_init_cwsr(struct kfd_process *p, struct file *filep); +static void evict_process_worker(struct work_struct *work); +static void restore_process_worker(struct work_struct *work); + void kfd_process_create_wq(void) { @@ -230,6 +233,9 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn, mutex_unlock(&kfd_processes_mutex); synchronize_srcu(&kfd_processes_srcu); + cancel_delayed_work_sync(&p->eviction_work); + cancel_delayed_work_sync(&p->restore_work); + mutex_lock(&p->mutex); /* Iterate over all process device data structures and if the @@ -351,6 +357,10 @@ static struct kfd_process *create_process(const struct task_struct *thread, if (err != 0) goto err_init_apertures; + INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker); + INIT_DELAYED_WORK(&process->restore_work, restore_process_worker); + process->last_restore_timestamp = get_jiffies_64(); + err = kfd_process_init_cwsr(process, filep); if (err) goto err_init_cwsr; @@ -402,6 +412,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, INIT_LIST_HEAD(&pdd->qpd.priv_queue_list); pdd->qpd.dqm = dev->dqm; pdd->qpd.pqm 
= &p->pqm; + pdd->qpd.evicted = 0; pdd->process = p; pdd->bound = PDD_UNBOUND; pdd->already_dequeued = false; @@ -490,6 +501,208 @@ struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid) return ret_p; } +/* This increments the process->ref counter. */ +struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm) +{ + struct kfd_process *p; + + int idx = srcu_read_lock(&kfd_processes_srcu); + + p = find_process_by_mm(mm); + if (p) + kref_get(&p->ref); + + srcu_read_unlock(&kfd_processes_srcu, idx); + + return p; +} + +/* process_evict_queues - Evict all user queues of a process + * + * Eviction is reference-counted per process-device. This means multiple + * evictions from different sources can be nested safely. + */ +static int process_evict_queues(struct kfd_process *p) +{ + struct kfd_process_device *pdd; + int r = 0; + unsigned int n_evicted = 0; + + list_for_each_entry(pdd, &p->per_device_data, per_device_list) { + r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm, + &pdd->qpd); + if (r) { + pr_err("Failed to evict process queues\n"); + goto fail; + } + n_evicted++; + } + + return r; + +fail: + /* To keep state consistent, roll back partial eviction by + * restoring queues + */ + list_for_each_entry(pdd, &p->per_device_data, per_device_list) { + if (n_evicted == 0) + break; + if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, + &pdd->qpd)) + pr_err("Failed to restore queues\n"); + + n_evicted--; + } + + return r; +} + +/* process_restore_queues - Restore all user queues of a process */ +static int process_restore_queues(struct kfd_process *p) +{ + struct kfd_process_device *pdd; + int r, ret = 0; + + list_for_each_entry(pdd, &p->per_device_data, per_device_list) { + r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, + &pdd->qpd); + if (r) { + pr_err("Failed to restore process queues\n"); + if (!ret) + ret = r; + } + } + + return ret; +} + +static void evict_process_worker(struct work_struct *work) +{ + int ret; + struct kfd_process *p; + struct delayed_work *dwork; + + dwork = to_delayed_work(work); + + /* Process termination destroys this worker thread. So during the + * lifetime of this thread, kfd_process p will be valid + */ + p = container_of(dwork, struct kfd_process, eviction_work); + WARN_ONCE(p->last_eviction_seqno != p->ef->seqno, + "Eviction fence mismatch\n"); + + /* Narrow window of overlap between restore and evict work + * item is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos + * unreserves KFD BOs, it is possible to evicted again. But + * restore has few more steps of finish. So lets wait for any + * previous restore work to complete + */ + flush_delayed_work(&p->restore_work); + + pr_debug("Started evicting pasid %d\n", p->pasid); + ret = process_evict_queues(p); + if (!ret) { + dma_fence_signal(p->ef); + dma_fence_put(p->ef); + p->ef = NULL; + schedule_delayed_work(&p->restore_work, + msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)); + + pr_debug("Finished evicting pasid %d\n", p->pasid); + } else + pr_err("Failed to evict queues of pasid %d\n", p->pasid); +} + +static void restore_process_worker(struct work_struct *work) +{ + struct delayed_work *dwork; + struct kfd_process *p; + struct kfd_process_device *pdd; + int ret = 0; + + dwork = to_delayed_work(work); + + /* Process termination destroys this worker thread. So during the + * lifetime of this thread, kfd_process p will be valid + */ + p = container_of(dwork, struct kfd_process, restore_work); + + /* Call restore_process_bos on the first KGD device. 
This function + * takes care of restoring the whole process including other devices. + * Restore can fail if enough memory is not available. If so, + * reschedule again. + */ + pdd = list_first_entry(&p->per_device_data, + struct kfd_process_device, + per_device_list); + + pr_debug("Started restoring pasid %d\n", p->pasid); + + /* Setting last_restore_timestamp before successful restoration. + * Otherwise this would have to be set by KGD (restore_process_bos) + * before KFD BOs are unreserved. If not, the process can be evicted + * again before the timestamp is set. + * If restore fails, the timestamp will be set again in the next + * attempt. This would mean that the minimum GPU quanta would be + * PROCESS_ACTIVE_TIME_MS - (time to execute the following two + * functions) + */ + + p->last_restore_timestamp = get_jiffies_64(); + ret = pdd->dev->kfd2kgd->restore_process_bos(p->kgd_process_info, + &p->ef); + if (ret) { + pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n", + p->pasid, PROCESS_BACK_OFF_TIME_MS); + ret = schedule_delayed_work(&p->restore_work, + msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS)); + WARN(!ret, "reschedule restore work failed\n"); + return; + } + + ret = process_restore_queues(p); + if (!ret) + pr_debug("Finished restoring pasid %d\n", p->pasid); + else + pr_err("Failed to restore queues of pasid %d\n", p->pasid); +} + +void kfd_suspend_all_processes(void) +{ + struct kfd_process *p; + unsigned int temp; + int idx = srcu_read_lock(&kfd_processes_srcu); + + hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { + cancel_delayed_work_sync(&p->eviction_work); + cancel_delayed_work_sync(&p->restore_work); + + if (process_evict_queues(p)) + pr_err("Failed to suspend process %d\n", p->pasid); + dma_fence_signal(p->ef); + dma_fence_put(p->ef); + p->ef = NULL; + } + srcu_read_unlock(&kfd_processes_srcu, idx); +} + +int kfd_resume_all_processes(void) +{ + struct kfd_process *p; + unsigned int temp; + int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu); + + hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { + if (!schedule_delayed_work(&p->restore_work, 0)) { + pr_err("Restore process %d failed during resume\n", + p->pasid); + ret = -EFAULT; + } + } + srcu_read_unlock(&kfd_processes_srcu, idx); + return ret; +} + int kfd_reserved_mem_mmap(struct kfd_process *process, struct vm_area_struct *vma) { -- cgit From 48a44387189ce575cbfe48b5467884b6576cdaf2 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 15 Mar 2018 17:49:40 +0100 Subject: drm/amdkfd: fix uninitialized variable use When CONFIG_ACPI is disabled, we never initialize the acpi_table structure in kfd_create_crat_image_virtual: drivers/gpu/drm/amd/amdkfd/kfd_crat.c: In function 'kfd_create_crat_image_virtual': drivers/gpu/drm/amd/amdkfd/kfd_crat.c:888:40: error: 'acpi_table' may be used uninitialized in this function [-Werror=maybe-uninitialized] The undefined behavior also happens for any other acpi_get_table() failure, but then the compiler can't warn about it. This adds an error check that prevents the structure from being used in error, avoiding both the undefined behavior and the warning about it. 
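
A user-space analogue makes the hazard concrete (not driver code; get_value() is made up for the illustration): when the output is only written on success, testing for one specific error value still lets every other failure path read stack garbage, which is exactly what switching to a check for any non-AE_OK status prevents.

#include <stdio.h>

/* Like acpi_get_table(): writes *out only when it succeeds. */
static int get_value(int fail_mode, int *out)
{
	if (fail_mode)
		return fail_mode;	/* error: *out left untouched */
	*out = 42;
	return 0;
}

int main(void)
{
	int v;				/* deliberately uninitialized */
	int status = get_value(2, &v);	/* fails with a code we did not test for */

	if (status != 0)		/* the fix: reject every failure, not just one */
		printf("lookup failed (%d), leaving v alone\n", status);
	else
		printf("v = %d\n", v);
	return 0;
}
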
Fixes: 520b8fb755cc ("drm/amdkfd: Add topology support for CPUs") Signed-off-by: Arnd Bergmann Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdkfd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 7493f47e7fe1..d85112224f1d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -882,7 +882,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size) crat_table->length = sizeof(struct crat_header); status = acpi_get_table("DSDT", 0, &acpi_table); - if (status == AE_NOT_FOUND) + if (status != AE_OK) pr_warn("DSDT table not found for OEM information\n"); else { crat_table->oem_revision = acpi_table->revision; -- cgit From b84394e206a748ce34beeed462f237474f3c6c00 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Thu, 15 Mar 2018 17:27:44 -0400 Subject: drm/amdkfd: Create KFD VMs on demand MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of creating all VMs on process creation, create them when a process is bound to a device. This will later allow registering an existing VM from a DRM render node FD at runtime, before the process is bound to the device. This way the render node VM can be used for KFD instead of creating our own redundant VM. Signed-off-by: Felix Kuehling Acked-by: Christian König Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 3 ++ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 60 ++++++++++++++++++++++++++------ 2 files changed, 53 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index cac7aa258162..014d608226d5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -536,6 +536,7 @@ struct kfd_process_device { uint64_t scratch_limit; /* VM context for GPUVM allocations */ + struct file *drm_file; void *vm; /* Flag used to tell the pdd has dequeued from the dqm. 
@@ -661,6 +662,8 @@ void kfd_unref_process(struct kfd_process *p); void kfd_suspend_all_processes(void); int kfd_resume_all_processes(void); +int kfd_process_device_init_vm(struct kfd_process_device *pdd, + struct file *drm_file); struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev, struct kfd_process *p); struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 18b2b86ad503..6618aaa6b84f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -30,6 +30,7 @@ #include #include #include +#include struct mm_struct; @@ -158,7 +159,9 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n", pdd->dev->id, p->pasid); - if (pdd->vm) + if (pdd->drm_file) + fput(pdd->drm_file); + else if (pdd->vm) pdd->dev->kfd2kgd->destroy_process_vm( pdd->dev->kgd, pdd->vm); @@ -418,18 +421,51 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, pdd->already_dequeued = false; list_add(&pdd->per_device_list, &p->per_device_data); - /* Create the GPUVM context for this specific device */ - if (dev->kfd2kgd->create_process_vm(dev->kgd, &pdd->vm, - &p->kgd_process_info, &p->ef)) { + return pdd; +} + +/** + * kfd_process_device_init_vm - Initialize a VM for a process-device + * + * @pdd: The process-device + * @drm_file: Optional pointer to a DRM file descriptor + * + * If @drm_file is specified, it will be used to acquire the VM from + * that file descriptor. If successful, the @pdd takes ownership of + * the file descriptor. + * + * If @drm_file is NULL, a new VM is created. + * + * Returns 0 on success, -errno on failure. + */ +int kfd_process_device_init_vm(struct kfd_process_device *pdd, + struct file *drm_file) +{ + struct kfd_process *p; + struct kfd_dev *dev; + int ret; + + if (pdd->vm) + return drm_file ? 
-EBUSY : 0; + + p = pdd->process; + dev = pdd->dev; + + if (drm_file) + ret = dev->kfd2kgd->acquire_process_vm( + dev->kgd, drm_file, + &pdd->vm, &p->kgd_process_info, &p->ef); + else + ret = dev->kfd2kgd->create_process_vm( + dev->kgd, &pdd->vm, &p->kgd_process_info, &p->ef); + if (ret) { pr_err("Failed to create process VM object\n"); - goto err_create_pdd; + return ret; } - return pdd; -err_create_pdd: - list_del(&pdd->per_device_list); - kfree(pdd); - return NULL; + pdd->drm_file = drm_file; + + return 0; } /* @@ -455,6 +491,10 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev, if (err) return ERR_PTR(err); + err = kfd_process_device_init_vm(pdd, NULL); + if (err) + return ERR_PTR(err); + return pdd; } -- cgit From 7c9b717196b1afd368436c258994ed6054717c7a Mon Sep 17 00:00:00 2001 From: Oak Zeng Date: Thu, 15 Mar 2018 17:27:45 -0400 Subject: drm/amdkfd: Populate DRM render device minor MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Populate DRM render device minor in kfd topology Signed-off-by: Oak Zeng Signed-off-by: Felix Kuehling Acked-by: Christian König Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 4 ++++ drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 1 + 2 files changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd/amdkfd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 250615535563..ac28abc94e57 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -441,6 +441,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->node_props.device_id); sysfs_show_32bit_prop(buffer, "location_id", dev->node_props.location_id); + sysfs_show_32bit_prop(buffer, "drm_render_minor", + dev->node_props.drm_render_minor); if (dev->gpu) { log_max_watch_addr = @@ -1214,6 +1216,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu) dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz(dev->gpu->kgd); dev->node_props.max_engine_clk_ccompute = cpufreq_quick_get_max(0) / 1000; + dev->node_props.drm_render_minor = + gpu->shared_resources.drm_render_minor; kfd_fill_mem_clk_max_info(dev); kfd_fill_iolink_non_crat_info(dev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h index c0be2be6dca5..eb54cfcaf039 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h @@ -71,6 +71,7 @@ struct kfd_node_properties { uint32_t location_id; uint32_t max_engine_clk_fcompute; uint32_t max_engine_clk_ccompute; + int32_t drm_render_minor; uint16_t marketing_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE]; }; -- cgit From c7bcbfa4f8d1e0e1078adfe959d4b65542bccf66 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Thu, 15 Mar 2018 17:27:46 -0400 Subject: drm/amdkfd: Remove limit on number of GPUs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently the number of GPUs is limited by aperture placement options available on GFX7 and GFX8 hardware. This limitation is not necessary. Scratch and LDS represent per-work-item and per-work-group storage respectively. Different work-items and work-groups use the same virtual address to access their own data. Work running on different GPUs is by definition in different work-groups (different dispatches, in fact). That means the same virtual addresses can be used for these apertures on different GPUs. 
Add a new AMDKFD_IOC_GET_PROCESS_APERTURES_NEW ioctl that removes the artificial limitation on the number of GPUs that can be supported. The new ioctl allows user mode to query the number of GPUs to allocate enough memory for all GPUs to be reported. This deprecates AMDKFD_IOC_GET_PROCESS_APERTURES. Signed-off-by: Felix Kuehling Acked-by: Christian König Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 94 ++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 22 +++---- 2 files changed, 104 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 6fe24964540b..7d4009418ec3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -825,6 +825,97 @@ static int kfd_ioctl_get_process_apertures(struct file *filp, return 0; } +static int kfd_ioctl_get_process_apertures_new(struct file *filp, + struct kfd_process *p, void *data) +{ + struct kfd_ioctl_get_process_apertures_new_args *args = data; + struct kfd_process_device_apertures *pa; + struct kfd_process_device *pdd; + uint32_t nodes = 0; + int ret; + + dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid); + + if (args->num_of_nodes == 0) { + /* Return number of nodes, so that user space can alloacate + * sufficient memory + */ + mutex_lock(&p->mutex); + + if (!kfd_has_process_device_data(p)) + goto out_unlock; + + /* Run over all pdd of the process */ + pdd = kfd_get_first_process_device_data(p); + do { + args->num_of_nodes++; + pdd = kfd_get_next_process_device_data(p, pdd); + } while (pdd); + + goto out_unlock; + } + + /* Fill in process-aperture information for all available + * nodes, but not more than args->num_of_nodes as that is + * the amount of memory allocated by user + */ + pa = kzalloc((sizeof(struct kfd_process_device_apertures) * + args->num_of_nodes), GFP_KERNEL); + if (!pa) + return -ENOMEM; + + mutex_lock(&p->mutex); + + if (!kfd_has_process_device_data(p)) { + args->num_of_nodes = 0; + kfree(pa); + goto out_unlock; + } + + /* Run over all pdd of the process */ + pdd = kfd_get_first_process_device_data(p); + do { + pa[nodes].gpu_id = pdd->dev->id; + pa[nodes].lds_base = pdd->lds_base; + pa[nodes].lds_limit = pdd->lds_limit; + pa[nodes].gpuvm_base = pdd->gpuvm_base; + pa[nodes].gpuvm_limit = pdd->gpuvm_limit; + pa[nodes].scratch_base = pdd->scratch_base; + pa[nodes].scratch_limit = pdd->scratch_limit; + + dev_dbg(kfd_device, + "gpu id %u\n", pdd->dev->id); + dev_dbg(kfd_device, + "lds_base %llX\n", pdd->lds_base); + dev_dbg(kfd_device, + "lds_limit %llX\n", pdd->lds_limit); + dev_dbg(kfd_device, + "gpuvm_base %llX\n", pdd->gpuvm_base); + dev_dbg(kfd_device, + "gpuvm_limit %llX\n", pdd->gpuvm_limit); + dev_dbg(kfd_device, + "scratch_base %llX\n", pdd->scratch_base); + dev_dbg(kfd_device, + "scratch_limit %llX\n", pdd->scratch_limit); + nodes++; + + pdd = kfd_get_next_process_device_data(p, pdd); + } while (pdd && (nodes < args->num_of_nodes)); + mutex_unlock(&p->mutex); + + args->num_of_nodes = nodes; + ret = copy_to_user( + (void __user *)args->kfd_process_device_apertures_ptr, + pa, + (nodes * sizeof(struct kfd_process_device_apertures))); + kfree(pa); + return ret ? 
-EFAULT : 0; + +out_unlock: + mutex_unlock(&p->mutex); + return 0; +} + static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p, void *data) { @@ -1017,6 +1108,9 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = { AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER, kfd_ioctl_set_trap_handler, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, + kfd_ioctl_get_process_apertures_new, 0), }; #define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index 7377513050e6..a06b0100af96 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -282,14 +282,14 @@ (((uint64_t)(base) & \ 0xFFFFFF0000000000UL) | 0xFFFFFFFFFFL) -#define MAKE_SCRATCH_APP_BASE(gpu_num) \ - (((uint64_t)(gpu_num) << 61) + 0x100000000L) +#define MAKE_SCRATCH_APP_BASE() \ + (((uint64_t)(0x1UL) << 61) + 0x100000000L) #define MAKE_SCRATCH_APP_LIMIT(base) \ (((uint64_t)base & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF) -#define MAKE_LDS_APP_BASE(gpu_num) \ - (((uint64_t)(gpu_num) << 61) + 0x0) +#define MAKE_LDS_APP_BASE() \ + (((uint64_t)(0x1UL) << 61) + 0x0) #define MAKE_LDS_APP_LIMIT(base) \ (((uint64_t)(base) & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF) @@ -314,7 +314,7 @@ int kfd_init_apertures(struct kfd_process *process) return -1; } /* - * For 64 bit process aperture will be statically reserved in + * For 64 bit process apertures will be statically reserved in * the x86_64 non canonical process address space * amdkfd doesn't currently support apertures for 32 bit process */ @@ -323,12 +323,11 @@ int kfd_init_apertures(struct kfd_process *process) pdd->gpuvm_base = pdd->gpuvm_limit = 0; pdd->scratch_base = pdd->scratch_limit = 0; } else { - /* - * node id couldn't be 0 - the three MSB bits of - * aperture shoudn't be 0 + /* Same LDS and scratch apertures can be used + * on all GPUs. This allows using more dGPUs + * than placement options for apertures. */ - pdd->lds_base = MAKE_LDS_APP_BASE(id + 1); - + pdd->lds_base = MAKE_LDS_APP_BASE(); pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base); pdd->gpuvm_base = MAKE_GPUVM_APP_BASE(id + 1); @@ -336,8 +335,7 @@ int kfd_init_apertures(struct kfd_process *process) pdd->gpuvm_limit = MAKE_GPUVM_APP_LIMIT(pdd->gpuvm_base); - pdd->scratch_base = MAKE_SCRATCH_APP_BASE(id + 1); - + pdd->scratch_base = MAKE_SCRATCH_APP_BASE(); pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base); } -- cgit From d01994c24cb28b6f200138d98cbfc17b6bd967c5 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Thu, 15 Mar 2018 17:27:47 -0400 Subject: drm/amdkfd: Aperture setup for dGPUs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Set up the GPUVM aperture for SVM (shared virtual memory) that allows sharing a part of virtual address space between GPUs and CPUs. Report the size of the GPUVM aperture that is supported by KGD accurately. The low part of the GPUVM aperture is reserved for kernel use. 
This is for kernel-allocated buffers that are only accessed on the GPU: - CWSR trap handler - IB for submitting commands in user-mode context from kernel mode Signed-off-by: Felix Kuehling Acked-by: Christian König Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 37 ++++++++++++++++++++++------ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 4 +++ 2 files changed, 33 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index a06b0100af96..66852de410c8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -278,9 +278,8 @@ #define MAKE_GPUVM_APP_BASE(gpu_num) \ (((uint64_t)(gpu_num) << 61) + 0x1000000000000L) -#define MAKE_GPUVM_APP_LIMIT(base) \ - (((uint64_t)(base) & \ - 0xFFFFFF0000000000UL) | 0xFFFFFFFFFFL) +#define MAKE_GPUVM_APP_LIMIT(base, size) \ + (((uint64_t)(base) & 0xFFFFFF0000000000UL) + (size) - 1) #define MAKE_SCRATCH_APP_BASE() \ (((uint64_t)(0x1UL) << 61) + 0x100000000L) @@ -293,6 +292,14 @@ #define MAKE_LDS_APP_LIMIT(base) \ (((uint64_t)(base) & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF) +/* User mode manages most of the SVM aperture address space. The low + * 16MB are reserved for kernel use (CWSR trap handler and kernel IB + * for now). + */ +#define SVM_USER_BASE 0x1000000ull +#define SVM_CWSR_BASE (SVM_USER_BASE - KFD_CWSR_TBA_TMA_SIZE) +#define SVM_IB_BASE (SVM_CWSR_BASE - PAGE_SIZE) + int kfd_init_apertures(struct kfd_process *process) { uint8_t id = 0; @@ -330,14 +337,28 @@ int kfd_init_apertures(struct kfd_process *process) pdd->lds_base = MAKE_LDS_APP_BASE(); pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base); - pdd->gpuvm_base = MAKE_GPUVM_APP_BASE(id + 1); - - pdd->gpuvm_limit = - MAKE_GPUVM_APP_LIMIT(pdd->gpuvm_base); - pdd->scratch_base = MAKE_SCRATCH_APP_BASE(); pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base); + + if (dev->device_info->needs_iommu_device) { + /* APUs: GPUVM aperture in + * non-canonical address space + */ + pdd->gpuvm_base = MAKE_GPUVM_APP_BASE(id + 1); + pdd->gpuvm_limit = MAKE_GPUVM_APP_LIMIT( + pdd->gpuvm_base, + dev->shared_resources.gpuvm_size); + } else { + /* dGPUs: SVM aperture starting at 0 + * with small reserved space for kernel + */ + pdd->gpuvm_base = SVM_USER_BASE; + pdd->gpuvm_limit = + dev->shared_resources.gpuvm_size - 1; + pdd->qpd.cwsr_base = SVM_CWSR_BASE; + pdd->qpd.ib_base = SVM_IB_BASE; + } } dev_dbg(kfd_device, "node id %u\n", id); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 014d608226d5..0d5d924b31ef 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -488,8 +488,12 @@ struct qcm_process_device { /* CWSR memory */ void *cwsr_kaddr; + uint64_t cwsr_base; uint64_t tba_addr; uint64_t tma_addr; + + /* IB memory */ + uint64_t ib_base; }; /* KFD Memory Eviction */ -- cgit From 52b29d73340da6cbb10ba42b0a28e7fb795afe9c Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Thu, 15 Mar 2018 17:27:48 -0400 Subject: drm/amdkfd: Add per-process IDR for buffer handles MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Also used for cleaning up on process termination. 
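
As a hedged illustration of how the new helpers are meant to be used (the wrapper functions below are hypothetical; only the kfd_process_device_* helpers come from this patch): an allocation path stores the kernel object pointer in the per-device IDR and hands the small integer handle back to user mode, and later ioctls resolve or drop that handle while holding the process lock, as the helpers require.

static int example_alloc_handle(struct kfd_process_device *pdd, void *mem,
				u32 *handle_out)
{
	int handle;

	/* p->mutex is assumed to be held by the caller */
	handle = kfd_process_device_create_obj_handle(pdd, mem);
	if (handle < 0)
		return handle;		/* idr_alloc() error code */

	*handle_out = handle;		/* copied back to user mode */
	return 0;
}

static void *example_lookup_handle(struct kfd_process_device *pdd, u32 handle)
{
	/* NULL means user mode passed a stale or bogus handle */
	return kfd_process_device_translate_handle(pdd, (int)handle);
}

static void example_free_handle(struct kfd_process_device *pdd, u32 handle)
{
	kfd_process_device_remove_obj_handle(pdd, (int)handle);
}

This is also why kfd_process_device_free_bos() in the diff can simply walk the IDR with idr_for_each_entry() at process teardown: every outstanding handle still names a live buffer object.
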
v2: Refactored cleanup on process termination Signed-off-by: Felix Kuehling Acked-by: Christian König Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 11 +++++ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 73 ++++++++++++++++++++++++++++++++ 2 files changed, 84 insertions(+) (limited to 'drivers/gpu/drm/amd/amdkfd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 0d5d924b31ef..b2b5ef8c4536 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -543,6 +543,9 @@ struct kfd_process_device { struct file *drm_file; void *vm; + /* GPUVM allocations storage */ + struct idr alloc_idr; + /* Flag used to tell the pdd has dequeued from the dqm. * This is used to prevent dev->dqm->ops.process_termination() from * being called twice when it is already called in IOMMU callback @@ -678,6 +681,14 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, int kfd_reserved_mem_mmap(struct kfd_process *process, struct vm_area_struct *vma); +/* KFD process API for creating and translating handles */ +int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd, + void *mem); +void *kfd_process_device_translate_handle(struct kfd_process_device *p, + int handle); +void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd, + int handle); + /* Process device data iterator */ struct kfd_process_device *kfd_get_first_process_device_data( struct kfd_process *p); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 6618aaa6b84f..a2ae0236a3ea 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -150,6 +150,40 @@ void kfd_unref_process(struct kfd_process *p) kref_put(&p->ref, kfd_process_ref_release); } +static void kfd_process_device_free_bos(struct kfd_process_device *pdd) +{ + struct kfd_process *p = pdd->process; + void *mem; + int id; + + /* + * Remove all handles from idr and release appropriate + * local memory object + */ + idr_for_each_entry(&pdd->alloc_idr, mem, id) { + struct kfd_process_device *peer_pdd; + + list_for_each_entry(peer_pdd, &p->per_device_data, + per_device_list) { + if (!peer_pdd->vm) + continue; + peer_pdd->dev->kfd2kgd->unmap_memory_to_gpu( + peer_pdd->dev->kgd, mem, peer_pdd->vm); + } + + pdd->dev->kfd2kgd->free_memory_of_gpu(pdd->dev->kgd, mem); + kfd_process_device_remove_obj_handle(pdd, id); + } +} + +static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p) +{ + struct kfd_process_device *pdd; + + list_for_each_entry(pdd, &p->per_device_data, per_device_list) + kfd_process_device_free_bos(pdd); +} + static void kfd_process_destroy_pdds(struct kfd_process *p) { struct kfd_process_device *pdd, *temp; @@ -171,6 +205,8 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) free_pages((unsigned long)pdd->qpd.cwsr_kaddr, get_order(KFD_CWSR_TBA_TMA_SIZE)); + idr_destroy(&pdd->alloc_idr); + kfree(pdd); } } @@ -187,6 +223,8 @@ static void kfd_process_wq_release(struct work_struct *work) kfd_iommu_unbind_process(p); + kfd_process_free_outstanding_kfd_bos(p); + kfd_process_destroy_pdds(p); dma_fence_put(p->ef); @@ -371,6 +409,7 @@ static struct kfd_process *create_process(const struct task_struct *thread, return process; err_init_cwsr: + kfd_process_free_outstanding_kfd_bos(process); kfd_process_destroy_pdds(process); err_init_apertures: pqm_uninit(&process->pqm); @@ -421,6 +460,9 @@ struct kfd_process_device 
*kfd_create_process_device_data(struct kfd_dev *dev, pdd->already_dequeued = false; list_add(&pdd->per_device_list, &p->per_device_data); + /* Init idr used for memory handle translation */ + idr_init(&pdd->alloc_idr); + return pdd; } @@ -520,6 +562,37 @@ bool kfd_has_process_device_data(struct kfd_process *p) return !(list_empty(&p->per_device_data)); } +/* Create specific handle mapped to mem from process local memory idr + * Assumes that the process lock is held. + */ +int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd, + void *mem) +{ + return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL); +} + +/* Translate specific handle from process local memory idr + * Assumes that the process lock is held. + */ +void *kfd_process_device_translate_handle(struct kfd_process_device *pdd, + int handle) +{ + if (handle < 0) + return NULL; + + return idr_find(&pdd->alloc_idr, handle); +} + +/* Remove specific handle from process local memory idr + * Assumes that the process lock is held. + */ +void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd, + int handle) +{ + if (handle >= 0) + idr_remove(&pdd->alloc_idr, handle); +} + /* This increments the process->ref counter. */ struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid) { -- cgit From f35751b87034f0c2d11e60cdfb0179c4f1a7e296 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Thu, 15 Mar 2018 17:27:49 -0400 Subject: drm/amdkfd: Allocate CWSR trap handler memory for dGPUs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add helpers for allocating GPUVM memory in kernel mode and use them to allocate memory for the CWSR trap handler. v2: Use dev instead of pdd->dev in kfd_process_free_gpuvm v3: * Cleaned up and simplified kfd_process_alloc_gpuvm * Moved allocation for dGPU to kfd_process_device_init_vm Signed-off-by: Felix Kuehling Acked-by: Christian König Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 137 ++++++++++++++++++++++++++++--- 1 file changed, 127 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index a2ae0236a3ea..aeb339db8a90 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -54,7 +54,6 @@ static struct kfd_process *find_process(const struct task_struct *thread); static void kfd_process_ref_release(struct kref *ref); static struct kfd_process *create_process(const struct task_struct *thread, struct file *filep); -static int kfd_process_init_cwsr(struct kfd_process *p, struct file *filep); static void evict_process_worker(struct work_struct *work); static void restore_process_worker(struct work_struct *work); @@ -74,6 +73,82 @@ void kfd_process_destroy_wq(void) } } +static void kfd_process_free_gpuvm(struct kgd_mem *mem, + struct kfd_process_device *pdd) +{ + struct kfd_dev *dev = pdd->dev; + + dev->kfd2kgd->unmap_memory_to_gpu(dev->kgd, mem, pdd->vm); + dev->kfd2kgd->free_memory_of_gpu(dev->kgd, mem); +} + +/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process + * This function should be only called right after the process + * is created and when kfd_processes_mutex is still being held + * to avoid concurrency. Because of that exclusiveness, we do + * not need to take p->mutex. 
+ */ +static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd, + uint64_t gpu_va, uint32_t size, + uint32_t flags, void **kptr) +{ + struct kfd_dev *kdev = pdd->dev; + struct kgd_mem *mem = NULL; + int handle; + int err; + + err = kdev->kfd2kgd->alloc_memory_of_gpu(kdev->kgd, gpu_va, size, + pdd->vm, &mem, NULL, flags); + if (err) + goto err_alloc_mem; + + err = kdev->kfd2kgd->map_memory_to_gpu(kdev->kgd, mem, pdd->vm); + if (err) + goto err_map_mem; + + err = kdev->kfd2kgd->sync_memory(kdev->kgd, mem, true); + if (err) { + pr_debug("Sync memory failed, wait interrupted by user signal\n"); + goto sync_memory_failed; + } + + /* Create an obj handle so kfd_process_device_remove_obj_handle + * will take care of the bo removal when the process finishes. + * We do not need to take p->mutex, because the process is just + * created and the ioctls have not had the chance to run. + */ + handle = kfd_process_device_create_obj_handle(pdd, mem); + + if (handle < 0) { + err = handle; + goto free_gpuvm; + } + + if (kptr) { + err = kdev->kfd2kgd->map_gtt_bo_to_kernel(kdev->kgd, + (struct kgd_mem *)mem, kptr, NULL); + if (err) { + pr_debug("Map GTT BO to kernel failed\n"); + goto free_obj_handle; + } + } + + return err; + +free_obj_handle: + kfd_process_device_remove_obj_handle(pdd, handle); +free_gpuvm: +sync_memory_failed: + kfd_process_free_gpuvm(mem, pdd); + return err; + +err_map_mem: + kdev->kfd2kgd->free_memory_of_gpu(kdev->kgd, mem); +err_alloc_mem: + *kptr = NULL; + return err; +} + struct kfd_process *kfd_create_process(struct file *filep) { struct kfd_process *process; @@ -201,7 +276,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) list_del(&pdd->per_device_list); - if (pdd->qpd.cwsr_kaddr) + if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base) free_pages((unsigned long)pdd->qpd.cwsr_kaddr, get_order(KFD_CWSR_TBA_TMA_SIZE)); @@ -312,18 +387,18 @@ static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = { .release = kfd_process_notifier_release, }; -static int kfd_process_init_cwsr(struct kfd_process *p, struct file *filep) +static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep) { unsigned long offset; - struct kfd_process_device *pdd = NULL; - struct kfd_dev *dev = NULL; - struct qcm_process_device *qpd = NULL; + struct kfd_process_device *pdd; list_for_each_entry(pdd, &p->per_device_data, per_device_list) { - dev = pdd->dev; - qpd = &pdd->qpd; - if (!dev->cwsr_enabled || qpd->cwsr_kaddr) + struct kfd_dev *dev = pdd->dev; + struct qcm_process_device *qpd = &pdd->qpd; + + if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base) continue; + offset = (dev->id | KFD_MMAP_RESERVED_MEM_MASK) << PAGE_SHIFT; qpd->tba_addr = (int64_t)vm_mmap(filep, 0, KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC, @@ -348,6 +423,36 @@ static int kfd_process_init_cwsr(struct kfd_process *p, struct file *filep) return 0; } +static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) +{ + struct kfd_dev *dev = pdd->dev; + struct qcm_process_device *qpd = &pdd->qpd; + uint32_t flags = ALLOC_MEM_FLAGS_GTT | + ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE; + void *kaddr; + int ret; + + if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base) + return 0; + + /* cwsr_base is only set for dGPU */ + ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base, + KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr); + if (ret) + return ret; + + qpd->cwsr_kaddr = kaddr; + qpd->tba_addr = qpd->cwsr_base; + + memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, 
dev->cwsr_isa_size); + + qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET; + pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n", + qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr); + + return 0; +} + static struct kfd_process *create_process(const struct task_struct *thread, struct file *filep) { @@ -402,7 +507,7 @@ static struct kfd_process *create_process(const struct task_struct *thread, INIT_DELAYED_WORK(&process->restore_work, restore_process_worker); process->last_restore_timestamp = get_jiffies_64(); - err = kfd_process_init_cwsr(process, filep); + err = kfd_process_init_cwsr_apu(process, filep); if (err) goto err_init_cwsr; @@ -505,9 +610,21 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, return ret; } + ret = kfd_process_device_init_cwsr_dgpu(pdd); + if (ret) + goto err_init_cwsr; + pdd->drm_file = drm_file; return 0; + +err_init_cwsr: + kfd_process_device_free_bos(pdd); + if (!drm_file) + dev->kfd2kgd->destroy_process_vm(dev->kgd, pdd->vm); + pdd->vm = NULL; + + return ret; } /* -- cgit From 552764b680a65d6069ad651b356d5465082939d0 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Thu, 15 Mar 2018 17:27:50 -0400 Subject: drm/amdkfd: Add TC flush on VMID deallocation for Hawaii MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On GFX7 the CP does not perform a TC flush when queues are unmapped. To avoid TC eviction from accessing an invalid VMID, flush it explicitly before releasing a VMID. v2: Fix unnecessary list_for_each_entry_safe v3: Moved allocation to kfd_process_device_init_vm Signed-off-by: Amber Lin Signed-off-by: Felix Kuehling Acked-by: Christian König Signed-off-by: Oded Gabbay --- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 22 ++++++++++++- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 37 ++++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 3 ++ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 34 ++++++++++++++++++++ 4 files changed, 95 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdkfd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index b3b6dab71638..c18e048f23c6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -142,12 +142,31 @@ static int allocate_vmid(struct device_queue_manager *dqm, return 0; } +static int flush_texture_cache_nocpsch(struct kfd_dev *kdev, + struct qcm_process_device *qpd) +{ + uint32_t len; + + if (!qpd->ib_kaddr) + return -ENOMEM; + + len = pm_create_release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr); + + return kdev->kfd2kgd->submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid, + qpd->ib_base, (uint32_t *)qpd->ib_kaddr, len); +} + static void deallocate_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) { int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd; + /* On GFX v7, CP doesn't flush TC at dequeue */ + if (q->device->device_info->asic_family == CHIP_HAWAII) + if (flush_texture_cache_nocpsch(q->device, qpd)) + pr_err("Failed to flush TC\n"); + kfd_flush_tlb(qpd_to_pdd(qpd)); /* Release the vmid mapping */ @@ -792,11 +811,12 @@ static void uninitialize(struct device_queue_manager *dqm) static int start_nocpsch(struct device_queue_manager *dqm) { init_interrupts(dqm); - return 0; + return pm_init(&dqm->packets, dqm); } static int stop_nocpsch(struct device_queue_manager *dqm) { + pm_uninit(&dqm->packets); return 0; } diff --git 
a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index 0ecbd1f9b606..7614375489a4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -356,6 +356,43 @@ static int pm_create_runlist_ib(struct packet_manager *pm, return retval; } +/* pm_create_release_mem - Create a RELEASE_MEM packet and return the size + * of this packet + * @gpu_addr - GPU address of the packet. It's a virtual address. + * @buffer - buffer to fill up with the packet. It's a CPU kernel pointer + * Return - length of the packet + */ +uint32_t pm_create_release_mem(uint64_t gpu_addr, uint32_t *buffer) +{ + struct pm4_mec_release_mem *packet; + + WARN_ON(!buffer); + + packet = (struct pm4_mec_release_mem *)buffer; + memset(buffer, 0, sizeof(*packet)); + + packet->header.u32All = build_pm4_header(IT_RELEASE_MEM, + sizeof(*packet)); + + packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT; + packet->bitfields2.event_index = event_index___release_mem__end_of_pipe; + packet->bitfields2.tcl1_action_ena = 1; + packet->bitfields2.tc_action_ena = 1; + packet->bitfields2.cache_policy = cache_policy___release_mem__lru; + packet->bitfields2.atc = 0; + + packet->bitfields3.data_sel = data_sel___release_mem__send_32_bit_low; + packet->bitfields3.int_sel = + int_sel___release_mem__send_interrupt_after_write_confirm; + + packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2; + packet->address_hi = upper_32_bits(gpu_addr); + + packet->data_lo = 0; + + return sizeof(*packet) / sizeof(unsigned int); +} + int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) { pm->dqm = dqm; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index b2b5ef8c4536..aaed005ce1f5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -494,6 +494,7 @@ struct qcm_process_device { /* IB memory */ uint64_t ib_base; + void *ib_kaddr; }; /* KFD Memory Eviction */ @@ -834,6 +835,8 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, void pm_release_ib(struct packet_manager *pm); +uint32_t pm_create_release_mem(uint64_t gpu_addr, uint32_t *buffer); + uint64_t kfd_get_number_elems(struct kfd_dev *kfd); /* Events */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index aeb339db8a90..45ef2d03a975 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -149,6 +149,36 @@ err_alloc_mem: return err; } +/* kfd_process_device_reserve_ib_mem - Reserve memory inside the + * process for IB usage The memory reserved is for KFD to submit + * IB to AMDGPU from kernel. If the memory is reserved + * successfully, ib_kaddr will have the CPU/kernel + * address. Check ib_kaddr before accessing the memory. 
+ */ +static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd) +{ + struct qcm_process_device *qpd = &pdd->qpd; + uint32_t flags = ALLOC_MEM_FLAGS_GTT | + ALLOC_MEM_FLAGS_NO_SUBSTITUTE | + ALLOC_MEM_FLAGS_WRITABLE | + ALLOC_MEM_FLAGS_EXECUTABLE; + void *kaddr; + int ret; + + if (qpd->ib_kaddr || !qpd->ib_base) + return 0; + + /* ib_base is only set for dGPU */ + ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags, + &kaddr); + if (ret) + return ret; + + qpd->ib_kaddr = kaddr; + + return 0; +} + struct kfd_process *kfd_create_process(struct file *filep) { struct kfd_process *process; @@ -610,6 +640,9 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, return ret; } + ret = kfd_process_device_reserve_ib_mem(pdd); + if (ret) + goto err_reserve_ib_mem; ret = kfd_process_device_init_cwsr_dgpu(pdd); if (ret) goto err_init_cwsr; @@ -619,6 +652,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, return 0; err_init_cwsr: +err_reserve_ib_mem: kfd_process_device_free_bos(pdd); if (!drm_file) dev->kfd2kgd->destroy_process_vm(dev->kgd, pdd->vm); -- cgit From 5ec7e02854b3b9b55936c3b44b8acfb85e333f49 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Thu, 15 Mar 2018 17:27:51 -0400 Subject: drm/amdkfd: Add ioctls for GPUVM memory management MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v2: * Fix error handling after kfd_bind_process_to_device in kfd_ioctl_map_memory_to_gpu v3: * Add ioctl to acquire VM from a DRM FD v4: * Return number of successful map/unmap operations in failure cases * Facilitate partial retry after failed map/unmap * Added comments with parameter descriptions to new APIs * Defined AMDKFD_IOC_FREE_MEMORY_OF_GPU write-only Signed-off-by: Felix Kuehling Acked-by: Christian König Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 377 +++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 8 + 2 files changed, 385 insertions(+) (limited to 'drivers/gpu/drm/amd/amdkfd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 7d4009418ec3..a563ff2ca7dd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -1046,6 +1047,366 @@ static int kfd_ioctl_get_tile_config(struct file *filep, return 0; } +static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p, + void *data) +{ + struct kfd_ioctl_acquire_vm_args *args = data; + struct kfd_process_device *pdd; + struct kfd_dev *dev; + struct file *drm_file; + int ret; + + dev = kfd_device_by_id(args->gpu_id); + if (!dev) + return -EINVAL; + + drm_file = fget(args->drm_fd); + if (!drm_file) + return -EINVAL; + + mutex_lock(&p->mutex); + + pdd = kfd_get_process_device_data(dev, p); + if (!pdd) { + ret = -EINVAL; + goto err_unlock; + } + + if (pdd->drm_file) { + ret = pdd->drm_file == drm_file ? 
0 : -EBUSY; + goto err_unlock; + } + + ret = kfd_process_device_init_vm(pdd, drm_file); + if (ret) + goto err_unlock; + /* On success, the PDD keeps the drm_file reference */ + mutex_unlock(&p->mutex); + + return 0; + +err_unlock: + mutex_unlock(&p->mutex); + fput(drm_file); + return ret; +} + +bool kfd_dev_is_large_bar(struct kfd_dev *dev) +{ + struct kfd_local_mem_info mem_info; + + if (dev->device_info->needs_iommu_device) + return false; + + dev->kfd2kgd->get_local_mem_info(dev->kgd, &mem_info); + if (mem_info.local_mem_size_private == 0 && + mem_info.local_mem_size_public > 0) + return true; + return false; +} + +static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, + struct kfd_process *p, void *data) +{ + struct kfd_ioctl_alloc_memory_of_gpu_args *args = data; + struct kfd_process_device *pdd; + void *mem; + struct kfd_dev *dev; + int idr_handle; + long err; + uint64_t offset = args->mmap_offset; + uint32_t flags = args->flags; + + if (args->size == 0) + return -EINVAL; + + dev = kfd_device_by_id(args->gpu_id); + if (!dev) + return -EINVAL; + + if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) && + (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) && + !kfd_dev_is_large_bar(dev)) { + pr_err("Alloc host visible vram on small bar is not allowed\n"); + return -EINVAL; + } + + mutex_lock(&p->mutex); + + pdd = kfd_bind_process_to_device(dev, p); + if (IS_ERR(pdd)) { + err = PTR_ERR(pdd); + goto err_unlock; + } + + err = dev->kfd2kgd->alloc_memory_of_gpu( + dev->kgd, args->va_addr, args->size, + pdd->vm, (struct kgd_mem **) &mem, &offset, + flags); + + if (err) + goto err_unlock; + + idr_handle = kfd_process_device_create_obj_handle(pdd, mem); + if (idr_handle < 0) { + err = -EFAULT; + goto err_free; + } + + mutex_unlock(&p->mutex); + + args->handle = MAKE_HANDLE(args->gpu_id, idr_handle); + args->mmap_offset = offset; + + return 0; + +err_free: + dev->kfd2kgd->free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem); +err_unlock: + mutex_unlock(&p->mutex); + return err; +} + +static int kfd_ioctl_free_memory_of_gpu(struct file *filep, + struct kfd_process *p, void *data) +{ + struct kfd_ioctl_free_memory_of_gpu_args *args = data; + struct kfd_process_device *pdd; + void *mem; + struct kfd_dev *dev; + int ret; + + dev = kfd_device_by_id(GET_GPU_ID(args->handle)); + if (!dev) + return -EINVAL; + + mutex_lock(&p->mutex); + + pdd = kfd_get_process_device_data(dev, p); + if (!pdd) { + pr_err("Process device data doesn't exist\n"); + ret = -EINVAL; + goto err_unlock; + } + + mem = kfd_process_device_translate_handle( + pdd, GET_IDR_HANDLE(args->handle)); + if (!mem) { + ret = -EINVAL; + goto err_unlock; + } + + ret = dev->kfd2kgd->free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem); + + /* If freeing the buffer failed, leave the handle in place for + * clean-up during process tear-down. 
+ */ + if (!ret) + kfd_process_device_remove_obj_handle( + pdd, GET_IDR_HANDLE(args->handle)); + +err_unlock: + mutex_unlock(&p->mutex); + return ret; +} + +static int kfd_ioctl_map_memory_to_gpu(struct file *filep, + struct kfd_process *p, void *data) +{ + struct kfd_ioctl_map_memory_to_gpu_args *args = data; + struct kfd_process_device *pdd, *peer_pdd; + void *mem; + struct kfd_dev *dev, *peer; + long err = 0; + int i; + uint32_t *devices_arr = NULL; + + dev = kfd_device_by_id(GET_GPU_ID(args->handle)); + if (!dev) + return -EINVAL; + + if (!args->n_devices) { + pr_debug("Device IDs array empty\n"); + return -EINVAL; + } + if (args->n_success > args->n_devices) { + pr_debug("n_success exceeds n_devices\n"); + return -EINVAL; + } + + devices_arr = kmalloc(args->n_devices * sizeof(*devices_arr), + GFP_KERNEL); + if (!devices_arr) + return -ENOMEM; + + err = copy_from_user(devices_arr, + (void __user *)args->device_ids_array_ptr, + args->n_devices * sizeof(*devices_arr)); + if (err != 0) { + err = -EFAULT; + goto copy_from_user_failed; + } + + mutex_lock(&p->mutex); + + pdd = kfd_bind_process_to_device(dev, p); + if (IS_ERR(pdd)) { + err = PTR_ERR(pdd); + goto bind_process_to_device_failed; + } + + mem = kfd_process_device_translate_handle(pdd, + GET_IDR_HANDLE(args->handle)); + if (!mem) { + err = -ENOMEM; + goto get_mem_obj_from_handle_failed; + } + + for (i = args->n_success; i < args->n_devices; i++) { + peer = kfd_device_by_id(devices_arr[i]); + if (!peer) { + pr_debug("Getting device by id failed for 0x%x\n", + devices_arr[i]); + err = -EINVAL; + goto get_mem_obj_from_handle_failed; + } + + peer_pdd = kfd_bind_process_to_device(peer, p); + if (IS_ERR(peer_pdd)) { + err = PTR_ERR(peer_pdd); + goto get_mem_obj_from_handle_failed; + } + err = peer->kfd2kgd->map_memory_to_gpu( + peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm); + if (err) { + pr_err("Failed to map to gpu %d/%d\n", + i, args->n_devices); + goto map_memory_to_gpu_failed; + } + args->n_success = i+1; + } + + mutex_unlock(&p->mutex); + + err = dev->kfd2kgd->sync_memory(dev->kgd, (struct kgd_mem *) mem, true); + if (err) { + pr_debug("Sync memory failed, wait interrupted by user signal\n"); + goto sync_memory_failed; + } + + /* Flush TLBs after waiting for the page table updates to complete */ + for (i = 0; i < args->n_devices; i++) { + peer = kfd_device_by_id(devices_arr[i]); + if (WARN_ON_ONCE(!peer)) + continue; + peer_pdd = kfd_get_process_device_data(peer, p); + if (WARN_ON_ONCE(!peer_pdd)) + continue; + kfd_flush_tlb(peer_pdd); + } + + kfree(devices_arr); + + return err; + +bind_process_to_device_failed: +get_mem_obj_from_handle_failed: +map_memory_to_gpu_failed: + mutex_unlock(&p->mutex); +copy_from_user_failed: +sync_memory_failed: + kfree(devices_arr); + + return err; +} + +static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, + struct kfd_process *p, void *data) +{ + struct kfd_ioctl_unmap_memory_from_gpu_args *args = data; + struct kfd_process_device *pdd, *peer_pdd; + void *mem; + struct kfd_dev *dev, *peer; + long err = 0; + uint32_t *devices_arr = NULL, i; + + dev = kfd_device_by_id(GET_GPU_ID(args->handle)); + if (!dev) + return -EINVAL; + + if (!args->n_devices) { + pr_debug("Device IDs array empty\n"); + return -EINVAL; + } + if (args->n_success > args->n_devices) { + pr_debug("n_success exceeds n_devices\n"); + return -EINVAL; + } + + devices_arr = kmalloc(args->n_devices * sizeof(*devices_arr), + GFP_KERNEL); + if (!devices_arr) + return -ENOMEM; + + err = copy_from_user(devices_arr, + (void __user 
*)args->device_ids_array_ptr, + args->n_devices * sizeof(*devices_arr)); + if (err != 0) { + err = -EFAULT; + goto copy_from_user_failed; + } + + mutex_lock(&p->mutex); + + pdd = kfd_get_process_device_data(dev, p); + if (!pdd) { + err = PTR_ERR(pdd); + goto bind_process_to_device_failed; + } + + mem = kfd_process_device_translate_handle(pdd, + GET_IDR_HANDLE(args->handle)); + if (!mem) { + err = -ENOMEM; + goto get_mem_obj_from_handle_failed; + } + + for (i = args->n_success; i < args->n_devices; i++) { + peer = kfd_device_by_id(devices_arr[i]); + if (!peer) { + err = -EINVAL; + goto get_mem_obj_from_handle_failed; + } + + peer_pdd = kfd_get_process_device_data(peer, p); + if (!peer_pdd) { + err = -ENODEV; + goto get_mem_obj_from_handle_failed; + } + err = dev->kfd2kgd->unmap_memory_to_gpu( + peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm); + if (err) { + pr_err("Failed to unmap from gpu %d/%d\n", + i, args->n_devices); + goto unmap_memory_from_gpu_failed; + } + args->n_success = i+1; + } + kfree(devices_arr); + + mutex_unlock(&p->mutex); + + return 0; + +bind_process_to_device_failed: +get_mem_obj_from_handle_failed: +unmap_memory_from_gpu_failed: + mutex_unlock(&p->mutex); +copy_from_user_failed: + kfree(devices_arr); + return err; +} + #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \ [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \ .cmd_drv = 0, .name = #ioctl} @@ -1111,6 +1472,22 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = { AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, kfd_ioctl_get_process_apertures_new, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM, + kfd_ioctl_acquire_vm, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, + kfd_ioctl_alloc_memory_of_gpu, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_FREE_MEMORY_OF_GPU, + kfd_ioctl_free_memory_of_gpu, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_MAP_MEMORY_TO_GPU, + kfd_ioctl_map_memory_to_gpu, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU, + kfd_ioctl_unmap_memory_from_gpu, 0), + }; #define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index aaed005ce1f5..1542807373d7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -509,6 +509,14 @@ struct qcm_process_device { int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm, struct dma_fence *fence); +/* 8 byte handle containing GPU ID in the most significant 4 bytes and + * idr_handle in the least significant 4 bytes + */ +#define MAKE_HANDLE(gpu_id, idr_handle) \ + (((uint64_t)(gpu_id) << 32) + idr_handle) +#define GET_GPU_ID(handle) (handle >> 32) +#define GET_IDR_HANDLE(handle) (handle & 0xFFFFFFFF) + enum kfd_pdd_bound { PDD_UNBOUND = 0, PDD_BOUND, -- cgit From 0fc8011f89feb8b2c3008583b777d097e1974660 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Thu, 15 Mar 2018 17:27:52 -0400 Subject: drm/amdkfd: Kmap event page for dGPUs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The events page must be accessible in user mode by the GPU and CPU as well as in kernel mode by the CPU. On dGPUs user mode virtual addresses are managed by the Thunk's GPU memory allocation code. Therefore we can't allocate the memory in kernel mode like we do on APUs. But KFD still needs to map the memory for kernel access. To facilitate this, the Thunk provides the buffer handle of the events page to KFD when creating the first event. 
Signed-off-by: Felix Kuehling Acked-by: Christian König Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 56 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_events.c | 31 ++++++++++++++++-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 ++ 3 files changed, 87 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index a563ff2ca7dd..ec0b2c0284ec 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -923,6 +923,58 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p, struct kfd_ioctl_create_event_args *args = data; int err; + /* For dGPUs the event page is allocated in user mode. The + * handle is passed to KFD with the first call to this IOCTL + * through the event_page_offset field. + */ + if (args->event_page_offset) { + struct kfd_dev *kfd; + struct kfd_process_device *pdd; + void *mem, *kern_addr; + uint64_t size; + + if (p->signal_page) { + pr_err("Event page is already set\n"); + return -EINVAL; + } + + kfd = kfd_device_by_id(GET_GPU_ID(args->event_page_offset)); + if (!kfd) { + pr_err("Getting device by id failed in %s\n", __func__); + return -EINVAL; + } + + mutex_lock(&p->mutex); + pdd = kfd_bind_process_to_device(kfd, p); + if (IS_ERR(pdd)) { + err = PTR_ERR(pdd); + goto out_unlock; + } + + mem = kfd_process_device_translate_handle(pdd, + GET_IDR_HANDLE(args->event_page_offset)); + if (!mem) { + pr_err("Can't find BO, offset is 0x%llx\n", + args->event_page_offset); + err = -EINVAL; + goto out_unlock; + } + mutex_unlock(&p->mutex); + + err = kfd->kfd2kgd->map_gtt_bo_to_kernel(kfd->kgd, + mem, &kern_addr, &size); + if (err) { + pr_err("Failed to map event page to kernel\n"); + return err; + } + + err = kfd_event_page_set(p, kern_addr, size); + if (err) { + pr_err("Failed to set event page\n"); + return err; + } + } + err = kfd_event_create(filp, p, args->event_type, args->auto_reset != 0, args->node_id, &args->event_id, &args->event_trigger_data, @@ -930,6 +982,10 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p, &args->event_slot_index); return err; + +out_unlock: + mutex_unlock(&p->mutex); + return err; } static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 6fb9c0d46d63..4890a90f1e44 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -52,6 +52,7 @@ struct kfd_event_waiter { struct kfd_signal_page { uint64_t *kernel_address; uint64_t __user *user_address; + bool need_to_free_pages; }; @@ -79,6 +80,7 @@ static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p) KFD_SIGNAL_EVENT_LIMIT * 8); page->kernel_address = backing_store; + page->need_to_free_pages = true; pr_debug("Allocated new event signal page at %p, for process %p\n", page, p); @@ -269,8 +271,9 @@ static void shutdown_signal_page(struct kfd_process *p) struct kfd_signal_page *page = p->signal_page; if (page) { - free_pages((unsigned long)page->kernel_address, - get_order(KFD_SIGNAL_EVENT_LIMIT * 8)); + if (page->need_to_free_pages) + free_pages((unsigned long)page->kernel_address, + get_order(KFD_SIGNAL_EVENT_LIMIT * 8)); kfree(page); } } @@ -292,6 +295,30 @@ static bool event_can_be_cpu_signaled(const struct kfd_event *ev) return ev->type == KFD_EVENT_TYPE_SIGNAL; } +int 
kfd_event_page_set(struct kfd_process *p, void *kernel_address, + uint64_t size) +{ + struct kfd_signal_page *page; + + if (p->signal_page) + return -EBUSY; + + page = kzalloc(sizeof(*page), GFP_KERNEL); + if (!page) + return -ENOMEM; + + /* Initialize all events to unsignaled */ + memset(kernel_address, (uint8_t) UNSIGNALED_EVENT_SLOT, + KFD_SIGNAL_EVENT_LIMIT * 8); + + page->kernel_address = kernel_address; + + p->signal_page = page; + p->signal_mapped_size = size; + + return 0; +} + int kfd_event_create(struct file *devkfd, struct kfd_process *p, uint32_t event_type, bool auto_reset, uint32_t node_id, uint32_t *event_id, uint32_t *event_trigger_data, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 1542807373d7..aa9386356587 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -866,6 +866,8 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, void kfd_signal_hw_exception_event(unsigned int pasid); int kfd_set_event(struct kfd_process *p, uint32_t event_id); int kfd_reset_event(struct kfd_process *p, uint32_t event_id); +int kfd_event_page_set(struct kfd_process *p, void *kernel_address, + uint64_t size); int kfd_event_create(struct file *devkfd, struct kfd_process *p, uint32_t event_type, bool auto_reset, uint32_t node_id, uint32_t *event_id, uint32_t *event_trigger_data, -- cgit From 374200b154ae48e8f011fb74dab21f80459f9e47 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Thu, 15 Mar 2018 17:27:53 -0400 Subject: drm/amdkfd: Add module option for testing large-BAR functionality Simulate large-BAR system by exporting only visible memory. This limits the amount of available VRAM to the size of the BAR, but enables CPU access to VRAM. Signed-off-by: Felix Kuehling Acked-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 5 +++++ drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 3 +++ drivers/gpu/drm/amd/amdkfd/kfd_module.c | 5 +++++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 6 ++++++ 4 files changed, 19 insertions(+) (limited to 'drivers/gpu/drm/amd/amdkfd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index ec0b2c0284ec..cd679cf1fd30 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1151,6 +1151,11 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev) { struct kfd_local_mem_info mem_info; + if (debug_largebar) { + pr_debug("Simulate large-bar allocation on non large-bar machine\n"); + return true; + } + if (dev->device_info->needs_iommu_device) return false; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index d85112224f1d..4f126ef6139b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -1117,6 +1117,9 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + sub_type_hdr->length); + if (debug_largebar) + local_mem_info.local_mem_size_private = 0; + if (local_mem_info.local_mem_size_private == 0) ret = kfd_fill_gpu_memory_affinity(&avail_size, kdev, HSA_MEM_HEAP_TYPE_FB_PUBLIC, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index 65574c6a10b3..b0acb0603883 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c @@ -71,6 +71,11 @@ module_param(send_sigterm, int, 0444); MODULE_PARM_DESC(send_sigterm, "Send sigterm to HSA process on 
unhandled exception (0 = disable, 1 = enable)"); +int debug_largebar; +module_param(debug_largebar, int, 0444); +MODULE_PARM_DESC(debug_largebar, + "Debug large-bar flag used to simulate large-bar capability on non-large bar machine (0 = disable, 1 = enable)"); + int ignore_crat; module_param(ignore_crat, int, 0444); MODULE_PARM_DESC(ignore_crat, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index aa9386356587..db27f9f13696 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -104,6 +104,12 @@ extern int cwsr_enable; */ extern int send_sigterm; +/* + * This kernel module is used to simulate large bar machine on non-large bar + * enabled machines. + */ +extern int debug_largebar; + /* * Ignore CRAT table during KFD initialization, can be used to work around * broken CRAT tables on some AMD systems -- cgit From 1679ae8f8f4148766423066aeb3dbb0a985a373a Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Fri, 23 Mar 2018 15:30:36 -0400 Subject: drm/amdkfd: Use ordered workqueue to restore processes Restoring multiple processes concurrently can lead to live-locks where each process prevents the other from validating all its BOs. v2: fix duplicate check of same variable Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_module.c | 6 +++++- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 30 ++++++++++++++++++++++++++---- 3 files changed, 32 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index b0acb0603883..e0c07d24d251 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c @@ -133,7 +133,9 @@ static int __init kfd_module_init(void) if (err < 0) goto err_topology; - kfd_process_create_wq(); + err = kfd_process_create_wq(); + if (err < 0) + goto err_create_wq; kfd_debugfs_init(); @@ -143,6 +145,8 @@ static int __init kfd_module_init(void) return 0; +err_create_wq: + kfd_topology_shutdown(); err_topology: kfd_chardev_exit(); err_ioctl: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index db27f9f13696..96a9cc0f02c9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -674,7 +674,7 @@ struct amdkfd_ioctl_desc { const char *name; }; -void kfd_process_create_wq(void); +int kfd_process_create_wq(void); void kfd_process_destroy_wq(void); struct kfd_process *kfd_create_process(struct file *filep); struct kfd_process *kfd_get_process(const struct task_struct *); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 45ef2d03a975..1711ad0642f7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -48,8 +48,17 @@ static DEFINE_MUTEX(kfd_processes_mutex); DEFINE_SRCU(kfd_processes_srcu); +/* For process termination handling */ static struct workqueue_struct *kfd_process_wq; +/* Ordered, single-threaded workqueue for restoring evicted + * processes. Restoring multiple processes concurrently under memory + * pressure can lead to processes blocking each other from validating + * their BOs and result in a live-lock situation where processes + * remain evicted indefinitely. 
+ */ +static struct workqueue_struct *kfd_restore_wq; + static struct kfd_process *find_process(const struct task_struct *thread); static void kfd_process_ref_release(struct kref *ref); static struct kfd_process *create_process(const struct task_struct *thread, @@ -59,10 +68,19 @@ static void evict_process_worker(struct work_struct *work); static void restore_process_worker(struct work_struct *work); -void kfd_process_create_wq(void) +int kfd_process_create_wq(void) { if (!kfd_process_wq) kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0); + if (!kfd_restore_wq) + kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0); + + if (!kfd_process_wq || !kfd_restore_wq) { + kfd_process_destroy_wq(); + return -ENOMEM; + } + + return 0; } void kfd_process_destroy_wq(void) @@ -71,6 +89,10 @@ void kfd_process_destroy_wq(void) destroy_workqueue(kfd_process_wq); kfd_process_wq = NULL; } + if (kfd_restore_wq) { + destroy_workqueue(kfd_restore_wq); + kfd_restore_wq = NULL; + } } static void kfd_process_free_gpuvm(struct kgd_mem *mem, @@ -869,7 +891,7 @@ static void evict_process_worker(struct work_struct *work) dma_fence_signal(p->ef); dma_fence_put(p->ef); p->ef = NULL; - schedule_delayed_work(&p->restore_work, + queue_delayed_work(kfd_restore_wq, &p->restore_work, msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)); pr_debug("Finished evicting pasid %d\n", p->pasid); @@ -918,7 +940,7 @@ static void restore_process_worker(struct work_struct *work) if (ret) { pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n", p->pasid, PROCESS_BACK_OFF_TIME_MS); - ret = schedule_delayed_work(&p->restore_work, + ret = queue_delayed_work(kfd_restore_wq, &p->restore_work, msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS)); WARN(!ret, "reschedule restore work failed\n"); return; @@ -957,7 +979,7 @@ int kfd_resume_all_processes(void) int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu); hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { - if (!schedule_delayed_work(&p->restore_work, 0)) { + if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) { pr_err("Restore process %d failed during resume\n", p->pasid); ret = -EFAULT; -- cgit
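A minimal user-space sketch of how a runtime such as the Thunk might exercise the new GPUVM memory ioctls introduced above (AMDKFD_IOC_ACQUIRE_VM, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, AMDKFD_IOC_MAP_MEMORY_TO_GPU). The argument field names follow the kernel-side handlers in kfd_chardev.c; the uapi header path, the render-node path, the gpu_id value and the flag choice are placeholder assumptions for illustration, not part of the commits themselves.

```c
/* Hypothetical sketch only: assumes a linux/kfd_ioctl.h uapi header that
 * matches the ioctls added in the patches above. gpu_id and the render
 * node path are placeholders; a real runtime reads them from the KFD
 * topology in sysfs and from libdrm.
 */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kfd_ioctl.h>

static int map_one_buffer(int kfd_fd, int drm_fd, uint32_t gpu_id,
			  uint64_t va, uint64_t size)
{
	/* Acquire the per-device VM from a DRM render node FD. */
	struct kfd_ioctl_acquire_vm_args acquire = {
		.gpu_id = gpu_id,
		.drm_fd = drm_fd,
	};
	if (ioctl(kfd_fd, AMDKFD_IOC_ACQUIRE_VM, &acquire))
		return -1;

	/* Allocate a buffer at a user-managed virtual address. */
	struct kfd_ioctl_alloc_memory_of_gpu_args alloc = {
		.va_addr = va,
		.size = size,
		.gpu_id = gpu_id,
		.flags = KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
	};
	if (ioctl(kfd_fd, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, &alloc))
		return -1;

	/* alloc.handle packs the GPU ID in the high 32 bits and the
	 * per-process IDR handle in the low 32 bits (MAKE_HANDLE in
	 * kfd_priv.h). Map it on one device; n_success is updated by
	 * the kernel to allow partial retry after a failed mapping.
	 */
	uint32_t devices[1] = { gpu_id };
	struct kfd_ioctl_map_memory_to_gpu_args map = {
		.handle = alloc.handle,
		.device_ids_array_ptr = (uintptr_t)devices,
		.n_devices = 1,
		.n_success = 0,
	};
	if (ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &map))
		return -1;

	return 0;
}

int main(void)
{
	int kfd_fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);
	int drm_fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
	int ret = 1;

	if (kfd_fd >= 0 && drm_fd >= 0)
		ret = map_one_buffer(kfd_fd, drm_fd, /*gpu_id=*/0x1234,
				     /*va=*/0x1000000, /*size=*/4096);

	if (drm_fd >= 0)
		close(drm_fd);
	if (kfd_fd >= 0)
		close(kfd_fd);
	return ret ? 1 : 0;
}
```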