Diffstat (limited to 'drivers/accel/amdxdna')
-rw-r--r--  drivers/accel/amdxdna/TODO                      |   1
-rw-r--r--  drivers/accel/amdxdna/aie2_ctx.c                | 130
-rw-r--r--  drivers/accel/amdxdna/aie2_error.c              |   8
-rw-r--r--  drivers/accel/amdxdna/aie2_message.c            |  16
-rw-r--r--  drivers/accel/amdxdna/aie2_msg_priv.h           |  10
-rw-r--r--  drivers/accel/amdxdna/aie2_pci.c                |  13
-rw-r--r--  drivers/accel/amdxdna/aie2_pci.h                |  10
-rw-r--r--  drivers/accel/amdxdna/aie2_smu.c                |   2
-rw-r--r--  drivers/accel/amdxdna/amdxdna_ctx.c             |  24
-rw-r--r--  drivers/accel/amdxdna/amdxdna_ctx.h             |   3
-rw-r--r--  drivers/accel/amdxdna/amdxdna_gem.c             | 411
-rw-r--r--  drivers/accel/amdxdna/amdxdna_gem.h             |  24
-rw-r--r--  drivers/accel/amdxdna/amdxdna_mailbox.c         |  23
-rw-r--r--  drivers/accel/amdxdna/amdxdna_mailbox.h         |   2
-rw-r--r--  drivers/accel/amdxdna/amdxdna_mailbox_helper.c  |   6
-rw-r--r--  drivers/accel/amdxdna/amdxdna_mailbox_helper.h  |   2
-rw-r--r--  drivers/accel/amdxdna/amdxdna_pci_drv.c         |  11
-rw-r--r--  drivers/accel/amdxdna/amdxdna_pci_drv.h         |   2
18 files changed, 504 insertions(+), 194 deletions(-)
diff --git a/drivers/accel/amdxdna/TODO b/drivers/accel/amdxdna/TODO index 5119bccd1917..ad8ac6e315b6 100644 --- a/drivers/accel/amdxdna/TODO +++ b/drivers/accel/amdxdna/TODO @@ -1,3 +1,2 @@ -- Add import and export BO support - Add debugfs support - Add debug BO support diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c index 5f43db02b240..e04549f64d69 100644 --- a/drivers/accel/amdxdna/aie2_ctx.c +++ b/drivers/accel/amdxdna/aie2_ctx.c @@ -34,6 +34,8 @@ static void aie2_job_release(struct kref *ref) job = container_of(ref, struct amdxdna_sched_job, refcnt); amdxdna_sched_job_cleanup(job); + atomic64_inc(&job->hwctx->job_free_cnt); + wake_up(&job->hwctx->priv->job_free_wq); if (job->out_fence) dma_fence_put(job->out_fence); kfree(job); @@ -134,7 +136,8 @@ static void aie2_hwctx_wait_for_idle(struct amdxdna_hwctx *hwctx) if (!fence) return; - dma_fence_wait(fence, false); + /* Wait up to 2 seconds for fw to finish all pending requests */ + dma_fence_wait_timeout(fence, false, msecs_to_jiffies(2000)); dma_fence_put(fence); } @@ -185,7 +188,7 @@ aie2_sched_notify(struct amdxdna_sched_job *job) } static int -aie2_sched_resp_handler(void *handle, const u32 *data, size_t size) +aie2_sched_resp_handler(void *handle, void __iomem *data, size_t size) { struct amdxdna_sched_job *job = handle; struct amdxdna_gem_obj *cmd_abo; @@ -203,7 +206,7 @@ aie2_sched_resp_handler(void *handle, const u32 *data, size_t size) goto out; } - status = *data; + status = readl(data); XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status); if (status == AIE2_STATUS_SUCCESS) amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED); @@ -216,7 +219,7 @@ out: } static int -aie2_sched_nocmd_resp_handler(void *handle, const u32 *data, size_t size) +aie2_sched_nocmd_resp_handler(void *handle, void __iomem *data, size_t size) { struct amdxdna_sched_job *job = handle; u32 ret = 0; @@ -230,7 +233,7 @@ aie2_sched_nocmd_resp_handler(void *handle, const u32 *data, size_t size) goto out; } - status = *data; + status = readl(data); XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status); out: @@ -239,14 +242,14 @@ out: } static int -aie2_sched_cmdlist_resp_handler(void *handle, const u32 *data, size_t size) +aie2_sched_cmdlist_resp_handler(void *handle, void __iomem *data, size_t size) { struct amdxdna_sched_job *job = handle; struct amdxdna_gem_obj *cmd_abo; - struct cmd_chain_resp *resp; struct amdxdna_dev *xdna; u32 fail_cmd_status; u32 fail_cmd_idx; + u32 cmd_status; u32 ret = 0; cmd_abo = job->cmd_bo; @@ -256,17 +259,17 @@ aie2_sched_cmdlist_resp_handler(void *handle, const u32 *data, size_t size) goto out; } - resp = (struct cmd_chain_resp *)data; + cmd_status = readl(data + offsetof(struct cmd_chain_resp, status)); xdna = job->hwctx->client->xdna; - XDNA_DBG(xdna, "Status 0x%x", resp->status); - if (resp->status == AIE2_STATUS_SUCCESS) { + XDNA_DBG(xdna, "Status 0x%x", cmd_status); + if (cmd_status == AIE2_STATUS_SUCCESS) { amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED); goto out; } /* Slow path to handle error, read from ringbuf on BAR */ - fail_cmd_idx = resp->fail_cmd_idx; - fail_cmd_status = resp->fail_cmd_status; + fail_cmd_idx = readl(data + offsetof(struct cmd_chain_resp, fail_cmd_idx)); + fail_cmd_status = readl(data + offsetof(struct cmd_chain_resp, fail_cmd_status)); XDNA_DBG(xdna, "Failed cmd idx %d, status 0x%x", fail_cmd_idx, fail_cmd_status); @@ -361,7 +364,7 @@ aie2_sched_job_timedout(struct drm_sched_job *sched_job) return DRM_GPU_SCHED_STAT_NOMINAL; } -const 
struct drm_sched_backend_ops sched_ops = { +static const struct drm_sched_backend_ops sched_ops = { .run_job = aie2_sched_job_run, .free_job = aie2_sched_job_free, .timedout_job = aie2_sched_job_timedout, @@ -516,6 +519,14 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx) { struct amdxdna_client *client = hwctx->client; struct amdxdna_dev *xdna = client->xdna; + const struct drm_sched_init_args args = { + .ops = &sched_ops, + .num_rqs = DRM_SCHED_PRIORITY_COUNT, + .credit_limit = HWCTX_MAX_CMDS, + .timeout = msecs_to_jiffies(HWCTX_MAX_TIMEOUT), + .name = hwctx->name, + .dev = xdna->ddev.dev, + }; struct drm_gpu_scheduler *sched; struct amdxdna_hwctx_priv *priv; struct amdxdna_gem_obj *heap; @@ -573,9 +584,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx) might_lock(&priv->io_lock); fs_reclaim_release(GFP_KERNEL); - ret = drm_sched_init(sched, &sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT, - HWCTX_MAX_CMDS, 0, msecs_to_jiffies(HWCTX_MAX_TIMEOUT), - NULL, NULL, hwctx->name, xdna->ddev.dev); + ret = drm_sched_init(sched, &args); if (ret) { XDNA_ERR(xdna, "Failed to init DRM scheduler. ret %d", ret); goto free_cmd_bufs; @@ -616,6 +625,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx) hwctx->status = HWCTX_STAT_INIT; ndev = xdna->dev_handle; ndev->hwctx_num++; + init_waitqueue_head(&priv->job_free_wq); XDNA_DBG(xdna, "hwctx %s init completed", hwctx->name); @@ -652,25 +662,23 @@ void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx) xdna = hwctx->client->xdna; ndev = xdna->dev_handle; ndev->hwctx_num--; - drm_sched_wqueue_stop(&hwctx->priv->sched); - /* Now, scheduler will not send command to device. */ + XDNA_DBG(xdna, "%s sequence number %lld", hwctx->name, hwctx->priv->seq); + drm_sched_entity_destroy(&hwctx->priv->entity); + + aie2_hwctx_wait_for_idle(hwctx); + + /* Request fw to destroy hwctx and cancel the rest pending requests */ aie2_release_resource(hwctx); - /* - * All submitted commands are aborted. - * Restart scheduler queues to cleanup jobs. The amdxdna_sched_job_run() - * will return NODEV if it is called. 
- */ - drm_sched_wqueue_start(&hwctx->priv->sched); + /* Wait for all submitted jobs to be completed or canceled */ + wait_event(hwctx->priv->job_free_wq, + atomic64_read(&hwctx->job_submit_cnt) == + atomic64_read(&hwctx->job_free_cnt)); - aie2_hwctx_wait_for_idle(hwctx); - drm_sched_entity_destroy(&hwctx->priv->entity); drm_sched_fini(&hwctx->priv->sched); aie2_ctx_syncobj_destroy(hwctx); - XDNA_DBG(xdna, "%s sequence number %lld", hwctx->name, hwctx->priv->seq); - for (idx = 0; idx < ARRAY_SIZE(hwctx->priv->cmd_buf); idx++) drm_gem_object_put(to_gobj(hwctx->priv->cmd_buf[idx])); amdxdna_gem_unpin(hwctx->priv->heap); @@ -750,27 +758,42 @@ int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *bu static int aie2_populate_range(struct amdxdna_gem_obj *abo) { struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev); - struct mm_struct *mm = abo->mem.notifier.mm; - struct hmm_range range = { 0 }; + struct amdxdna_umap *mapp; unsigned long timeout; + struct mm_struct *mm; + bool found; int ret; - XDNA_INFO_ONCE(xdna, "populate memory range %llx size %lx", - abo->mem.userptr, abo->mem.size); - range.notifier = &abo->mem.notifier; - range.start = abo->mem.userptr; - range.end = abo->mem.userptr + abo->mem.size; - range.hmm_pfns = abo->mem.pfns; - range.default_flags = HMM_PFN_REQ_FAULT; + timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); +again: + found = false; + down_write(&xdna->notifier_lock); + list_for_each_entry(mapp, &abo->mem.umap_list, node) { + if (mapp->invalid) { + found = true; + break; + } + } + + if (!found) { + abo->mem.map_invalid = false; + up_write(&xdna->notifier_lock); + return 0; + } + kref_get(&mapp->refcnt); + up_write(&xdna->notifier_lock); - if (!mmget_not_zero(mm)) + XDNA_DBG(xdna, "populate memory range %lx %lx", + mapp->vma->vm_start, mapp->vma->vm_end); + mm = mapp->notifier.mm; + if (!mmget_not_zero(mm)) { + amdxdna_umap_put(mapp); return -EFAULT; + } - timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); -again: - range.notifier_seq = mmu_interval_read_begin(&abo->mem.notifier); + mapp->range.notifier_seq = mmu_interval_read_begin(&mapp->notifier); mmap_read_lock(mm); - ret = hmm_range_fault(&range); + ret = hmm_range_fault(&mapp->range); mmap_read_unlock(mm); if (ret) { if (time_after(jiffies, timeout)) { @@ -778,21 +801,27 @@ again: goto put_mm; } - if (ret == -EBUSY) + if (ret == -EBUSY) { + amdxdna_umap_put(mapp); goto again; + } goto put_mm; } - down_read(&xdna->notifier_lock); - if (mmu_interval_read_retry(&abo->mem.notifier, range.notifier_seq)) { - up_read(&xdna->notifier_lock); + down_write(&xdna->notifier_lock); + if (mmu_interval_read_retry(&mapp->notifier, mapp->range.notifier_seq)) { + up_write(&xdna->notifier_lock); + amdxdna_umap_put(mapp); goto again; } - abo->mem.map_invalid = false; - up_read(&xdna->notifier_lock); + mapp->invalid = false; + up_write(&xdna->notifier_lock); + amdxdna_umap_put(mapp); + goto again; put_mm: + amdxdna_umap_put(mapp); mmput(mm); return ret; } @@ -879,6 +908,7 @@ retry: drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx); aie2_job_put(job); + atomic64_inc(&hwctx->job_submit_cnt); return 0; @@ -899,10 +929,6 @@ void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo, struct drm_gem_object *gobj = to_gobj(abo); long ret; - down_write(&xdna->notifier_lock); - abo->mem.map_invalid = true; - mmu_interval_set_seq(&abo->mem.notifier, cur_seq); - up_write(&xdna->notifier_lock); ret = dma_resv_wait_timeout(gobj->resv, DMA_RESV_USAGE_BOOKKEEP, true, 
MAX_SCHEDULE_TIMEOUT); if (!ret || ret == -ERESTARTSYS) diff --git a/drivers/accel/amdxdna/aie2_error.c b/drivers/accel/amdxdna/aie2_error.c index b1defaa8513b..5ee905632a39 100644 --- a/drivers/accel/amdxdna/aie2_error.c +++ b/drivers/accel/amdxdna/aie2_error.c @@ -209,16 +209,14 @@ static u32 aie2_error_backtrack(struct amdxdna_dev_hdl *ndev, void *err_info, u3 return err_col; } -static int aie2_error_async_cb(void *handle, const u32 *data, size_t size) +static int aie2_error_async_cb(void *handle, void __iomem *data, size_t size) { - struct async_event_msg_resp *resp; struct async_event *e = handle; if (data) { - resp = (struct async_event_msg_resp *)data; - e->resp.type = resp->type; + e->resp.type = readl(data + offsetof(struct async_event_msg_resp, type)); wmb(); /* Update status in the end, so that no lock for here */ - e->resp.status = resp->status; + e->resp.status = readl(data + offsetof(struct async_event_msg_resp, status)); } queue_work(e->wq, &e->work); return 0; diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c index 9e2c9a44f76a..82412eec9a4b 100644 --- a/drivers/accel/amdxdna/aie2_message.c +++ b/drivers/accel/amdxdna/aie2_message.c @@ -356,7 +356,7 @@ fail: } int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size, - void *handle, int (*cb)(void*, const u32 *, size_t)) + void *handle, int (*cb)(void*, void __iomem *, size_t)) { struct async_event_msg_req req = { 0 }; struct xdna_mailbox_msg msg = { @@ -435,7 +435,7 @@ int aie2_config_cu(struct amdxdna_hwctx *hwctx) } int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, - int (*notify_cb)(void *, const u32 *, size_t)) + int (*notify_cb)(void *, void __iomem *, size_t)) { struct mailbox_channel *chann = hwctx->priv->mbox_chann; struct amdxdna_dev *xdna = hwctx->client->xdna; @@ -525,7 +525,7 @@ aie2_cmdlist_fill_one_slot_cf(void *cmd_buf, u32 offset, if (!payload) return -EINVAL; - if (!slot_cf_has_space(offset, payload_len)) + if (!slot_has_space(*buf, offset, payload_len)) return -ENOSPC; buf->cu_idx = cu_idx; @@ -558,7 +558,7 @@ aie2_cmdlist_fill_one_slot_dpu(void *cmd_buf, u32 offset, if (payload_len < sizeof(*sn) || arg_sz > MAX_DPU_ARGS_SIZE) return -EINVAL; - if (!slot_dpu_has_space(offset, arg_sz)) + if (!slot_has_space(*buf, offset, arg_sz)) return -ENOSPC; buf->inst_buf_addr = sn->buffer; @@ -569,7 +569,7 @@ aie2_cmdlist_fill_one_slot_dpu(void *cmd_buf, u32 offset, memcpy(buf->args, sn->prop_args, arg_sz); /* Accurate buf size to hint firmware to do necessary copy */ - *size += sizeof(*buf) + arg_sz; + *size = sizeof(*buf) + arg_sz; return 0; } @@ -640,7 +640,7 @@ aie2_cmd_op_to_msg_op(u32 op) int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, - int (*notify_cb)(void *, const u32 *, size_t)) + int (*notify_cb)(void *, void __iomem *, size_t)) { struct amdxdna_gem_obj *cmdbuf_abo = aie2_cmdlist_get_cmd_buf(job); struct mailbox_channel *chann = hwctx->priv->mbox_chann; @@ -705,7 +705,7 @@ int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx, int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, - int (*notify_cb)(void *, const u32 *, size_t)) + int (*notify_cb)(void *, void __iomem *, size_t)) { struct amdxdna_gem_obj *cmdbuf_abo = aie2_cmdlist_get_cmd_buf(job); struct mailbox_channel *chann = hwctx->priv->mbox_chann; @@ -740,7 +740,7 @@ int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx, } int aie2_sync_bo(struct amdxdna_hwctx 
*hwctx, struct amdxdna_sched_job *job, - int (*notify_cb)(void *, const u32 *, size_t)) + int (*notify_cb)(void *, void __iomem *, size_t)) { struct mailbox_channel *chann = hwctx->priv->mbox_chann; struct amdxdna_gem_obj *abo = to_xdna_obj(job->bos[0]); diff --git a/drivers/accel/amdxdna/aie2_msg_priv.h b/drivers/accel/amdxdna/aie2_msg_priv.h index 4e02e744b470..6df9065b13f6 100644 --- a/drivers/accel/amdxdna/aie2_msg_priv.h +++ b/drivers/accel/amdxdna/aie2_msg_priv.h @@ -319,18 +319,16 @@ struct async_event_msg_resp { } __packed; #define MAX_CHAIN_CMDBUF_SIZE SZ_4K -#define slot_cf_has_space(offset, payload_size) \ - (MAX_CHAIN_CMDBUF_SIZE - ((offset) + (payload_size)) > \ - offsetof(struct cmd_chain_slot_execbuf_cf, args[0])) +#define slot_has_space(slot, offset, payload_size) \ + (MAX_CHAIN_CMDBUF_SIZE >= (offset) + (payload_size) + \ + sizeof(typeof(slot))) + struct cmd_chain_slot_execbuf_cf { __u32 cu_idx; __u32 arg_cnt; __u32 args[] __counted_by(arg_cnt); }; -#define slot_dpu_has_space(offset, payload_size) \ - (MAX_CHAIN_CMDBUF_SIZE - ((offset) + (payload_size)) > \ - offsetof(struct cmd_chain_slot_dpu, args[0])) struct cmd_chain_slot_dpu { __u64 inst_buf_addr; __u32 inst_size; diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c index 5a058e565b01..c6cf7068d23c 100644 --- a/drivers/accel/amdxdna/aie2_pci.c +++ b/drivers/accel/amdxdna/aie2_pci.c @@ -512,12 +512,6 @@ static int aie2_init(struct amdxdna_dev *xdna) goto release_fw; } - ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); - if (ret) { - XDNA_ERR(xdna, "Enable PASID failed, ret %d", ret); - goto free_irq; - } - psp_conf.fw_size = fw->size; psp_conf.fw_buf = fw->data; for (i = 0; i < PSP_MAX_REGS; i++) @@ -526,14 +520,14 @@ static int aie2_init(struct amdxdna_dev *xdna) if (!ndev->psp_hdl) { XDNA_ERR(xdna, "failed to create psp"); ret = -ENOMEM; - goto disable_sva; + goto free_irq; } xdna->dev_handle = ndev; ret = aie2_hw_start(xdna); if (ret) { XDNA_ERR(xdna, "start npu failed, ret %d", ret); - goto disable_sva; + goto free_irq; } ret = aie2_mgmt_fw_query(ndev); @@ -584,8 +578,6 @@ async_event_free: aie2_error_async_events_free(ndev); stop_hw: aie2_hw_stop(xdna); -disable_sva: - iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); free_irq: pci_free_irq_vectors(pdev); release_fw: @@ -601,7 +593,6 @@ static void aie2_fini(struct amdxdna_dev *xdna) aie2_hw_stop(xdna); aie2_error_async_events_free(ndev); - iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); pci_free_irq_vectors(pdev); } diff --git a/drivers/accel/amdxdna/aie2_pci.h b/drivers/accel/amdxdna/aie2_pci.h index f2d95531ddc2..385914840eaa 100644 --- a/drivers/accel/amdxdna/aie2_pci.h +++ b/drivers/accel/amdxdna/aie2_pci.h @@ -271,18 +271,18 @@ int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwc int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u64 size); int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, u32 size, u32 *cols_filled); int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size, - void *handle, int (*cb)(void*, const u32 *, size_t)); + void *handle, int (*cb)(void*, void __iomem *, size_t)); int aie2_config_cu(struct amdxdna_hwctx *hwctx); int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, - int (*notify_cb)(void *, const u32 *, size_t)); + int (*notify_cb)(void *, void __iomem *, size_t)); int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx, struct 
amdxdna_sched_job *job, - int (*notify_cb)(void *, const u32 *, size_t)); + int (*notify_cb)(void *, void __iomem *, size_t)); int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, - int (*notify_cb)(void *, const u32 *, size_t)); + int (*notify_cb)(void *, void __iomem *, size_t)); int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, - int (*notify_cb)(void *, const u32 *, size_t)); + int (*notify_cb)(void *, void __iomem *, size_t)); /* aie2_hwctx.c */ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx); diff --git a/drivers/accel/amdxdna/aie2_smu.c b/drivers/accel/amdxdna/aie2_smu.c index 73388443c676..d303701b0ded 100644 --- a/drivers/accel/amdxdna/aie2_smu.c +++ b/drivers/accel/amdxdna/aie2_smu.c @@ -64,6 +64,7 @@ int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level) if (ret) { XDNA_ERR(ndev->xdna, "Set npu clock to %d failed, ret %d\n", ndev->priv->dpm_clk_tbl[dpm_level].npuclk, ret); + return ret; } ndev->npuclk_freq = freq; @@ -72,6 +73,7 @@ int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level) if (ret) { XDNA_ERR(ndev->xdna, "Set h clock to %d failed, ret %d\n", ndev->priv->dpm_clk_tbl[dpm_level].hclk, ret); + return ret; } ndev->hclk_freq = freq; ndev->dpm_level = dpm_level; diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c index d11b1c83d9c3..be073224bd69 100644 --- a/drivers/accel/amdxdna/amdxdna_ctx.c +++ b/drivers/accel/amdxdna/amdxdna_ctx.c @@ -220,6 +220,8 @@ int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct dr args->syncobj_handle = hwctx->syncobj_hdl; mutex_unlock(&xdna->dev_lock); + atomic64_set(&hwctx->job_submit_cnt, 0); + atomic64_set(&hwctx->job_free_cnt, 0); XDNA_DBG(xdna, "PID %d create HW context %d, ret %d", client->pid, args->handle, ret); drm_dev_exit(idx); return 0; @@ -494,11 +496,11 @@ static int amdxdna_drm_submit_execbuf(struct amdxdna_client *client, struct amdxdna_drm_exec_cmd *args) { struct amdxdna_dev *xdna = client->xdna; - u32 *arg_bo_hdls; + u32 *arg_bo_hdls = NULL; u32 cmd_bo_hdl; int ret; - if (!args->arg_count || args->arg_count > MAX_ARG_COUNT) { + if (args->arg_count > MAX_ARG_COUNT) { XDNA_ERR(xdna, "Invalid arg bo count %d", args->arg_count); return -EINVAL; } @@ -510,14 +512,16 @@ static int amdxdna_drm_submit_execbuf(struct amdxdna_client *client, } cmd_bo_hdl = (u32)args->cmd_handles; - arg_bo_hdls = kcalloc(args->arg_count, sizeof(u32), GFP_KERNEL); - if (!arg_bo_hdls) - return -ENOMEM; - ret = copy_from_user(arg_bo_hdls, u64_to_user_ptr(args->args), - args->arg_count * sizeof(u32)); - if (ret) { - ret = -EFAULT; - goto free_cmd_bo_hdls; + if (args->arg_count) { + arg_bo_hdls = kcalloc(args->arg_count, sizeof(u32), GFP_KERNEL); + if (!arg_bo_hdls) + return -ENOMEM; + ret = copy_from_user(arg_bo_hdls, u64_to_user_ptr(args->args), + args->arg_count * sizeof(u32)); + if (ret) { + ret = -EFAULT; + goto free_cmd_bo_hdls; + } } ret = amdxdna_cmd_submit(client, cmd_bo_hdl, arg_bo_hdls, diff --git a/drivers/accel/amdxdna/amdxdna_ctx.h b/drivers/accel/amdxdna/amdxdna_ctx.h index 80b0304193ec..f0a4a8586d85 100644 --- a/drivers/accel/amdxdna/amdxdna_ctx.h +++ b/drivers/accel/amdxdna/amdxdna_ctx.h @@ -87,6 +87,9 @@ struct amdxdna_hwctx { struct amdxdna_qos_info qos; struct amdxdna_hwctx_param_config_cu *cus; u32 syncobj_hdl; + + atomic64_t job_submit_cnt; + atomic64_t job_free_cnt ____cacheline_aligned_in_smp; }; #define drm_job_to_xdna_job(j) \ diff --git a/drivers/accel/amdxdna/amdxdna_gem.c 
b/drivers/accel/amdxdna/amdxdna_gem.c index 606433d73236..26831ec69f89 100644 --- a/drivers/accel/amdxdna/amdxdna_gem.c +++ b/drivers/accel/amdxdna/amdxdna_gem.c @@ -9,7 +9,10 @@ #include <drm/drm_gem.h> #include <drm/drm_gem_shmem_helper.h> #include <drm/gpu_scheduler.h> +#include <linux/dma-buf.h> +#include <linux/dma-direct.h> #include <linux/iosys-map.h> +#include <linux/pagemap.h> #include <linux/vmalloc.h> #include "amdxdna_ctx.h" @@ -18,6 +21,8 @@ #define XDNA_MAX_CMD_BO_SIZE SZ_32K +MODULE_IMPORT_NS("DMA_BUF"); + static int amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap) { @@ -55,57 +60,38 @@ amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap) return 0; } -static void amdxdna_gem_obj_free(struct drm_gem_object *gobj) -{ - struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev); - struct amdxdna_gem_obj *abo = to_xdna_obj(gobj); - struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva); - - XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr); - if (abo->pinned) - amdxdna_gem_unpin(abo); - - if (abo->type == AMDXDNA_BO_DEV) { - mutex_lock(&abo->client->mm_lock); - drm_mm_remove_node(&abo->mm_node); - mutex_unlock(&abo->client->mm_lock); - - vunmap(abo->mem.kva); - drm_gem_object_put(to_gobj(abo->dev_heap)); - drm_gem_object_release(gobj); - mutex_destroy(&abo->lock); - kfree(abo); - return; - } - - if (abo->type == AMDXDNA_BO_DEV_HEAP) - drm_mm_takedown(&abo->mm); - - drm_gem_vunmap_unlocked(gobj, &map); - mutex_destroy(&abo->lock); - drm_gem_shmem_free(&abo->base); -} - -static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = { - .free = amdxdna_gem_obj_free, -}; - static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni, const struct mmu_notifier_range *range, unsigned long cur_seq) { - struct amdxdna_gem_obj *abo = container_of(mni, struct amdxdna_gem_obj, - mem.notifier); - struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev); + struct amdxdna_umap *mapp = container_of(mni, struct amdxdna_umap, notifier); + struct amdxdna_gem_obj *abo = mapp->abo; + struct amdxdna_dev *xdna; - XDNA_DBG(xdna, "Invalid range 0x%llx, 0x%lx, type %d", - abo->mem.userptr, abo->mem.size, abo->type); + xdna = to_xdna_dev(to_gobj(abo)->dev); + XDNA_DBG(xdna, "Invalidating range 0x%lx, 0x%lx, type %d", + mapp->vma->vm_start, mapp->vma->vm_end, abo->type); if (!mmu_notifier_range_blockable(range)) return false; + down_write(&xdna->notifier_lock); + abo->mem.map_invalid = true; + mapp->invalid = true; + mmu_interval_set_seq(&mapp->notifier, cur_seq); + up_write(&xdna->notifier_lock); + xdna->dev_info->ops->hmm_invalidate(abo, cur_seq); + if (range->event == MMU_NOTIFY_UNMAP) { + down_write(&xdna->notifier_lock); + if (!mapp->unmapped) { + queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work); + mapp->unmapped = true; + } + up_write(&xdna->notifier_lock); + } + return true; } @@ -113,102 +99,310 @@ static const struct mmu_interval_notifier_ops amdxdna_hmm_ops = { .invalidate = amdxdna_hmm_invalidate, }; -static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo) +static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo, + struct vm_area_struct *vma) { struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev); + struct amdxdna_umap *mapp; + + down_read(&xdna->notifier_lock); + list_for_each_entry(mapp, &abo->mem.umap_list, node) { + if (!vma || mapp->vma == vma) { + if (!mapp->unmapped) { + queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work); + mapp->unmapped = true; + } + if (vma) + break; + } + } + 
up_read(&xdna->notifier_lock); +} - if (!xdna->dev_info->ops->hmm_invalidate) - return; +static void amdxdna_umap_release(struct kref *ref) +{ + struct amdxdna_umap *mapp = container_of(ref, struct amdxdna_umap, refcnt); + struct vm_area_struct *vma = mapp->vma; + struct amdxdna_dev *xdna; + + mmu_interval_notifier_remove(&mapp->notifier); + if (is_import_bo(mapp->abo) && vma->vm_file && vma->vm_file->f_mapping) + mapping_clear_unevictable(vma->vm_file->f_mapping); + + xdna = to_xdna_dev(to_gobj(mapp->abo)->dev); + down_write(&xdna->notifier_lock); + list_del(&mapp->node); + up_write(&xdna->notifier_lock); + + kvfree(mapp->range.hmm_pfns); + kfree(mapp); +} + +void amdxdna_umap_put(struct amdxdna_umap *mapp) +{ + kref_put(&mapp->refcnt, amdxdna_umap_release); +} + +static void amdxdna_hmm_unreg_work(struct work_struct *work) +{ + struct amdxdna_umap *mapp = container_of(work, struct amdxdna_umap, + hmm_unreg_work); - mmu_interval_notifier_remove(&abo->mem.notifier); - kvfree(abo->mem.pfns); - abo->mem.pfns = NULL; + amdxdna_umap_put(mapp); } -static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo, unsigned long addr, - size_t len) +static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo, + struct vm_area_struct *vma) { struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev); + unsigned long len = vma->vm_end - vma->vm_start; + unsigned long addr = vma->vm_start; + struct amdxdna_umap *mapp; u32 nr_pages; int ret; if (!xdna->dev_info->ops->hmm_invalidate) return 0; - if (abo->mem.pfns) - return -EEXIST; + mapp = kzalloc(sizeof(*mapp), GFP_KERNEL); + if (!mapp) + return -ENOMEM; nr_pages = (PAGE_ALIGN(addr + len) - (addr & PAGE_MASK)) >> PAGE_SHIFT; - abo->mem.pfns = kvcalloc(nr_pages, sizeof(*abo->mem.pfns), - GFP_KERNEL); - if (!abo->mem.pfns) - return -ENOMEM; + mapp->range.hmm_pfns = kvcalloc(nr_pages, sizeof(*mapp->range.hmm_pfns), + GFP_KERNEL); + if (!mapp->range.hmm_pfns) { + ret = -ENOMEM; + goto free_map; + } - ret = mmu_interval_notifier_insert_locked(&abo->mem.notifier, + ret = mmu_interval_notifier_insert_locked(&mapp->notifier, current->mm, addr, len, &amdxdna_hmm_ops); if (ret) { XDNA_ERR(xdna, "Insert mmu notifier failed, ret %d", ret); - kvfree(abo->mem.pfns); + goto free_pfns; } - abo->mem.userptr = addr; + mapp->range.notifier = &mapp->notifier; + mapp->range.start = vma->vm_start; + mapp->range.end = vma->vm_end; + mapp->range.default_flags = HMM_PFN_REQ_FAULT; + mapp->vma = vma; + mapp->abo = abo; + kref_init(&mapp->refcnt); + + if (abo->mem.userptr == AMDXDNA_INVALID_ADDR) + abo->mem.userptr = addr; + INIT_WORK(&mapp->hmm_unreg_work, amdxdna_hmm_unreg_work); + if (is_import_bo(abo) && vma->vm_file && vma->vm_file->f_mapping) + mapping_set_unevictable(vma->vm_file->f_mapping); + + down_write(&xdna->notifier_lock); + list_add_tail(&mapp->node, &abo->mem.umap_list); + up_write(&xdna->notifier_lock); + + return 0; + +free_pfns: + kvfree(mapp->range.hmm_pfns); +free_map: + kfree(mapp); return ret; } +static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo, + struct vm_area_struct *vma) +{ + struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev); + unsigned long num_pages = vma_pages(vma); + unsigned long offset = 0; + int ret; + + if (!is_import_bo(abo)) { + ret = drm_gem_shmem_mmap(&abo->base, vma); + if (ret) { + XDNA_ERR(xdna, "Failed shmem mmap %d", ret); + return ret; + } + + /* The buffer is based on memory pages. Fix the flag. 
*/ + vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP); + ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages, + &num_pages); + if (ret) { + XDNA_ERR(xdna, "Failed insert pages %d", ret); + vma->vm_ops->close(vma); + return ret; + } + + return 0; + } + + vma->vm_private_data = NULL; + vma->vm_ops = NULL; + ret = dma_buf_mmap(to_gobj(abo)->dma_buf, vma, 0); + if (ret) { + XDNA_ERR(xdna, "Failed to mmap dma buf %d", ret); + return ret; + } + + do { + vm_fault_t fault_ret; + + fault_ret = handle_mm_fault(vma, vma->vm_start + offset, + FAULT_FLAG_WRITE, NULL); + if (fault_ret & VM_FAULT_ERROR) { + vma->vm_ops->close(vma); + XDNA_ERR(xdna, "Fault in page failed"); + return -EFAULT; + } + + offset += PAGE_SIZE; + } while (--num_pages); + + /* Drop the reference drm_gem_mmap_obj() acquired.*/ + drm_gem_object_put(to_gobj(abo)); + + return 0; +} + static int amdxdna_gem_obj_mmap(struct drm_gem_object *gobj, struct vm_area_struct *vma) { + struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev); struct amdxdna_gem_obj *abo = to_xdna_obj(gobj); - unsigned long num_pages; int ret; - ret = amdxdna_hmm_register(abo, vma->vm_start, gobj->size); + ret = amdxdna_hmm_register(abo, vma); if (ret) return ret; + ret = amdxdna_insert_pages(abo, vma); + if (ret) { + XDNA_ERR(xdna, "Failed insert pages, ret %d", ret); + goto hmm_unreg; + } + + XDNA_DBG(xdna, "BO map_offset 0x%llx type %d userptr 0x%lx size 0x%lx", + drm_vma_node_offset_addr(&gobj->vma_node), abo->type, + vma->vm_start, gobj->size); + return 0; + +hmm_unreg: + amdxdna_hmm_unregister(abo, vma); + return ret; +} + +static int amdxdna_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) +{ + struct drm_gem_object *gobj = dma_buf->priv; + struct amdxdna_gem_obj *abo = to_xdna_obj(gobj); + unsigned long num_pages = vma_pages(vma); + int ret; + + vma->vm_ops = &drm_gem_shmem_vm_ops; + vma->vm_private_data = gobj; + + drm_gem_object_get(gobj); ret = drm_gem_shmem_mmap(&abo->base, vma); if (ret) - goto hmm_unreg; + goto put_obj; - num_pages = gobj->size >> PAGE_SHIFT; - /* Try to insert the pages */ + /* The buffer is based on memory pages. Fix the flag. 
*/ vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP); - ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages, &num_pages); + ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages, + &num_pages); if (ret) - XDNA_ERR(abo->client->xdna, "Failed insert pages, ret %d", ret); + goto close_vma; return 0; -hmm_unreg: - amdxdna_hmm_unregister(abo); +close_vma: + vma->vm_ops->close(vma); +put_obj: + drm_gem_object_put(gobj); return ret; } -static vm_fault_t amdxdna_gem_vm_fault(struct vm_fault *vmf) +static const struct dma_buf_ops amdxdna_dmabuf_ops = { + .attach = drm_gem_map_attach, + .detach = drm_gem_map_detach, + .map_dma_buf = drm_gem_map_dma_buf, + .unmap_dma_buf = drm_gem_unmap_dma_buf, + .release = drm_gem_dmabuf_release, + .mmap = amdxdna_gem_dmabuf_mmap, + .vmap = drm_gem_dmabuf_vmap, + .vunmap = drm_gem_dmabuf_vunmap, +}; + +static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags) { - return drm_gem_shmem_vm_ops.fault(vmf); + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + exp_info.ops = &amdxdna_dmabuf_ops; + exp_info.size = gobj->size; + exp_info.flags = flags; + exp_info.priv = gobj; + exp_info.resv = gobj->resv; + + return drm_gem_dmabuf_export(gobj->dev, &exp_info); } -static void amdxdna_gem_vm_open(struct vm_area_struct *vma) +static void amdxdna_imported_obj_free(struct amdxdna_gem_obj *abo) { - drm_gem_shmem_vm_ops.open(vma); + dma_buf_unmap_attachment_unlocked(abo->attach, abo->base.sgt, DMA_BIDIRECTIONAL); + dma_buf_detach(abo->dma_buf, abo->attach); + dma_buf_put(abo->dma_buf); + drm_gem_object_release(to_gobj(abo)); + kfree(abo); } -static void amdxdna_gem_vm_close(struct vm_area_struct *vma) +static void amdxdna_gem_obj_free(struct drm_gem_object *gobj) { - struct drm_gem_object *gobj = vma->vm_private_data; + struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev); + struct amdxdna_gem_obj *abo = to_xdna_obj(gobj); + struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva); + + XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr); + + amdxdna_hmm_unregister(abo, NULL); + flush_workqueue(xdna->notifier_wq); + + if (abo->pinned) + amdxdna_gem_unpin(abo); - amdxdna_hmm_unregister(to_xdna_obj(gobj)); - drm_gem_shmem_vm_ops.close(vma); + if (abo->type == AMDXDNA_BO_DEV) { + mutex_lock(&abo->client->mm_lock); + drm_mm_remove_node(&abo->mm_node); + mutex_unlock(&abo->client->mm_lock); + + vunmap(abo->mem.kva); + drm_gem_object_put(to_gobj(abo->dev_heap)); + drm_gem_object_release(gobj); + mutex_destroy(&abo->lock); + kfree(abo); + return; + } + + if (abo->type == AMDXDNA_BO_DEV_HEAP) + drm_mm_takedown(&abo->mm); + + drm_gem_vunmap(gobj, &map); + mutex_destroy(&abo->lock); + + if (is_import_bo(abo)) { + amdxdna_imported_obj_free(abo); + return; + } + + drm_gem_shmem_free(&abo->base); } -static const struct vm_operations_struct amdxdna_gem_vm_ops = { - .fault = amdxdna_gem_vm_fault, - .open = amdxdna_gem_vm_open, - .close = amdxdna_gem_vm_close, +static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = { + .free = amdxdna_gem_obj_free, }; static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = { @@ -220,7 +414,8 @@ static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = { .vmap = drm_gem_shmem_object_vmap, .vunmap = drm_gem_shmem_object_vunmap, .mmap = amdxdna_gem_obj_mmap, - .vm_ops = &amdxdna_gem_vm_ops, + .vm_ops = &drm_gem_shmem_vm_ops, + .export = amdxdna_gem_prime_export, }; static struct amdxdna_gem_obj * @@ -239,6 +434,7 @@ amdxdna_gem_create_obj(struct drm_device *dev, size_t size) 
abo->mem.userptr = AMDXDNA_INVALID_ADDR; abo->mem.dev_addr = AMDXDNA_INVALID_ADDR; abo->mem.size = size; + INIT_LIST_HEAD(&abo->mem.umap_list); return abo; } @@ -258,6 +454,51 @@ amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size) return to_gobj(abo); } +struct drm_gem_object * +amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf) +{ + struct dma_buf_attachment *attach; + struct amdxdna_gem_obj *abo; + struct drm_gem_object *gobj; + struct sg_table *sgt; + int ret; + + get_dma_buf(dma_buf); + + attach = dma_buf_attach(dma_buf, dev->dev); + if (IS_ERR(attach)) { + ret = PTR_ERR(attach); + goto put_buf; + } + + sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sgt)) { + ret = PTR_ERR(sgt); + goto fail_detach; + } + + gobj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt); + if (IS_ERR(gobj)) { + ret = PTR_ERR(gobj); + goto fail_unmap; + } + + abo = to_xdna_obj(gobj); + abo->attach = attach; + abo->dma_buf = dma_buf; + + return gobj; + +fail_unmap: + dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL); +fail_detach: + dma_buf_detach(dma_buf, attach); +put_buf: + dma_buf_put(dma_buf); + + return ERR_PTR(ret); +} + static struct amdxdna_gem_obj * amdxdna_drm_alloc_shmem(struct drm_device *dev, struct amdxdna_drm_create_bo *args, @@ -417,7 +658,7 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev, abo->type = AMDXDNA_BO_CMD; abo->client = filp->driver_priv; - ret = drm_gem_vmap_unlocked(to_gobj(abo), &map); + ret = drm_gem_vmap(to_gobj(abo), &map); if (ret) { XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret); goto release_obj; @@ -483,6 +724,9 @@ int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo) struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev); int ret; + if (is_import_bo(abo)) + return 0; + switch (abo->type) { case AMDXDNA_BO_SHMEM: case AMDXDNA_BO_DEV_HEAP: @@ -515,6 +759,9 @@ int amdxdna_gem_pin(struct amdxdna_gem_obj *abo) void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo) { + if (is_import_bo(abo)) + return; + if (abo->type == AMDXDNA_BO_DEV) abo = abo->dev_heap; @@ -606,7 +853,9 @@ int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev, goto put_obj; } - if (abo->type == AMDXDNA_BO_DEV) + if (is_import_bo(abo)) + drm_clflush_sg(abo->base.sgt); + else if (abo->type == AMDXDNA_BO_DEV) drm_clflush_pages(abo->mem.pages, abo->mem.nr_pages); else drm_clflush_pages(abo->base.pages, gobj->size >> PAGE_SHIFT); diff --git a/drivers/accel/amdxdna/amdxdna_gem.h b/drivers/accel/amdxdna/amdxdna_gem.h index 8ccc0375dd9d..aee97e971d6d 100644 --- a/drivers/accel/amdxdna/amdxdna_gem.h +++ b/drivers/accel/amdxdna/amdxdna_gem.h @@ -6,6 +6,20 @@ #ifndef _AMDXDNA_GEM_H_ #define _AMDXDNA_GEM_H_ +#include <linux/hmm.h> + +struct amdxdna_umap { + struct vm_area_struct *vma; + struct mmu_interval_notifier notifier; + struct hmm_range range; + struct work_struct hmm_unreg_work; + struct amdxdna_gem_obj *abo; + struct list_head node; + struct kref refcnt; + bool invalid; + bool unmapped; +}; + struct amdxdna_mem { u64 userptr; void *kva; @@ -13,8 +27,7 @@ struct amdxdna_mem { size_t size; struct page **pages; u32 nr_pages; - struct mmu_interval_notifier notifier; - unsigned long *pfns; + struct list_head umap_list; bool map_invalid; }; @@ -31,9 +44,12 @@ struct amdxdna_gem_obj { struct amdxdna_gem_obj *dev_heap; /* For AMDXDNA_BO_DEV */ struct drm_mm_node mm_node; /* For AMDXDNA_BO_DEV */ u32 assigned_hwctx; + struct dma_buf *dma_buf; + struct dma_buf_attachment *attach; }; #define to_gobj(obj) (&(obj)->base.base) 
+#define is_import_bo(obj) ((obj)->attach) static inline struct amdxdna_gem_obj *to_xdna_obj(struct drm_gem_object *gobj) { @@ -47,8 +63,12 @@ static inline void amdxdna_gem_put_obj(struct amdxdna_gem_obj *abo) drm_gem_object_put(to_gobj(abo)); } +void amdxdna_umap_put(struct amdxdna_umap *mapp); + struct drm_gem_object * amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size); +struct drm_gem_object * +amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf); struct amdxdna_gem_obj * amdxdna_drm_alloc_dev_bo(struct drm_device *dev, struct amdxdna_drm_create_bo *args, diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.c b/drivers/accel/amdxdna/amdxdna_mailbox.c index e5301fac1397..da1ac89bb78f 100644 --- a/drivers/accel/amdxdna/amdxdna_mailbox.c +++ b/drivers/accel/amdxdna/amdxdna_mailbox.c @@ -91,7 +91,7 @@ struct mailbox_pkg { struct mailbox_msg { void *handle; - int (*notify_cb)(void *handle, const u32 *data, size_t size); + int (*notify_cb)(void *handle, void __iomem *data, size_t size); size_t pkg_size; /* package size in bytes */ struct mailbox_pkg pkg; }; @@ -244,7 +244,7 @@ no_space: static int mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *header, - void *data) + void __iomem *data) { struct mailbox_msg *mb_msg; int msg_id; @@ -332,7 +332,7 @@ static int mailbox_get_msg(struct mailbox_channel *mb_chann) memcpy_fromio((u32 *)&header + 1, read_addr, rest); read_addr += rest; - ret = mailbox_get_resp(mb_chann, &header, (u32 *)read_addr); + ret = mailbox_get_resp(mb_chann, &header, read_addr); mailbox_set_headptr(mb_chann, head + msg_size); /* After update head, it can equal to ringbuf_size. This is expected. */ @@ -349,8 +349,6 @@ static irqreturn_t mailbox_irq_handler(int irq, void *p) trace_mbox_irq_handle(MAILBOX_NAME, irq); /* Schedule a rx_work to call the callback functions */ queue_work(mb_chann->work_q, &mb_chann->rx_work); - /* Clear IOHUB register */ - mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0); return IRQ_HANDLED; } @@ -367,6 +365,9 @@ static void mailbox_rx_worker(struct work_struct *rx_work) return; } +again: + mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0); + while (1) { /* * If return is 0, keep consuming next message, until there is @@ -380,10 +381,18 @@ static void mailbox_rx_worker(struct work_struct *rx_work) if (unlikely(ret)) { MB_ERR(mb_chann, "Unexpected ret %d, disable irq", ret); WRITE_ONCE(mb_chann->bad_state, true); - disable_irq(mb_chann->msix_irq); - break; + return; } } + + /* + * The hardware will not generate interrupt if firmware creates a new + * response right after driver clears interrupt register. Check + * the interrupt register to make sure there is not any new response + * before exiting. 
+ */ + if (mailbox_reg_read(mb_chann, mb_chann->iohub_int_addr)) + goto again; } int xdna_mailbox_send_msg(struct mailbox_channel *mb_chann, diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.h b/drivers/accel/amdxdna/amdxdna_mailbox.h index 57954c303bdd..ea367f2fb738 100644 --- a/drivers/accel/amdxdna/amdxdna_mailbox.h +++ b/drivers/accel/amdxdna/amdxdna_mailbox.h @@ -25,7 +25,7 @@ struct mailbox_channel; struct xdna_mailbox_msg { u32 opcode; void *handle; - int (*notify_cb)(void *handle, const u32 *data, size_t size); + int (*notify_cb)(void *handle, void __iomem *data, size_t size); u8 *send_data; size_t send_size; }; diff --git a/drivers/accel/amdxdna/amdxdna_mailbox_helper.c b/drivers/accel/amdxdna/amdxdna_mailbox_helper.c index 5139a9c96a91..6d0c24513476 100644 --- a/drivers/accel/amdxdna/amdxdna_mailbox_helper.c +++ b/drivers/accel/amdxdna/amdxdna_mailbox_helper.c @@ -16,7 +16,7 @@ #include "amdxdna_mailbox_helper.h" #include "amdxdna_pci_drv.h" -int xdna_msg_cb(void *handle, const u32 *data, size_t size) +int xdna_msg_cb(void *handle, void __iomem *data, size_t size) { struct xdna_notify *cb_arg = handle; int ret; @@ -29,9 +29,9 @@ int xdna_msg_cb(void *handle, const u32 *data, size_t size) goto out; } + memcpy_fromio(cb_arg->data, data, cb_arg->size); print_hex_dump_debug("resp data: ", DUMP_PREFIX_OFFSET, - 16, 4, data, cb_arg->size, true); - memcpy(cb_arg->data, data, cb_arg->size); + 16, 4, cb_arg->data, cb_arg->size, true); out: ret = cb_arg->error; complete(&cb_arg->comp); diff --git a/drivers/accel/amdxdna/amdxdna_mailbox_helper.h b/drivers/accel/amdxdna/amdxdna_mailbox_helper.h index 23e1317b79fe..710ff8873d61 100644 --- a/drivers/accel/amdxdna/amdxdna_mailbox_helper.h +++ b/drivers/accel/amdxdna/amdxdna_mailbox_helper.h @@ -35,7 +35,7 @@ struct xdna_notify { .notify_cb = xdna_msg_cb, \ } -int xdna_msg_cb(void *handle, const u32 *data, size_t size); +int xdna_msg_cb(void *handle, void __iomem *data, size_t size); int xdna_send_msg_wait(struct amdxdna_dev *xdna, struct mailbox_channel *chann, struct xdna_mailbox_msg *msg); diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c index f5b8497cf5ad..f2bf1d374cc7 100644 --- a/drivers/accel/amdxdna/amdxdna_pci_drv.c +++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c @@ -226,6 +226,7 @@ const struct drm_driver amdxdna_drm_drv = { .num_ioctls = ARRAY_SIZE(amdxdna_drm_ioctls), .gem_create_object = amdxdna_gem_create_object_cb, + .gem_prime_import = amdxdna_gem_prime_import, }; static const struct amdxdna_dev_info * @@ -266,12 +267,16 @@ static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id) fs_reclaim_release(GFP_KERNEL); } + xdna->notifier_wq = alloc_ordered_workqueue("notifier_wq", 0); + if (!xdna->notifier_wq) + return -ENOMEM; + mutex_lock(&xdna->dev_lock); ret = xdna->dev_info->ops->init(xdna); mutex_unlock(&xdna->dev_lock); if (ret) { XDNA_ERR(xdna, "Hardware init failed, ret %d", ret); - return ret; + goto destroy_notifier_wq; } ret = amdxdna_sysfs_init(xdna); @@ -301,6 +306,8 @@ failed_dev_fini: mutex_lock(&xdna->dev_lock); xdna->dev_info->ops->fini(xdna); mutex_unlock(&xdna->dev_lock); +destroy_notifier_wq: + destroy_workqueue(xdna->notifier_wq); return ret; } @@ -310,6 +317,8 @@ static void amdxdna_remove(struct pci_dev *pdev) struct device *dev = &pdev->dev; struct amdxdna_client *client; + destroy_workqueue(xdna->notifier_wq); + pm_runtime_get_noresume(dev); pm_runtime_forbid(dev); diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.h 
b/drivers/accel/amdxdna/amdxdna_pci_drv.h index 37848a8d8031..ab79600911aa 100644 --- a/drivers/accel/amdxdna/amdxdna_pci_drv.h +++ b/drivers/accel/amdxdna/amdxdna_pci_drv.h @@ -6,6 +6,7 @@ #ifndef _AMDXDNA_PCI_DRV_H_ #define _AMDXDNA_PCI_DRV_H_ +#include <linux/workqueue.h> #include <linux/xarray.h> #define XDNA_INFO(xdna, fmt, args...) drm_info(&(xdna)->ddev, fmt, ##args) @@ -98,6 +99,7 @@ struct amdxdna_dev { struct list_head client_list; struct amdxdna_fw_ver fw_ver; struct rw_semaphore notifier_lock; /* for mmu notifier*/ + struct workqueue_struct *notifier_wq; }; /* |