Diffstat (limited to 'drivers/misc/fastrpc.c')
-rw-r--r--  drivers/misc/fastrpc.c | 415
1 file changed, 250 insertions(+), 165 deletions(-)
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 5310606113fe..ee652ef01534 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -13,12 +13,13 @@
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of.h>
+#include <linux/platform_device.h>
 #include <linux/sort.h>
 #include <linux/of_platform.h>
 #include <linux/rpmsg.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
-#include <linux/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_scm.h>
 #include <uapi/misc/fastrpc.h>
 #include <linux/of_reserved_mem.h>
@@ -26,7 +27,7 @@
 #define MDSP_DOMAIN_ID (1)
 #define SDSP_DOMAIN_ID (2)
 #define CDSP_DOMAIN_ID (3)
-#define FASTRPC_DEV_MAX	4 /* adsp, mdsp, slpi, cdsp*/
+#define GDSP_DOMAIN_ID (4)
 #define FASTRPC_MAX_SESSIONS 14
 #define FASTRPC_MAX_VMIDS 16
 #define FASTRPC_ALIGN 128
@@ -104,8 +105,6 @@
 
 #define miscdev_to_fdevice(d) container_of(d, struct fastrpc_device, miscdev)
 
-static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
-						"sdsp", "cdsp"};
 struct fastrpc_phy_page {
 	u64 addr;		/* physical address */
 	u64 size;		/* size of contiguous region */
@@ -137,14 +136,14 @@ struct fastrpc_mmap_rsp_msg {
 };
 
 struct fastrpc_mmap_req_msg {
-	s32 pgid;
+	s32 client_id;
 	u32 flags;
 	u64 vaddr;
 	s32 num;
 };
 
 struct fastrpc_mem_map_req_msg {
-	s32 pgid;
+	s32 client_id;
 	s32 fd;
 	s32 offset;
 	u32 flags;
@@ -154,20 +153,20 @@ struct fastrpc_mem_map_req_msg {
 };
 
 struct fastrpc_munmap_req_msg {
-	s32 pgid;
+	s32 client_id;
 	u64 vaddr;
 	u64 size;
 };
 
 struct fastrpc_mem_unmap_req_msg {
-	s32 pgid;
+	s32 client_id;
 	s32 fd;
 	u64 vaddrin;
 	u64 len;
 };
 
 struct fastrpc_msg {
-	int pid;		/* process group id */
+	int client_id;		/* process client id */
 	int tid;		/* thread id */
 	u64 ctx;		/* invoke caller context */
 	u32 handle;	/* handle to invoke */
@@ -232,7 +231,7 @@ struct fastrpc_invoke_ctx {
 	int nbufs;
 	int retval;
 	int pid;
-	int tgid;
+	int client_id;
 	u32 sc;
 	u32 *crc;
 	u64 ctxid;
@@ -262,7 +261,6 @@ struct fastrpc_channel_ctx {
 	int domain_id;
 	int sesscount;
 	int vmcount;
-	u32 perms;
 	struct qcom_scm_vmperm vmperms[FASTRPC_MAX_VMIDS];
 	struct rpmsg_device *rpdev;
 	struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
@@ -298,7 +296,7 @@ struct fastrpc_user {
 	struct fastrpc_session_ctx *sctx;
 	struct fastrpc_buf *init_mem;
 
-	int tgid;
+	int client_id;
 	int pd;
 	bool is_secure_dev;
 	/* Lock for lists */
@@ -316,15 +314,17 @@ static void fastrpc_free_map(struct kref *ref)
 	if (map->table) {
 		if (map->attr & FASTRPC_ATTR_SECUREMAP) {
 			struct qcom_scm_vmperm perm;
+			int vmid = map->fl->cctx->vmperms[0].vmid;
+			u64 src_perms = BIT(QCOM_SCM_VMID_HLOS) | BIT(vmid);
 			int err = 0;
 
 			perm.vmid = QCOM_SCM_VMID_HLOS;
 			perm.perm = QCOM_SCM_PERM_RWX;
-			err = qcom_scm_assign_mem(map->phys, map->size,
-				&map->fl->cctx->perms, &perm, 1);
+			err = qcom_scm_assign_mem(map->phys, map->len,
+				&src_perms, &perm, 1);
 			if (err) {
-				dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
-					map->phys, map->size, err);
+				dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n",
+					map->phys, map->len, err);
 				return;
 			}
 		}
@@ -360,32 +360,29 @@ static int fastrpc_map_get(struct fastrpc_map *map)
 
 static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
-			    struct fastrpc_map **ppmap, bool take_ref)
+			    struct fastrpc_map **ppmap)
 {
-	struct fastrpc_session_ctx *sess = fl->sctx;
 	struct fastrpc_map *map = NULL;
+	struct dma_buf *buf;
 	int ret = -ENOENT;
 
+	buf = dma_buf_get(fd);
+	if (IS_ERR(buf))
+		return PTR_ERR(buf);
+
 	spin_lock(&fl->lock);
 	list_for_each_entry(map, &fl->maps, node) {
-		if (map->fd != fd)
+		if (map->fd != fd || map->buf != buf)
 			continue;
 
-		if (take_ref) {
-			ret = fastrpc_map_get(map);
-			if (ret) {
-				dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n",
-					__func__, fd, ret);
-				break;
-			}
-		}
-
 		*ppmap = map;
 		ret = 0;
 		break;
 	}
 	spin_unlock(&fl->lock);
+	dma_buf_put(buf);
+
 	return ret;
 }
@@ -611,7 +608,7 @@ static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
 	ctx->sc = sc;
 	ctx->retval = -1;
 	ctx->pid = current->pid;
-	ctx->tgid = user->tgid;
+	ctx->client_id = user->client_id;
 	ctx->cctx = cctx;
 	init_completion(&ctx->work);
 	INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);
@@ -749,15 +746,14 @@ static const struct dma_buf_ops fastrpc_dma_buf_ops = {
 	.release = fastrpc_release,
 };
 
-static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
+static int fastrpc_map_attach(struct fastrpc_user *fl, int fd,
 			     u64 len, u32 attr, struct fastrpc_map **ppmap)
 {
 	struct fastrpc_session_ctx *sess = fl->sctx;
 	struct fastrpc_map *map = NULL;
-	int err = 0;
-
-	if (!fastrpc_map_lookup(fl, fd, ppmap, true))
-		return 0;
+	struct sg_table *table;
+	struct scatterlist *sgl = NULL;
+	int err = 0, sgl_index = 0;
 
 	map = kzalloc(sizeof(*map), GFP_KERNEL);
 	if (!map)
@@ -781,15 +777,28 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
 		goto attach_err;
 	}
 
-	map->table = dma_buf_map_attachment_unlocked(map->attach, DMA_BIDIRECTIONAL);
-	if (IS_ERR(map->table)) {
-		err = PTR_ERR(map->table);
+	table = dma_buf_map_attachment_unlocked(map->attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(table)) {
+		err = PTR_ERR(table);
 		goto map_err;
 	}
+	map->table = table;
 
-	map->phys = sg_dma_address(map->table->sgl);
-	map->phys += ((u64)fl->sctx->sid << 32);
-	map->size = len;
+	if (attr & FASTRPC_ATTR_SECUREMAP) {
+		map->phys = sg_phys(map->table->sgl);
+	} else {
+		map->phys = sg_dma_address(map->table->sgl);
+		map->phys += ((u64)fl->sctx->sid << 32);
+	}
+	for_each_sg(map->table->sgl, sgl, map->table->nents,
+		sgl_index)
+		map->size += sg_dma_len(sgl);
+	if (len > map->size) {
+		dev_dbg(sess->dev, "Bad size passed len 0x%llx map size 0x%llx\n",
+			len, map->size);
+		err = -EINVAL;
+		goto map_err;
+	}
 	map->va = sg_virt(map->table->sgl);
 	map->len = len;
@@ -798,12 +807,18 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
 		 * If subsystem VMIDs are defined in DTSI, then do
 		 * hyp_assign from HLOS to those VM(s)
 		 */
+		u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
+		struct qcom_scm_vmperm dst_perms[2] = {0};
+
+		dst_perms[0].vmid = QCOM_SCM_VMID_HLOS;
+		dst_perms[0].perm = QCOM_SCM_PERM_RW;
+		dst_perms[1].vmid = fl->cctx->vmperms[0].vmid;
+		dst_perms[1].perm = QCOM_SCM_PERM_RWX;
 		map->attr = attr;
-		err = qcom_scm_assign_mem(map->phys, (u64)map->size, &fl->cctx->perms,
-			fl->cctx->vmperms, fl->cctx->vmcount);
+		err = qcom_scm_assign_mem(map->phys, (u64)map->len, &src_perms, dst_perms, 2);
 		if (err) {
-			dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
-				map->phys, map->size, err);
+			dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d\n",
+				map->phys, map->len, err);
 			goto map_err;
 		}
 	}
@@ -824,6 +839,24 @@ get_err:
 	return err;
 }
 
+static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
+			u64 len, u32 attr, struct fastrpc_map **ppmap)
+{
+	struct fastrpc_session_ctx *sess = fl->sctx;
+	int err = 0;
+
+	if (!fastrpc_map_lookup(fl, fd, ppmap)) {
+		if (!fastrpc_map_get(*ppmap))
+			return 0;
+		dev_dbg(sess->dev, "%s: Failed to get map fd=%d\n",
+			__func__, fd);
+	}
+
+	err = fastrpc_map_attach(fl, fd, len, attr, ppmap);
+
+	return err;
+}
+
 /*
  * Fastrpc payload buffer with metadata looks like:
  *
@@ -896,8 +929,12 @@ static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
 		    ctx->args[i].length == 0)
 			continue;
 
-		err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
-			 ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
+		if (i < ctx->nbufs)
+			err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
+				ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
+		else
+			err = fastrpc_map_attach(ctx->fl, ctx->args[i].fd,
+				ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
 		if (err) {
 			dev_err(dev, "Error Creating map %d\n", err);
 			return -EINVAL;
@@ -939,10 +976,14 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
 
 	ctx->msg_sz = pkt_size;
 
-	err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
+	if (ctx->fl->sctx->sid)
+		err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
+	else
+		err = fastrpc_remote_heap_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
 	if (err)
 		return err;
 
+	memset(ctx->buf->virt, 0, pkt_size);
 	rpra = ctx->buf->virt;
 	list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
 	pages = fastrpc_phy_page_start(list, ctx->nscalars);
@@ -973,7 +1014,7 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
 			mmap_read_lock(current->mm);
 			vma = find_vma(current->mm, ctx->args[i].ptr);
 			if (vma)
-				pages[i].addr += ctx->args[i].ptr -
+				pages[i].addr += (ctx->args[i].ptr & PAGE_MASK) -
 						 vma->vm_start;
 			mmap_read_unlock(current->mm);
@@ -1000,8 +1041,8 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
 					(pkt_size - rlen);
 			pages[i].addr = pages[i].addr & PAGE_MASK;
 
-			pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
-			pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
+			pg_start = (rpra[i].buf.pv & PAGE_MASK) >> PAGE_SHIFT;
+			pg_end = ((rpra[i].buf.pv + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
 			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
 			args = args + mlen;
 			rlen -= mlen;
@@ -1052,6 +1093,7 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
 	struct fastrpc_phy_page *pages;
 	u64 *fdlist;
 	int i, inbufs, outbufs, handles;
+	int ret = 0;
 
 	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
 	outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
@@ -1067,22 +1109,26 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
 			u64 len = rpra[i].buf.len;
 
 			if (!kernel) {
-				if (copy_to_user((void __user *)dst, src, len))
-					return -EFAULT;
+				if (copy_to_user((void __user *)dst, src, len)) {
+					ret = -EFAULT;
+					goto cleanup_fdlist;
+				}
 			} else {
 				memcpy(dst, src, len);
 			}
 		}
 	}
 
+cleanup_fdlist:
+	/* Clean up fdlist which is updated by DSP */
 	for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
 		if (!fdlist[i])
 			break;
-		if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false))
+		if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap))
 			fastrpc_map_put(mmap);
 	}
 
-	return 0;
+	return ret;
 }
 
 static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
@@ -1095,11 +1141,11 @@ static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
 	int ret;
 
 	cctx = fl->cctx;
-	msg->pid = fl->tgid;
+	msg->client_id = fl->client_id;
 	msg->tid = current->pid;
 
 	if (kernel)
-		msg->pid = 0;
+		msg->client_id = 0;
 
 	msg->ctx = ctx->ctxid | fl->pd;
 	msg->handle = handle;
@@ -1141,11 +1187,9 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	if (ctx->nscalars) {
-		err = fastrpc_get_args(kernel, ctx);
-		if (err)
-			goto bail;
-	}
+	err = fastrpc_get_args(kernel, ctx);
+	if (err)
+		goto bail;
 
 	/* make sure that all CPU memory writes are seen by DSP */
 	dma_wmb();
@@ -1164,20 +1208,18 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
 	if (err)
 		goto bail;
 
+	/* make sure that all memory writes by DSP are seen by CPU */
+	dma_rmb();
+	/* populate all the output buffers with results */
+	err = fastrpc_put_args(ctx, kernel);
+	if (err)
+		goto bail;
+
 	/* Check the response from remote dsp */
 	err = ctx->retval;
 	if (err)
 		goto bail;
 
-	if (ctx->nscalars) {
-		/* make sure that all memory writes by DSP are seen by CPU */
-		dma_rmb();
-		/* populate all the output buffers with results */
-		err = fastrpc_put_args(ctx, kernel);
-		if (err)
-			goto bail;
-	}
-
 bail:
 	if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
 		/* We are done with this compute context */
@@ -1210,7 +1252,7 @@ static bool is_session_rejected(struct fastrpc_user *fl, bool unsigned_pd_reques
 		 * that does not support unsigned PD offload
 		 */
 		if (!fl->cctx->unsigned_support || !unsigned_pd_request) {
-			dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD");
+			dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD\n");
 			return true;
 		}
 	}
@@ -1226,8 +1268,9 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
 	struct fastrpc_phy_page pages[1];
 	char *name;
 	int err;
+	bool scm_done = false;
 	struct {
-		int pgid;
+		int client_id;
 		u32 namelen;
 		u32 pageslen;
 	} inbuf;
@@ -1247,17 +1290,12 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
 		goto err;
 	}
 
-	name = kzalloc(init.namelen, GFP_KERNEL);
-	if (!name) {
-		err = -ENOMEM;
+	name = memdup_user(u64_to_user_ptr(init.name), init.namelen);
+	if (IS_ERR(name)) {
+		err = PTR_ERR(name);
 		goto err;
 	}
 
-	if (copy_from_user(name, (void __user *)(uintptr_t)init.name, init.namelen)) {
-		err = -EFAULT;
-		goto err_name;
-	}
-
 	if (!fl->cctx->remote_heap) {
 		err = fastrpc_remote_heap_alloc(fl, fl->sctx->dev, init.memlen,
 						&fl->cctx->remote_heap);
@@ -1266,19 +1304,22 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
 
 		/* Map if we have any heap VMIDs associated with this ADSP Static Process. */
 		if (fl->cctx->vmcount) {
+			u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
+
 			err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
 						  (u64)fl->cctx->remote_heap->size,
-						  &fl->cctx->perms,
+						  &src_perms,
 						  fl->cctx->vmperms, fl->cctx->vmcount);
 			if (err) {
-				dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
+				dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d\n",
 					fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
 				goto err_map;
 			}
+			scm_done = true;
 		}
 	}
 
-	inbuf.pgid = fl->tgid;
+	inbuf.client_id = fl->client_id;
 	inbuf.namelen = init.namelen;
 	inbuf.pageslen = 0;
 	fl->pd = USER_PD;
@@ -1306,19 +1347,25 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
 		goto err_invoke;
 
 	kfree(args);
+	kfree(name);
 
 	return 0;
 err_invoke:
-	if (fl->cctx->vmcount) {
-		struct qcom_scm_vmperm perm;
+	if (fl->cctx->vmcount && scm_done) {
+		u64 src_perms = 0;
+		struct qcom_scm_vmperm dst_perms;
+		u32 i;
+
+		for (i = 0; i < fl->cctx->vmcount; i++)
+			src_perms |= BIT(fl->cctx->vmperms[i].vmid);
 
-		perm.vmid = QCOM_SCM_VMID_HLOS;
-		perm.perm = QCOM_SCM_PERM_RWX;
+		dst_perms.vmid = QCOM_SCM_VMID_HLOS;
+		dst_perms.perm = QCOM_SCM_PERM_RWX;
 		err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
 					  (u64)fl->cctx->remote_heap->size,
-					  &fl->cctx->perms, &perm, 1);
+					  &src_perms, &dst_perms, 1);
 		if (err)
-			dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
+			dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n",
 				fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
 	}
 err_map:
@@ -1342,7 +1389,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
 	int memlen;
 	int err;
 	struct {
-		int pgid;
+		int client_id;
 		u32 namelen;
 		u32 filelen;
 		u32 pageslen;
@@ -1374,7 +1421,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
 		goto err;
 	}
 
-	inbuf.pgid = fl->tgid;
+	inbuf.client_id = fl->client_id;
 	inbuf.namelen = strlen(current->comm) + 1;
 	inbuf.filelen = init.filelen;
 	inbuf.pageslen = 1;
@@ -1425,7 +1472,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
 
 	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
 	if (init.attrs)
-		sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);
+		sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 4, 0);
 
 	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, args);
@@ -1448,8 +1495,9 @@ err:
 }
 
 static struct fastrpc_session_ctx *fastrpc_session_alloc(
-					struct fastrpc_channel_ctx *cctx)
+					struct fastrpc_user *fl)
 {
+	struct fastrpc_channel_ctx *cctx = fl->cctx;
 	struct fastrpc_session_ctx *session = NULL;
 	unsigned long flags;
 	int i;
@@ -1459,6 +1507,8 @@ static struct fastrpc_session_ctx *fastrpc_session_alloc(
 		if (!cctx->session[i].used && cctx->session[i].valid) {
 			cctx->session[i].used = true;
 			session = &cctx->session[i];
+			/* any non-zero ID will work, session_idx + 1 is the simplest one */
+			fl->client_id = i + 1;
 			break;
 		}
 	}
@@ -1480,12 +1530,12 @@ static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
 static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
 {
 	struct fastrpc_invoke_args args[1];
-	int tgid = 0;
+	int client_id = 0;
 	u32 sc;
 
-	tgid = fl->tgid;
-	args[0].ptr = (u64)(uintptr_t) &tgid;
-	args[0].length = sizeof(tgid);
+	client_id = fl->client_id;
+	args[0].ptr = (u64)(uintptr_t) &client_id;
+	args[0].length = sizeof(client_id);
 	args[0].fd = -1;
 	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);
@@ -1558,11 +1608,10 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
 	INIT_LIST_HEAD(&fl->maps);
 	INIT_LIST_HEAD(&fl->mmaps);
 	INIT_LIST_HEAD(&fl->user);
-	fl->tgid = current->tgid;
 	fl->cctx = cctx;
 	fl->is_secure_dev = fdevice->secure;
 
-	fl->sctx = fastrpc_session_alloc(cctx);
+	fl->sctx = fastrpc_session_alloc(fl);
 	if (!fl->sctx) {
 		dev_err(&cctx->rpdev->dev, "No session available\n");
 		mutex_destroy(&fl->mutex);
@@ -1626,11 +1675,11 @@ static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
 static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
 {
 	struct fastrpc_invoke_args args[1];
-	int tgid = fl->tgid;
+	int client_id = fl->client_id;
 	u32 sc;
 
-	args[0].ptr = (u64)(uintptr_t) &tgid;
-	args[0].length = sizeof(tgid);
+	args[0].ptr = (u64)(uintptr_t) &client_id;
+	args[0].length = sizeof(client_id);
 	args[0].fd = -1;
 	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
 	fl->pd = pd;
@@ -1674,16 +1723,20 @@ static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr
 {
 	struct fastrpc_invoke_args args[2] = { 0 };
 
-	/* Capability filled in userspace */
+	/*
+	 * Capability filled in userspace. This carries the information
+	 * about the remoteproc support which is fetched from the remoteproc
+	 * sysfs node by userspace.
+	 */
 	dsp_attr_buf[0] = 0;
+	dsp_attr_buf_len -= 1;
 
 	args[0].ptr = (u64)(uintptr_t)&dsp_attr_buf_len;
 	args[0].length = sizeof(dsp_attr_buf_len);
 	args[0].fd = -1;
 	args[1].ptr = (u64)(uintptr_t)&dsp_attr_buf[1];
-	args[1].length = dsp_attr_buf_len;
+	args[1].length = dsp_attr_buf_len * sizeof(u32);
 	args[1].fd = -1;
-	fl->pd = USER_PD;
 
 	return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE,
 				       FASTRPC_SCALARS(0, 1, 1), args);
@@ -1696,7 +1749,6 @@ static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap,
 	uint32_t attribute_id = cap->attribute_id;
 	uint32_t *dsp_attributes;
 	unsigned long flags;
-	uint32_t domain = cap->domain;
 	int err;
 
 	spin_lock_irqsave(&cctx->lock, flags);
@@ -1711,10 +1763,10 @@ static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap,
 	if (!dsp_attributes)
 		return -ENOMEM;
 
-	err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
+	err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES);
 	if (err == DSP_UNSUPPORTED_API) {
 		dev_info(&cctx->rpdev->dev,
-			 "Warning: DSP capabilities not supported on domain: %d\n", domain);
+			 "Warning: DSP capabilities not supported\n");
 		kfree(dsp_attributes);
 		return -EOPNOTSUPP;
 	} else if (err) {
@@ -1742,17 +1794,6 @@ static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp)
 		return -EFAULT;
 
 	cap.capability = 0;
-	if (cap.domain >= FASTRPC_DEV_MAX) {
-		dev_err(&fl->cctx->rpdev->dev, "Error: Invalid domain id:%d, err:%d\n",
-			cap.domain, err);
-		return -ECHRNG;
-	}
-
-	/* Fastrpc Capablities does not support modem domain */
-	if (cap.domain == MDSP_DOMAIN_ID) {
-		dev_err(&fl->cctx->rpdev->dev, "Error: modem not supported %d\n", err);
-		return -ECHRNG;
-	}
 
 	if (cap.attribute_id >= FASTRPC_MAX_DSP_ATTRIBUTES) {
 		dev_err(&fl->cctx->rpdev->dev, "Error: invalid attribute: %d, err: %d\n",
@@ -1764,7 +1805,7 @@ static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp)
 	if (err)
 		return err;
 
-	if (copy_to_user(argp, &cap.capability, sizeof(cap.capability)))
+	if (copy_to_user(argp, &cap, sizeof(cap)))
 		return -EFAULT;
 
 	return 0;
@@ -1778,7 +1819,7 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *
 	int err;
 	u32 sc;
 
-	req_msg.pgid = fl->tgid;
+	req_msg.client_id = fl->client_id;
 	req_msg.size = buf->size;
 	req_msg.vaddr = buf->raddr;
@@ -1854,13 +1895,17 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
 		return -EINVAL;
 	}
 
-	err = fastrpc_buf_alloc(fl, fl->sctx->dev, req.size, &buf);
+	if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
+		err = fastrpc_remote_heap_alloc(fl, dev, req.size, &buf);
+	else
+		err = fastrpc_buf_alloc(fl, dev, req.size, &buf);
+
 	if (err) {
 		dev_err(dev, "failed to allocate buffer\n");
 		return err;
 	}
 
-	req_msg.pgid = fl->tgid;
+	req_msg.client_id = fl->client_id;
 	req_msg.flags = req.flags;
 	req_msg.vaddr = req.vaddrin;
 	req_msg.num = sizeof(pages);
@@ -1882,7 +1927,8 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
 				      &args[0]);
 	if (err) {
 		dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
-		goto err_invoke;
+		fastrpc_buf_free(buf);
+		return err;
 	}
 
 	/* update the buffer to be able to deallocate the memory on the DSP */
@@ -1892,13 +1938,11 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
 	req.vaddrout = rsp_msg.vaddr;
 
 	/* Add memory to static PD pool, protection thru hypervisor */
-	if (req.flags != ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
-		struct qcom_scm_vmperm perm;
+	if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
+		u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
 
-		perm.vmid = QCOM_SCM_VMID_HLOS;
-		perm.perm = QCOM_SCM_PERM_RWX;
-		err = qcom_scm_assign_mem(buf->phys, buf->size,
-			&fl->cctx->perms, &perm, 1);
+		err = qcom_scm_assign_mem(buf->phys, (u64)buf->size,
+			&src_perms, fl->cctx->vmperms, fl->cctx->vmcount);
 		if (err) {
 			dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
 				buf->phys, buf->size, err);
@@ -1922,8 +1966,6 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
 
 err_assign:
 	fastrpc_req_munmap_impl(fl, buf);
-err_invoke:
-	fastrpc_buf_free(buf);
 
 	return err;
 }
@@ -1952,7 +1994,7 @@ static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_me
 		return -EINVAL;
 	}
 
-	req_msg.pgid = fl->tgid;
+	req_msg.client_id = fl->client_id;
 	req_msg.len = map->len;
 	req_msg.vaddrin = map->raddr;
 	req_msg.fd = map->fd;
@@ -1963,11 +2005,13 @@ static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_me
 	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_UNMAP, 1, 0);
 	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
 				      &args[0]);
-	fastrpc_map_put(map);
-	if (err)
+	if (err) {
 		dev_err(dev, "unmmap\tpt fd = %d, 0x%09llx error\n", map->fd, map->raddr);
+		return err;
+	}
+	fastrpc_map_put(map);
 
-	return err;
+	return 0;
 }
 
 static int fastrpc_req_mem_unmap(struct fastrpc_user *fl, char __user *argp)
@@ -2003,7 +2047,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
 		return err;
 	}
 
-	req_msg.pgid = fl->tgid;
+	req_msg.client_id = fl->client_id;
 	req_msg.fd = req.fd;
 	req_msg.offset = req.offset;
 	req_msg.vaddrin = req.vaddrin;
@@ -2016,7 +2060,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
 	args[0].length = sizeof(req_msg);
 
 	pages.addr = map->phys;
-	pages.size = map->size;
+	pages.size = map->len;
 	args[1].ptr = (u64) (uintptr_t) &pages;
 	args[1].length = sizeof(pages);
@@ -2031,7 +2075,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
 	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]);
 	if (err) {
 		dev_err(dev, "mem mmap error, fd %d, vaddr %llx, size %lld\n",
-			req.fd, req.vaddrin, map->size);
+			req.fd, req.vaddrin, map->len);
 		goto err_invoke;
 	}
@@ -2044,7 +2088,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
 	if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
 		/* unmap the memory and release the buffer */
 		req_unmap.vaddr = (uintptr_t) rsp_msg.vaddr;
-		req_unmap.length = map->size;
+		req_unmap.length = map->len;
 		fastrpc_req_mem_unmap_impl(fl, &req_unmap);
 		return -EFAULT;
 	}
@@ -2163,7 +2207,7 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
 	return 0;
 }
 
-static int fastrpc_cb_remove(struct platform_device *pdev)
+static void fastrpc_cb_remove(struct platform_device *pdev)
 {
 	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
 	struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
@@ -2171,15 +2215,13 @@ static int fastrpc_cb_remove(struct platform_device *pdev)
 	int i;
 
 	spin_lock_irqsave(&cctx->lock, flags);
-	for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
+	for (i = 0; i < FASTRPC_MAX_SESSIONS; i++) {
 		if (cctx->session[i].sid == sess->sid) {
 			cctx->session[i].valid = false;
 			cctx->sesscount--;
 		}
 	}
 	spin_unlock_irqrestore(&cctx->lock, flags);
-
-	return 0;
 }
 
 static const struct of_device_id fastrpc_match_table[] = {
@@ -2213,6 +2255,9 @@ static int fastrpc_device_register(struct device *dev, struct fastrpc_channel_ct
 	fdev->miscdev.fops = &fastrpc_fops;
 	fdev->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "fastrpc-%s%s",
 					    domain, is_secured ? "-secure" : "");
+	if (!fdev->miscdev.name)
+		return -ENOMEM;
+
 	err = misc_register(&fdev->miscdev);
 	if (!err) {
 		if (is_secured)
@@ -2224,6 +2269,22 @@ static int fastrpc_device_register(struct device *dev, struct fastrpc_channel_ct
 	return err;
 }
 
+static int fastrpc_get_domain_id(const char *domain)
+{
+	if (!strncmp(domain, "adsp", 4))
+		return ADSP_DOMAIN_ID;
+	else if (!strncmp(domain, "cdsp", 4))
+		return CDSP_DOMAIN_ID;
+	else if (!strncmp(domain, "mdsp", 4))
+		return MDSP_DOMAIN_ID;
+	else if (!strncmp(domain, "sdsp", 4))
+		return SDSP_DOMAIN_ID;
+	else if (!strncmp(domain, "gdsp", 4))
+		return GDSP_DOMAIN_ID;
+
+	return -EINVAL;
+}
+
 static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
 {
 	struct device *rdev = &rpdev->dev;
@@ -2239,15 +2300,10 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
 		return err;
 	}
 
-	for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
-		if (!strcmp(domains[i], domain)) {
-			domain_id = i;
-			break;
-		}
-	}
+	domain_id = fastrpc_get_domain_id(domain);
 
 	if (domain_id < 0) {
-		dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
+		dev_info(rdev, "FastRPC Domain %s not supported\n", domain);
 		return -EINVAL;
 	}
@@ -2267,13 +2323,26 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
 
 	if (vmcount) {
 		data->vmcount = vmcount;
-		data->perms = BIT(QCOM_SCM_VMID_HLOS);
 		for (i = 0; i < data->vmcount; i++) {
 			data->vmperms[i].vmid = vmids[i];
 			data->vmperms[i].perm = QCOM_SCM_PERM_RWX;
 		}
 	}
 
+	if (domain_id == SDSP_DOMAIN_ID) {
+		struct resource res;
+		u64 src_perms;
+
+		err = of_reserved_mem_region_to_resource(rdev->of_node, 0, &res);
+		if (!err) {
+			src_perms = BIT(QCOM_SCM_VMID_HLOS);
+
+			qcom_scm_assign_mem(res.start, resource_size(&res), &src_perms,
+					    data->vmperms, data->vmcount);
+		}
+
+	}
+
 	secure_dsp = !(of_property_read_bool(rdev->of_node, "qcom,non-secure-domain"));
 	data->secure = secure_dsp;
@@ -2281,26 +2350,27 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
 	case ADSP_DOMAIN_ID:
 	case MDSP_DOMAIN_ID:
 	case SDSP_DOMAIN_ID:
-		/* Unsigned PD offloading is only supported on CDSP*/
+		/* Unsigned PD offloading is only supported on CDSP and GDSP */
 		data->unsigned_support = false;
-		err = fastrpc_device_register(rdev, data, secure_dsp, domains[domain_id]);
+		err = fastrpc_device_register(rdev, data, secure_dsp, domain);
 		if (err)
-			goto fdev_error;
+			goto err_free_data;
 		break;
 	case CDSP_DOMAIN_ID:
+	case GDSP_DOMAIN_ID:
 		data->unsigned_support = true;
 		/* Create both device nodes so that we can allow both Signed and Unsigned PD */
-		err = fastrpc_device_register(rdev, data, true, domains[domain_id]);
+		err = fastrpc_device_register(rdev, data, true, domain);
 		if (err)
-			goto fdev_error;
+			goto err_free_data;
 
-		err = fastrpc_device_register(rdev, data, false, domains[domain_id]);
+		err = fastrpc_device_register(rdev, data, false, domain);
 		if (err)
-			goto fdev_error;
+			goto err_deregister_fdev;
 		break;
 	default:
 		err = -EINVAL;
-		goto fdev_error;
+		goto err_free_data;
 	}
 
 	kref_init(&data->refcount);
@@ -2315,8 +2385,19 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
 	data->domain_id = domain_id;
 	data->rpdev = rpdev;
 
-	return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
-fdev_error:
+	err = of_platform_populate(rdev->of_node, NULL, NULL, rdev);
+	if (err)
+		goto err_deregister_fdev;
+
+	return 0;
+
+err_deregister_fdev:
+	if (data->fdevice)
+		misc_deregister(&data->fdevice->miscdev);
+	if (data->secure_fdevice)
+		misc_deregister(&data->secure_fdevice->miscdev);
+
+err_free_data:
 	kfree(data);
 	return err;
 }
@@ -2326,8 +2407,10 @@ static void fastrpc_notify_users(struct fastrpc_user *user)
 	struct fastrpc_invoke_ctx *ctx;
 
 	spin_lock(&user->lock);
-	list_for_each_entry(ctx, &user->pending, node)
+	list_for_each_entry(ctx, &user->pending, node) {
+		ctx->retval = -EPIPE;
 		complete(&ctx->work);
+	}
 	spin_unlock(&user->lock);
 }
@@ -2338,7 +2421,9 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
 	struct fastrpc_user *user;
 	unsigned long flags;
 
+	/* No invocations past this point */
 	spin_lock_irqsave(&cctx->lock, flags);
+	cctx->rpdev = NULL;
 	list_for_each_entry(user, &cctx->users, user)
 		fastrpc_notify_users(user);
 	spin_unlock_irqrestore(&cctx->lock, flags);
@@ -2357,7 +2442,6 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
 
 	of_platform_depopulate(&rpdev->dev);
 
-	cctx->rpdev = NULL;
 	fastrpc_channel_ctx_put(cctx);
 }
@@ -2441,5 +2525,6 @@ static void fastrpc_exit(void)
 }
 module_exit(fastrpc_exit);
 
+MODULE_DESCRIPTION("Qualcomm FastRPC");
 MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
