Diffstat (limited to 'drivers/remoteproc/remoteproc_core.c')
 drivers/remoteproc/remoteproc_core.c | 1163 ++++++++++++++++++++++-----------
 1 file changed, 754 insertions(+), 409 deletions(-)
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 9f04c30c4aaf..aada2780b343 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -16,32 +16,27 @@ #define pr_fmt(fmt) "%s: " fmt, __func__ +#include <asm/byteorder.h> #include <linux/delay.h> -#include <linux/kernel.h> -#include <linux/module.h> #include <linux/device.h> -#include <linux/slab.h> -#include <linux/mutex.h> #include <linux/dma-mapping.h> +#include <linux/elf.h> #include <linux/firmware.h> -#include <linux/string.h> -#include <linux/debugfs.h> -#include <linux/devcoredump.h> +#include <linux/idr.h> +#include <linux/iommu.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of_platform.h> +#include <linux/panic_notifier.h> +#include <linux/platform_device.h> #include <linux/rculist.h> #include <linux/remoteproc.h> -#include <linux/pm_runtime.h> -#include <linux/iommu.h> -#include <linux/idr.h> -#include <linux/elf.h> -#include <linux/crc32.h> -#include <linux/of_reserved_mem.h> -#include <linux/virtio_ids.h> +#include <linux/slab.h> +#include <linux/string.h> #include <linux/virtio_ring.h> -#include <asm/byteorder.h> -#include <linux/platform_device.h> #include "remoteproc_internal.h" -#include "remoteproc_elf_helpers.h" #define HIGH_BITS_MASK 0xFFFFFFFF00000000ULL @@ -59,6 +54,7 @@ static int rproc_release_carveout(struct rproc *rproc, /* Unique indices for remoteproc devices */ static DEFINE_IDA(rproc_dev_index); +static struct workqueue_struct *rproc_recovery_wq; static const char * const rproc_crash_names[] = { [RPROC_MMUFAULT] = "mmufault", @@ -109,10 +105,10 @@ static int rproc_enable_iommu(struct rproc *rproc) return 0; } - domain = iommu_domain_alloc(dev->bus); - if (!domain) { + domain = iommu_paging_domain_alloc(dev); + if (IS_ERR(domain)) { dev_err(dev, "can't alloc iommu domain\n"); - return -ENOMEM; + return PTR_ERR(domain); } iommu_set_fault_handler(domain, rproc_iommu_fault, rproc); @@ -159,13 +155,13 @@ phys_addr_t rproc_va_to_pa(void *cpu_addr) WARN_ON(!virt_addr_valid(cpu_addr)); return virt_to_phys(cpu_addr); } -EXPORT_SYMBOL(rproc_va_to_pa); /** * rproc_da_to_va() - lookup the kernel virtual address for a remoteproc address * @rproc: handle of a remote processor * @da: remoteproc device address to translate * @len: length of the memory region @da is pointing to + * @is_iomem: optional pointer filled in to indicate if @da is iomapped memory * * Some remote processors will ask us to allocate them physically contiguous * memory regions (which we call "carveouts"), and map them to specific @@ -183,20 +179,20 @@ EXPORT_SYMBOL(rproc_va_to_pa); * translations on the internal remoteproc memory regions through a platform * implementation specific da_to_va ops, if present. * - * The function returns a valid kernel address on success or NULL on failure. - * * Note: phys_to_virt(iommu_iova_to_phys(rproc->domain, da)) will work too, * but only on kernel direct mapped RAM memory. Instead, we're just using * here the output of the DMA API for the carveouts, which should be more * correct. 
+ * + * Return: a valid kernel address on success or NULL on failure */ -void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { struct rproc_mem_entry *carveout; void *ptr = NULL; if (rproc->ops->da_to_va) { - ptr = rproc->ops->da_to_va(rproc, da, len); + ptr = rproc->ops->da_to_va(rproc, da, len, is_iomem); if (ptr) goto out; } @@ -218,6 +214,9 @@ void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) ptr = carveout->va + offset; + if (is_iomem) + *is_iomem = carveout->is_iomem; + break; } @@ -244,6 +243,7 @@ EXPORT_SYMBOL(rproc_da_to_va); * * Return: a valid pointer on carveout entry on success or NULL on failure. */ +__printf(2, 3) struct rproc_mem_entry * rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...) { @@ -329,7 +329,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i) size_t size; /* actual size of vring (in bytes) */ - size = PAGE_ALIGN(vring_size(rvring->len, rvring->align)); + size = PAGE_ALIGN(vring_size(rvring->num, rvring->align)); rsc = (void *)rproc->table_ptr + rvdev->rsc_offset; @@ -340,7 +340,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i) if (rproc_check_carveout_da(rproc, mem, rsc->vring[i].da, size)) return -ENOMEM; } else { - /* Register carveout in in list */ + /* Register carveout in list */ mem = rproc_mem_entry_init(dev, NULL, 0, size, rsc->vring[i].da, rproc_alloc_carveout, @@ -378,7 +378,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i) return 0; } -static int +int rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i) { struct rproc *rproc = rvdev->rproc; @@ -396,7 +396,7 @@ rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i) return -EINVAL; } - rvring->len = vring->num; + rvring->num = vring->num; rvring->align = vring->align; rvring->rvdev = rvdev; @@ -411,47 +411,39 @@ void rproc_free_vring(struct rproc_vring *rvring) idr_remove(&rproc->notifyids, rvring->notifyid); - /* reset resource entry info */ - rsc = (void *)rproc->table_ptr + rvring->rvdev->rsc_offset; - rsc->vring[idx].da = 0; - rsc->vring[idx].notifyid = -1; + /* + * At this point rproc_stop() has been called and the installed resource + * table in the remote processor memory may no longer be accessible. As + * such and as per rproc_stop(), rproc->table_ptr points to the cached + * resource table (rproc->cached_table). The cached resource table is + * only available when a remote processor has been booted by the + * remoteproc core, otherwise it is NULL. + * + * Based on the above, reset the virtio device section in the cached + * resource table only if there is one to work with. 
+ */ + if (rproc->table_ptr) { + rsc = (void *)rproc->table_ptr + rvring->rvdev->rsc_offset; + rsc->vring[idx].da = 0; + rsc->vring[idx].notifyid = -1; + } } -static int rproc_vdev_do_start(struct rproc_subdev *subdev) +void rproc_add_rvdev(struct rproc *rproc, struct rproc_vdev *rvdev) { - struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev); - - return rproc_add_virtio_dev(rvdev, rvdev->id); + if (rvdev && rproc) + list_add_tail(&rvdev->node, &rproc->rvdevs); } -static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed) +void rproc_remove_rvdev(struct rproc_vdev *rvdev) { - struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev); - int ret; - - ret = device_for_each_child(&rvdev->dev, NULL, rproc_remove_virtio_dev); - if (ret) - dev_warn(&rvdev->dev, "can't remove vdev child device: %d\n", ret); + if (rvdev) + list_del(&rvdev->node); } - -/** - * rproc_rvdev_release() - release the existence of a rvdev - * - * @dev: the subdevice's dev - */ -static void rproc_rvdev_release(struct device *dev) -{ - struct rproc_vdev *rvdev = container_of(dev, struct rproc_vdev, dev); - - of_reserved_mem_device_release(dev); - - kfree(rvdev); -} - /** * rproc_handle_vdev() - handle a vdev fw resource * @rproc: the remote processor - * @rsc: the vring resource descriptor + * @ptr: the vring resource descriptor * @offset: offset of the resource entry * @avail: size of available data (for sanity checking the image) * @@ -474,19 +466,21 @@ static void rproc_rvdev_release(struct device *dev) * use RSC_DEVMEM resource entries to map their required @da to the physical * address of their base CMA region (ouch, hacky!). * - * Returns 0 on success, or an appropriate error code otherwise + * Return: 0 on success, or an appropriate error code otherwise */ -static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, +static int rproc_handle_vdev(struct rproc *rproc, void *ptr, int offset, int avail) { + struct fw_rsc_vdev *rsc = ptr; struct device *dev = &rproc->dev; struct rproc_vdev *rvdev; - int i, ret; - char name[16]; + size_t rsc_size; + struct rproc_vdev_data rvdev_data; + struct platform_device *pdev; /* make sure resource isn't truncated */ - if (struct_size(rsc, vring, rsc->num_of_vrings) + rsc->config_len > - avail) { + rsc_size = struct_size(rsc, vring, rsc->num_of_vrings); + if (size_add(rsc_size, rsc->config_len) > avail) { dev_err(dev, "vdev rsc is truncated\n"); return -EINVAL; } @@ -506,95 +500,31 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, return -EINVAL; } - rvdev = kzalloc(sizeof(*rvdev), GFP_KERNEL); - if (!rvdev) - return -ENOMEM; - - kref_init(&rvdev->refcount); + rvdev_data.id = rsc->id; + rvdev_data.index = rproc->nb_vdev++; + rvdev_data.rsc_offset = offset; + rvdev_data.rsc = rsc; - rvdev->id = rsc->id; - rvdev->rproc = rproc; - rvdev->index = rproc->nb_vdev++; - - /* Initialise vdev subdevice */ - snprintf(name, sizeof(name), "vdev%dbuffer", rvdev->index); - rvdev->dev.parent = &rproc->dev; - rvdev->dev.dma_pfn_offset = rproc->dev.parent->dma_pfn_offset; - rvdev->dev.release = rproc_rvdev_release; - dev_set_name(&rvdev->dev, "%s#%s", dev_name(rvdev->dev.parent), name); - dev_set_drvdata(&rvdev->dev, rvdev); - - ret = device_register(&rvdev->dev); - if (ret) { - put_device(&rvdev->dev); - return ret; - } - /* Make device dma capable by inheriting from parent's capabilities */ - set_dma_ops(&rvdev->dev, get_dma_ops(rproc->dev.parent)); - - ret = dma_coerce_mask_and_coherent(&rvdev->dev, - 
dma_get_mask(rproc->dev.parent)); - if (ret) { - dev_warn(dev, - "Failed to set DMA mask %llx. Trying to continue... %x\n", - dma_get_mask(rproc->dev.parent), ret); - } - - /* parse the vrings */ - for (i = 0; i < rsc->num_of_vrings; i++) { - ret = rproc_parse_vring(rvdev, rsc, i); - if (ret) - goto free_rvdev; - } - - /* remember the resource offset*/ - rvdev->rsc_offset = offset; - - /* allocate the vring resources */ - for (i = 0; i < rsc->num_of_vrings; i++) { - ret = rproc_alloc_vring(rvdev, i); - if (ret) - goto unwind_vring_allocations; + /* + * When there is more than one remote processor, rproc->nb_vdev number is + * same for each separate instances of "rproc". If rvdev_data.index is used + * as device id, then we get duplication in sysfs, so need to use + * PLATFORM_DEVID_AUTO to auto select device id. + */ + pdev = platform_device_register_data(dev, "rproc-virtio", PLATFORM_DEVID_AUTO, &rvdev_data, + sizeof(rvdev_data)); + if (IS_ERR(pdev)) { + dev_err(dev, "failed to create rproc-virtio device\n"); + return PTR_ERR(pdev); } - list_add_tail(&rvdev->node, &rproc->rvdevs); - - rvdev->subdev.start = rproc_vdev_do_start; - rvdev->subdev.stop = rproc_vdev_do_stop; - - rproc_add_subdev(rproc, &rvdev->subdev); - return 0; - -unwind_vring_allocations: - for (i--; i >= 0; i--) - rproc_free_vring(&rvdev->vring[i]); -free_rvdev: - device_unregister(&rvdev->dev); - return ret; -} - -void rproc_vdev_release(struct kref *ref) -{ - struct rproc_vdev *rvdev = container_of(ref, struct rproc_vdev, refcount); - struct rproc_vring *rvring; - struct rproc *rproc = rvdev->rproc; - int id; - - for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) { - rvring = &rvdev->vring[id]; - rproc_free_vring(rvring); - } - - rproc_remove_subdev(rproc, &rvdev->subdev); - list_del(&rvdev->node); - device_unregister(&rvdev->dev); } /** * rproc_handle_trace() - handle a shared trace buffer resource * @rproc: the remote processor - * @rsc: the trace resource descriptor + * @ptr: the trace resource descriptor * @offset: offset of the resource entry * @avail: size of available data (for sanity checking the image) * @@ -606,11 +536,12 @@ void rproc_vdev_release(struct kref *ref) * support dynamically allocating this address using the generic * DMA API (but currently there isn't a use case for that). 
* - * Returns 0 on success, or an appropriate error code otherwise + * Return: 0 on success, or an appropriate error code otherwise */ -static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc, +static int rproc_handle_trace(struct rproc *rproc, void *ptr, int offset, int avail) { + struct fw_rsc_trace *rsc = ptr; struct rproc_debug_trace *trace; struct device *dev = &rproc->dev; char name[15]; @@ -642,10 +573,6 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc, /* create the debugfs entry */ trace->tfile = rproc_create_trace_file(name, rproc, trace); - if (!trace->tfile) { - kfree(trace); - return -EINVAL; - } list_add_tail(&trace->node, &rproc->traces); @@ -660,7 +587,7 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc, /** * rproc_handle_devmem() - handle devmem resource entry * @rproc: remote processor handle - * @rsc: the devmem resource entry + * @ptr: the devmem resource entry * @offset: offset of the resource entry * @avail: size of available data (for sanity checking the image) * @@ -682,10 +609,13 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc, * tell us ranges of physical addresses the firmware is allowed to request, * and not allow firmwares to request access to physical addresses that * are outside those ranges. + * + * Return: 0 on success, or an appropriate error code otherwise */ -static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc, +static int rproc_handle_devmem(struct rproc *rproc, void *ptr, int offset, int avail) { + struct fw_rsc_devmem *rsc = ptr; struct rproc_mem_entry *mapping; struct device *dev = &rproc->dev; int ret; @@ -709,7 +639,8 @@ static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc, if (!mapping) return -ENOMEM; - ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags); + ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags, + GFP_KERNEL); if (ret) { dev_err(dev, "failed to map devmem: %d\n", ret); goto out; @@ -743,6 +674,8 @@ out: * * This function allocate specified memory entry @mem using * dma_alloc_coherent() as default allocator + * + * Return: 0 on success, or an appropriate error code otherwise */ static int rproc_alloc_carveout(struct rproc *rproc, struct rproc_mem_entry *mem) @@ -761,7 +694,7 @@ static int rproc_alloc_carveout(struct rproc *rproc, return -ENOMEM; } - dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%zx\n", + dev_dbg(dev, "carveout va %p, dma %pad, len 0x%zx\n", va, &dma, mem->len); if (mem->da != FW_RSC_ADDR_ANY && !rproc->domain) { @@ -801,7 +734,7 @@ static int rproc_alloc_carveout(struct rproc *rproc, } ret = iommu_map(rproc->domain, mem->da, dma, mem->len, - mem->flags); + mem->flags, GFP_KERNEL); if (ret) { dev_err(dev, "iommu_map failed: %d\n", ret); goto free_mapping; @@ -849,6 +782,8 @@ dma_free: * * This function releases specified memory entry @mem allocated via * rproc_alloc_carveout() function by @rproc. 
+ * + * Return: 0 on success, or an appropriate error code otherwise */ static int rproc_release_carveout(struct rproc *rproc, struct rproc_mem_entry *mem) @@ -863,7 +798,7 @@ static int rproc_release_carveout(struct rproc *rproc, /** * rproc_handle_carveout() - handle phys contig memory allocation requests * @rproc: rproc handle - * @rsc: the resource entry + * @ptr: the resource entry * @offset: offset of the resource entry * @avail: size of available data (for image validation) * @@ -878,11 +813,13 @@ static int rproc_release_carveout(struct rproc *rproc, * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries * needed to map it (in case @rproc is using an IOMMU). Reducing the TLB * pressure is important; it may have a substantial impact on performance. + * + * Return: 0 on success, or an appropriate error code otherwise */ static int rproc_handle_carveout(struct rproc *rproc, - struct fw_rsc_carveout *rsc, - int offset, int avail) + void *ptr, int offset, int avail) { + struct fw_rsc_carveout *rsc = ptr; struct rproc_mem_entry *carveout; struct device *dev = &rproc->dev; @@ -923,7 +860,7 @@ static int rproc_handle_carveout(struct rproc *rproc, return 0; } - /* Register carveout in in list */ + /* Register carveout in list */ carveout = rproc_mem_entry_init(dev, NULL, 0, rsc->len, rsc->da, rproc_alloc_carveout, rproc_release_carveout, rsc->name); @@ -966,7 +903,10 @@ EXPORT_SYMBOL(rproc_add_carveout); * * This function allocates a rproc_mem_entry struct and fill it with parameters * provided by client. + * + * Return: a valid pointer on success, or NULL on failure */ +__printf(8, 9) struct rproc_mem_entry * rproc_mem_entry_init(struct device *dev, void *va, dma_addr_t dma, size_t len, u32 da, @@ -1009,7 +949,10 @@ EXPORT_SYMBOL(rproc_mem_entry_init); * * This function allocates a rproc_mem_entry struct and fill it with parameters * provided by client. + * + * Return: a valid pointer on success, or NULL on failure */ +__printf(5, 6) struct rproc_mem_entry * rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len, u32 da, const char *name, ...) @@ -1034,15 +977,38 @@ rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len, } EXPORT_SYMBOL(rproc_of_resm_mem_entry_init); +/** + * rproc_of_parse_firmware() - parse and return the firmware-name + * @dev: pointer on device struct representing a rproc + * @index: index to use for the firmware-name retrieval + * @fw_name: pointer to a character string, in which the firmware + * name is returned on success and unmodified otherwise. + * + * This is an OF helper function that parses a device's DT node for + * the "firmware-name" property and returns the firmware name pointer + * in @fw_name on success. + * + * Return: 0 on success, or an appropriate failure. + */ +int rproc_of_parse_firmware(struct device *dev, int index, const char **fw_name) +{ + int ret; + + ret = of_property_read_string_index(dev->of_node, "firmware-name", + index, fw_name); + return ret ? ret : 0; +} +EXPORT_SYMBOL(rproc_of_parse_firmware); + /* * A lookup table for resource handlers. The indices are defined in * enum fw_resource_type. 
*/ static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = { - [RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout, - [RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem, - [RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace, - [RSC_VDEV] = (rproc_handle_resource_t)rproc_handle_vdev, + [RSC_CARVEOUT] = rproc_handle_carveout, + [RSC_DEVMEM] = rproc_handle_devmem, + [RSC_TRACE] = rproc_handle_trace, + [RSC_VDEV] = rproc_handle_vdev, }; /* handle firmware resource entries before booting the remote processor */ @@ -1239,19 +1205,6 @@ static int rproc_alloc_registered_carveouts(struct rproc *rproc) return 0; } -/** - * rproc_coredump_cleanup() - clean up dump_segments list - * @rproc: the remote processor handle - */ -static void rproc_coredump_cleanup(struct rproc *rproc) -{ - struct rproc_dump_segment *entry, *tmp; - - list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) { - list_del(&entry->node); - kfree(entry); - } -} /** * rproc_resource_cleanup() - clean up and free all acquired resources @@ -1260,7 +1213,7 @@ static void rproc_coredump_cleanup(struct rproc *rproc) * This function will free all resources acquired for @rproc, and it * is called whenever @rproc either shuts down or fails to boot. */ -static void rproc_resource_cleanup(struct rproc *rproc) +void rproc_resource_cleanup(struct rproc *rproc) { struct rproc_mem_entry *entry, *tmp; struct rproc_debug_trace *trace, *ttmp; @@ -1300,10 +1253,11 @@ static void rproc_resource_cleanup(struct rproc *rproc) /* clean up remote vdev entries */ list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node) - kref_put(&rvdev->refcount, rproc_vdev_release); + platform_device_unregister(rvdev->pdev); rproc_coredump_cleanup(rproc); } +EXPORT_SYMBOL(rproc_resource_cleanup); static int rproc_start(struct rproc *rproc, const struct firmware *fw) { @@ -1370,6 +1324,48 @@ reset_table_ptr: return ret; } +static int __rproc_attach(struct rproc *rproc) +{ + struct device *dev = &rproc->dev; + int ret; + + ret = rproc_prepare_subdevices(rproc); + if (ret) { + dev_err(dev, "failed to prepare subdevices for %s: %d\n", + rproc->name, ret); + goto out; + } + + /* Attach to the remote processor */ + ret = rproc_attach_device(rproc); + if (ret) { + dev_err(dev, "can't attach to rproc %s: %d\n", + rproc->name, ret); + goto unprepare_subdevices; + } + + /* Start any subdevices for the remote processor */ + ret = rproc_start_subdevices(rproc); + if (ret) { + dev_err(dev, "failed to probe subdevices for %s: %d\n", + rproc->name, ret); + goto stop_rproc; + } + + rproc->state = RPROC_ATTACHED; + + dev_info(dev, "remote processor %s is now attached\n", rproc->name); + + return 0; + +stop_rproc: + rproc->ops->stop(rproc); +unprepare_subdevices: + rproc_unprepare_subdevices(rproc); +out: + return ret; +} + /* * take a firmware and boot a remote processor with it. 
*/ @@ -1383,12 +1379,6 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw) if (ret) return ret; - ret = pm_runtime_get_sync(dev); - if (ret < 0) { - dev_err(dev, "pm_runtime_get_sync failed: %d\n", ret); - return ret; - } - dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size); /* @@ -1398,7 +1388,7 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw) ret = rproc_enable_iommu(rproc); if (ret) { dev_err(dev, "can't enable iommu: %d\n", ret); - goto put_pm_runtime; + return ret; } /* Prepare rproc for firmware loading if needed */ @@ -1452,8 +1442,217 @@ unprepare_rproc: rproc_unprepare_device(rproc); disable_iommu: rproc_disable_iommu(rproc); -put_pm_runtime: - pm_runtime_put(dev); + return ret; +} + +static int rproc_set_rsc_table(struct rproc *rproc) +{ + struct resource_table *table_ptr; + struct device *dev = &rproc->dev; + size_t table_sz; + int ret; + + table_ptr = rproc_get_loaded_rsc_table(rproc, &table_sz); + if (!table_ptr) { + /* Not having a resource table is acceptable */ + return 0; + } + + if (IS_ERR(table_ptr)) { + ret = PTR_ERR(table_ptr); + dev_err(dev, "can't load resource table: %d\n", ret); + return ret; + } + + /* + * If it is possible to detach the remote processor, keep an untouched + * copy of the resource table. That way we can start fresh again when + * the remote processor is re-attached, that is: + * + * DETACHED -> ATTACHED -> DETACHED -> ATTACHED + * + * Free'd in rproc_reset_rsc_table_on_detach() and + * rproc_reset_rsc_table_on_stop(). + */ + if (rproc->ops->detach) { + rproc->clean_table = kmemdup(table_ptr, table_sz, GFP_KERNEL); + if (!rproc->clean_table) + return -ENOMEM; + } else { + rproc->clean_table = NULL; + } + + rproc->cached_table = NULL; + rproc->table_ptr = table_ptr; + rproc->table_sz = table_sz; + + return 0; +} + +static int rproc_reset_rsc_table_on_detach(struct rproc *rproc) +{ + struct resource_table *table_ptr; + + /* A resource table was never retrieved, nothing to do here */ + if (!rproc->table_ptr) + return 0; + + /* + * If we made it to this point a clean_table _must_ have been + * allocated in rproc_set_rsc_table(). If one isn't present + * something went really wrong and we must complain. + */ + if (WARN_ON(!rproc->clean_table)) + return -EINVAL; + + /* Remember where the external entity installed the resource table */ + table_ptr = rproc->table_ptr; + + /* + * If we made it here the remote processor was started by another + * entity and a cache table doesn't exist. As such make a copy of + * the resource table currently used by the remote processor and + * use that for the rest of the shutdown process. The memory + * allocated here is free'd in rproc_detach(). + */ + rproc->cached_table = kmemdup(rproc->table_ptr, + rproc->table_sz, GFP_KERNEL); + if (!rproc->cached_table) + return -ENOMEM; + + /* + * Use a copy of the resource table for the remainder of the + * shutdown process. + */ + rproc->table_ptr = rproc->cached_table; + + /* + * Reset the memory area where the firmware loaded the resource table + * to its original value. That way when we re-attach the remote + * processor the resource table is clean and ready to be used again. + */ + memcpy(table_ptr, rproc->clean_table, rproc->table_sz); + + /* + * The clean resource table is no longer needed. Allocated in + * rproc_set_rsc_table(). 
+ */ + kfree(rproc->clean_table); + + return 0; +} + +static int rproc_reset_rsc_table_on_stop(struct rproc *rproc) +{ + /* A resource table was never retrieved, nothing to do here */ + if (!rproc->table_ptr) + return 0; + + /* + * If a cache table exists the remote processor was started by + * the remoteproc core. That cache table should be used for + * the rest of the shutdown process. + */ + if (rproc->cached_table) + goto out; + + /* + * If we made it here the remote processor was started by another + * entity and a cache table doesn't exist. As such make a copy of + * the resource table currently used by the remote processor and + * use that for the rest of the shutdown process. The memory + * allocated here is free'd in rproc_shutdown(). + */ + rproc->cached_table = kmemdup(rproc->table_ptr, + rproc->table_sz, GFP_KERNEL); + if (!rproc->cached_table) + return -ENOMEM; + + /* + * Since the remote processor is being switched off the clean table + * won't be needed. Allocated in rproc_set_rsc_table(). + */ + kfree(rproc->clean_table); + +out: + /* + * Use a copy of the resource table for the remainder of the + * shutdown process. + */ + rproc->table_ptr = rproc->cached_table; + return 0; +} + +/* + * Attach to remote processor - similar to rproc_fw_boot() but without + * the steps that deal with the firmware image. + */ +static int rproc_attach(struct rproc *rproc) +{ + struct device *dev = &rproc->dev; + int ret; + + /* + * if enabling an IOMMU isn't relevant for this rproc, this is + * just a nop + */ + ret = rproc_enable_iommu(rproc); + if (ret) { + dev_err(dev, "can't enable iommu: %d\n", ret); + return ret; + } + + /* Do anything that is needed to boot the remote processor */ + ret = rproc_prepare_device(rproc); + if (ret) { + dev_err(dev, "can't prepare rproc %s: %d\n", rproc->name, ret); + goto disable_iommu; + } + + ret = rproc_set_rsc_table(rproc); + if (ret) { + dev_err(dev, "can't load resource table: %d\n", ret); + goto clean_up_resources; + } + + /* reset max_notifyid */ + rproc->max_notifyid = -1; + + /* reset handled vdev */ + rproc->nb_vdev = 0; + + /* + * Handle firmware resources required to attach to a remote processor. + * Because we are attaching rather than booting the remote processor, + * we expect the platform driver to properly set rproc->table_ptr. + */ + ret = rproc_handle_resources(rproc, rproc_loading_handlers); + if (ret) { + dev_err(dev, "Failed to process resources: %d\n", ret); + goto clean_up_resources; + } + + /* Allocate carveout resources associated to rproc */ + ret = rproc_alloc_registered_carveouts(rproc); + if (ret) { + dev_err(dev, "Failed to allocate associated carveouts: %d\n", + ret); + goto clean_up_resources; + } + + ret = __rproc_attach(rproc); + if (ret) + goto clean_up_resources; + + return 0; + +clean_up_resources: + rproc_resource_cleanup(rproc); + /* release HW resources if needed */ + rproc_unprepare_device(rproc); + kfree(rproc->clean_table); +disable_iommu: + rproc_disable_iommu(rproc); return ret; } @@ -1479,10 +1678,19 @@ static int rproc_trigger_auto_boot(struct rproc *rproc) int ret; /* + * Since the remote processor is in a detached state, it has already + * been booted by another entity. As such there is no point in waiting + * for a firmware image to be loaded, we can simply initiate the process + * of attaching to it immediately. 
+ */ + if (rproc->state == RPROC_DETACHED) + return rproc_boot(rproc); + + /* * We're initiating an asynchronous firmware loading, so we can * be built-in kernel code, without hanging the boot process. */ - ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, + ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT, rproc->firmware, &rproc->dev, GFP_KERNEL, rproc, rproc_auto_boot_callback); if (ret < 0) @@ -1496,11 +1704,20 @@ static int rproc_stop(struct rproc *rproc, bool crashed) struct device *dev = &rproc->dev; int ret; + /* No need to continue if a stop() operation has not been provided */ + if (!rproc->ops->stop) + return -EINVAL; + /* Stop any subdevices for the remote processor */ rproc_stop_subdevices(rproc, crashed); /* the installed resource table is no longer accessible */ - rproc->table_ptr = rproc->cached_table; + ret = rproc_reset_rsc_table_on_stop(rproc); + if (ret) { + dev_err(dev, "can't reset resource table: %d\n", ret); + return ret; + } + /* power off the remote processor */ ret = rproc->ops->stop(rproc); @@ -1518,181 +1735,81 @@ static int rproc_stop(struct rproc *rproc, bool crashed) return 0; } -/** - * rproc_coredump_add_segment() - add segment of device memory to coredump - * @rproc: handle of a remote processor - * @da: device address - * @size: size of segment - * - * Add device memory to the list of segments to be included in a coredump for - * the remoteproc. - * - * Return: 0 on success, negative errno on error. +/* + * __rproc_detach(): Does the opposite of __rproc_attach() */ -int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size) +static int __rproc_detach(struct rproc *rproc) { - struct rproc_dump_segment *segment; - - segment = kzalloc(sizeof(*segment), GFP_KERNEL); - if (!segment) - return -ENOMEM; + struct device *dev = &rproc->dev; + int ret; - segment->da = da; - segment->size = size; + /* No need to continue if a detach() operation has not been provided */ + if (!rproc->ops->detach) + return -EINVAL; - list_add_tail(&segment->node, &rproc->dump_segments); + /* Stop any subdevices for the remote processor */ + rproc_stop_subdevices(rproc, false); - return 0; -} -EXPORT_SYMBOL(rproc_coredump_add_segment); + /* the installed resource table is no longer accessible */ + ret = rproc_reset_rsc_table_on_detach(rproc); + if (ret) { + dev_err(dev, "can't reset resource table: %d\n", ret); + return ret; + } -/** - * rproc_coredump_add_custom_segment() - add custom coredump segment - * @rproc: handle of a remote processor - * @da: device address - * @size: size of segment - * @dumpfn: custom dump function called for each segment during coredump - * @priv: private data - * - * Add device memory to the list of segments to be included in the coredump - * and associate the segment with the given custom dump function and private - * data. - * - * Return: 0 on success, negative errno on error. 
- */ -int rproc_coredump_add_custom_segment(struct rproc *rproc, - dma_addr_t da, size_t size, - void (*dumpfn)(struct rproc *rproc, - struct rproc_dump_segment *segment, - void *dest), - void *priv) -{ - struct rproc_dump_segment *segment; + /* Tell the remote processor the core isn't available anymore */ + ret = rproc->ops->detach(rproc); + if (ret) { + dev_err(dev, "can't detach from rproc: %d\n", ret); + return ret; + } - segment = kzalloc(sizeof(*segment), GFP_KERNEL); - if (!segment) - return -ENOMEM; + rproc_unprepare_subdevices(rproc); - segment->da = da; - segment->size = size; - segment->priv = priv; - segment->dump = dumpfn; + rproc->state = RPROC_DETACHED; - list_add_tail(&segment->node, &rproc->dump_segments); + dev_info(dev, "detached remote processor %s\n", rproc->name); return 0; } -EXPORT_SYMBOL(rproc_coredump_add_custom_segment); -/** - * rproc_coredump_set_elf_info() - set coredump elf information - * @rproc: handle of a remote processor - * @class: elf class for coredump elf file - * @machine: elf machine for coredump elf file - * - * Set elf information which will be used for coredump elf file. - * - * Return: 0 on success, negative errno on error. - */ -int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine) +static int rproc_attach_recovery(struct rproc *rproc) { - if (class != ELFCLASS64 && class != ELFCLASS32) - return -EINVAL; + int ret; - rproc->elf_class = class; - rproc->elf_machine = machine; + ret = __rproc_detach(rproc); + if (ret) + return ret; - return 0; + return __rproc_attach(rproc); } -EXPORT_SYMBOL(rproc_coredump_set_elf_info); -/** - * rproc_coredump() - perform coredump - * @rproc: rproc handle - * - * This function will generate an ELF header for the registered segments - * and create a devcoredump device associated with rproc. 
- */ -static void rproc_coredump(struct rproc *rproc) -{ - struct rproc_dump_segment *segment; - void *phdr; - void *ehdr; - size_t data_size; - size_t offset; - void *data; - void *ptr; - u8 class = rproc->elf_class; - int phnum = 0; - - if (list_empty(&rproc->dump_segments)) - return; +static int rproc_boot_recovery(struct rproc *rproc) +{ + const struct firmware *firmware_p; + struct device *dev = &rproc->dev; + int ret; - if (class == ELFCLASSNONE) { - dev_err(&rproc->dev, "Elf class is not set\n"); - return; - } + ret = rproc_stop(rproc, true); + if (ret) + return ret; - data_size = elf_size_of_hdr(class); - list_for_each_entry(segment, &rproc->dump_segments, node) { - data_size += elf_size_of_phdr(class) + segment->size; + /* generate coredump */ + rproc->ops->coredump(rproc); - phnum++; + /* load firmware */ + ret = request_firmware(&firmware_p, rproc->firmware, dev); + if (ret < 0) { + dev_err(dev, "request_firmware failed: %d\n", ret); + return ret; } - data = vmalloc(data_size); - if (!data) - return; - - ehdr = data; - - memset(ehdr, 0, elf_size_of_hdr(class)); - /* e_ident field is common for both elf32 and elf64 */ - elf_hdr_init_ident(ehdr, class); - - elf_hdr_set_e_type(class, ehdr, ET_CORE); - elf_hdr_set_e_machine(class, ehdr, rproc->elf_machine); - elf_hdr_set_e_version(class, ehdr, EV_CURRENT); - elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr); - elf_hdr_set_e_phoff(class, ehdr, elf_size_of_hdr(class)); - elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class)); - elf_hdr_set_e_phentsize(class, ehdr, elf_size_of_phdr(class)); - elf_hdr_set_e_phnum(class, ehdr, phnum); - - phdr = data + elf_hdr_get_e_phoff(class, ehdr); - offset = elf_hdr_get_e_phoff(class, ehdr); - offset += elf_size_of_phdr(class) * elf_hdr_get_e_phnum(class, ehdr); - - list_for_each_entry(segment, &rproc->dump_segments, node) { - memset(phdr, 0, elf_size_of_phdr(class)); - elf_phdr_set_p_type(class, phdr, PT_LOAD); - elf_phdr_set_p_offset(class, phdr, offset); - elf_phdr_set_p_vaddr(class, phdr, segment->da); - elf_phdr_set_p_paddr(class, phdr, segment->da); - elf_phdr_set_p_filesz(class, phdr, segment->size); - elf_phdr_set_p_memsz(class, phdr, segment->size); - elf_phdr_set_p_flags(class, phdr, PF_R | PF_W | PF_X); - elf_phdr_set_p_align(class, phdr, 0); - - if (segment->dump) { - segment->dump(rproc, segment, data + offset); - } else { - ptr = rproc_da_to_va(rproc, segment->da, segment->size); - if (!ptr) { - dev_err(&rproc->dev, - "invalid coredump segment (%pad, %zu)\n", - &segment->da, segment->size); - memset(data + offset, 0xff, segment->size); - } else { - memcpy(data + offset, ptr, segment->size); - } - } + /* boot the remote processor up again */ + ret = rproc_start(rproc, firmware_p); - offset += elf_phdr_get_p_filesz(class, phdr); - phdr += elf_size_of_phdr(class); - } + release_firmware(firmware_p); - dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL); + return ret; } /** @@ -1704,10 +1821,11 @@ static void rproc_coredump(struct rproc *rproc) * remoteproc functional again. * * This function can sleep, so it cannot be called from atomic context. 
+ * + * Return: 0 on success or a negative value upon failure */ int rproc_trigger_recovery(struct rproc *rproc) { - const struct firmware *firmware_p; struct device *dev = &rproc->dev; int ret; @@ -1721,24 +1839,10 @@ int rproc_trigger_recovery(struct rproc *rproc) dev_err(dev, "recovering %s\n", rproc->name); - ret = rproc_stop(rproc, true); - if (ret) - goto unlock_mutex; - - /* generate coredump */ - rproc_coredump(rproc); - - /* load firmware */ - ret = request_firmware(&firmware_p, rproc->firmware, dev); - if (ret < 0) { - dev_err(dev, "request_firmware failed: %d\n", ret); - goto unlock_mutex; - } - - /* boot the remote processor up again */ - ret = rproc_start(rproc, firmware_p); - - release_firmware(firmware_p); + if (rproc_has_feature(rproc, RPROC_FEAT_ATTACH_ON_RECOVERY)) + ret = rproc_attach_recovery(rproc); + else + ret = rproc_boot_recovery(rproc); unlock_mutex: mutex_unlock(&rproc->lock); @@ -1761,12 +1865,18 @@ static void rproc_crash_handler_work(struct work_struct *work) mutex_lock(&rproc->lock); - if (rproc->state == RPROC_CRASHED || rproc->state == RPROC_OFFLINE) { + if (rproc->state == RPROC_CRASHED) { /* handle only the first crash detected */ mutex_unlock(&rproc->lock); return; } + if (rproc->state == RPROC_OFFLINE) { + /* Don't recover if the remote processor was stopped */ + mutex_unlock(&rproc->lock); + goto out; + } + rproc->state = RPROC_CRASHED; dev_err(dev, "handling crash #%u in %s\n", ++rproc->crash_cnt, rproc->name); @@ -1776,6 +1886,7 @@ static void rproc_crash_handler_work(struct work_struct *work) if (!rproc->recovery_disabled) rproc_trigger_recovery(rproc); +out: pm_relax(rproc->dev.parent); } @@ -1788,7 +1899,7 @@ static void rproc_crash_handler_work(struct work_struct *work) * If the remote processor is already powered on, this function immediately * returns (successfully). * - * Returns 0 on success, and an appropriate error value otherwise. + * Return: 0 on success, and an appropriate error value otherwise */ int rproc_boot(struct rproc *rproc) { @@ -1815,24 +1926,30 @@ int rproc_boot(struct rproc *rproc) goto unlock_mutex; } - /* skip the boot process if rproc is already powered up */ + /* skip the boot or attach process if rproc is already powered up */ if (atomic_inc_return(&rproc->power) > 1) { ret = 0; goto unlock_mutex; } - dev_info(dev, "powering up %s\n", rproc->name); + if (rproc->state == RPROC_DETACHED) { + dev_info(dev, "attaching to %s\n", rproc->name); - /* load firmware */ - ret = request_firmware(&firmware_p, rproc->firmware, dev); - if (ret < 0) { - dev_err(dev, "request_firmware failed: %d\n", ret); - goto downref_rproc; - } + ret = rproc_attach(rproc); + } else { + dev_info(dev, "powering up %s\n", rproc->name); - ret = rproc_fw_boot(rproc, firmware_p); + /* load firmware */ + ret = request_firmware(&firmware_p, rproc->firmware, dev); + if (ret < 0) { + dev_err(dev, "request_firmware failed: %d\n", ret); + goto downref_rproc; + } - release_firmware(firmware_p); + ret = rproc_fw_boot(rproc, firmware_p); + + release_firmware(firmware_p); + } downref_rproc: if (ret) @@ -1861,8 +1978,10 @@ EXPORT_SYMBOL(rproc_boot); * which means that the @rproc handle stays valid even after rproc_shutdown() * returns, and users can still use it with a subsequent rproc_boot(), if * needed. 
+ * + * Return: 0 on success, and an appropriate error value otherwise */ -void rproc_shutdown(struct rproc *rproc) +int rproc_shutdown(struct rproc *rproc) { struct device *dev = &rproc->dev; int ret; @@ -1870,7 +1989,13 @@ void rproc_shutdown(struct rproc *rproc) ret = mutex_lock_interruptible(&rproc->lock); if (ret) { dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret); - return; + return ret; + } + + if (rproc->state != RPROC_RUNNING && + rproc->state != RPROC_ATTACHED) { + ret = -EINVAL; + goto out; } /* if the remote proc is still needed, bail out */ @@ -1891,18 +2016,83 @@ void rproc_shutdown(struct rproc *rproc) rproc_disable_iommu(rproc); - pm_runtime_put(dev); - /* Free the copy of the resource table */ kfree(rproc->cached_table); rproc->cached_table = NULL; rproc->table_ptr = NULL; out: mutex_unlock(&rproc->lock); + return ret; } EXPORT_SYMBOL(rproc_shutdown); /** + * rproc_detach() - Detach the remote processor from the + * remoteproc core + * + * @rproc: the remote processor + * + * Detach a remote processor (previously attached to with rproc_attach()). + * + * In case @rproc is still being used by an additional user(s), then + * this function will just decrement the power refcount and exit, + * without disconnecting the device. + * + * Function rproc_detach() calls __rproc_detach() in order to let a remote + * processor know that services provided by the application processor are + * no longer available. From there it should be possible to remove the + * platform driver and even power cycle the application processor (if the HW + * supports it) without needing to switch off the remote processor. + * + * Return: 0 on success, and an appropriate error value otherwise + */ +int rproc_detach(struct rproc *rproc) +{ + struct device *dev = &rproc->dev; + int ret; + + ret = mutex_lock_interruptible(&rproc->lock); + if (ret) { + dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret); + return ret; + } + + if (rproc->state != RPROC_ATTACHED) { + ret = -EINVAL; + goto out; + } + + /* if the remote proc is still needed, bail out */ + if (!atomic_dec_and_test(&rproc->power)) { + ret = 0; + goto out; + } + + ret = __rproc_detach(rproc); + if (ret) { + atomic_inc(&rproc->power); + goto out; + } + + /* clean up all acquired resources */ + rproc_resource_cleanup(rproc); + + /* release HW resources if needed */ + rproc_unprepare_device(rproc); + + rproc_disable_iommu(rproc); + + /* Free the copy of the resource table */ + kfree(rproc->cached_table); + rproc->cached_table = NULL; + rproc->table_ptr = NULL; +out: + mutex_unlock(&rproc->lock); + return ret; +} +EXPORT_SYMBOL(rproc_detach); + +/** * rproc_get_by_phandle() - find a remote processor by phandle * @phandle: phandle to the rproc * @@ -1912,12 +2102,13 @@ EXPORT_SYMBOL(rproc_shutdown); * This function increments the remote processor's refcount, so always * use rproc_put() to decrement it back once rproc isn't needed anymore. * - * Returns the rproc handle on success, and NULL on failure. 
+ * Return: rproc handle on success, and NULL on failure */ #ifdef CONFIG_OF struct rproc *rproc_get_by_phandle(phandle phandle) { struct rproc *rproc = NULL, *r; + struct device_driver *driver; struct device_node *np; np = of_find_node_by_phandle(phandle); @@ -1926,9 +2117,28 @@ struct rproc *rproc_get_by_phandle(phandle phandle) rcu_read_lock(); list_for_each_entry_rcu(r, &rproc_list, node) { - if (r->dev.parent && r->dev.parent->of_node == np) { + if (r->dev.parent && device_match_of_node(r->dev.parent, np)) { /* prevent underlying implementation from being removed */ - if (!try_module_get(r->dev.parent->driver->owner)) { + + /* + * If the remoteproc's parent has a driver, the + * remoteproc is not part of a cluster and we can use + * that driver. + */ + driver = r->dev.parent->driver; + + /* + * If the remoteproc's parent does not have a driver, + * look for the driver associated with the cluster. + */ + if (!driver) { + if (r->dev.parent->parent) + driver = r->dev.parent->parent->driver; + if (!driver) + break; + } + + if (!try_module_get(driver->owner)) { dev_err(&r->dev, "can't get owner\n"); break; } @@ -1953,6 +2163,106 @@ struct rproc *rproc_get_by_phandle(phandle phandle) EXPORT_SYMBOL(rproc_get_by_phandle); /** + * rproc_set_firmware() - assign a new firmware + * @rproc: rproc handle to which the new firmware is being assigned + * @fw_name: new firmware name to be assigned + * + * This function allows remoteproc drivers or clients to configure a custom + * firmware name that is different from the default name used during remoteproc + * registration. The function does not trigger a remote processor boot, + * only sets the firmware name used for a subsequent boot. This function + * should also be called only when the remote processor is offline. + * + * This allows either the userspace to configure a different name through + * sysfs or a kernel-level remoteproc or a remoteproc client driver to set + * a specific firmware when it is controlling the boot and shutdown of the + * remote processor. + * + * Return: 0 on success or a negative value upon failure + */ +int rproc_set_firmware(struct rproc *rproc, const char *fw_name) +{ + struct device *dev; + int ret, len; + char *p; + + if (!rproc || !fw_name) + return -EINVAL; + + dev = rproc->dev.parent; + + ret = mutex_lock_interruptible(&rproc->lock); + if (ret) { + dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret); + return -EINVAL; + } + + if (rproc->state != RPROC_OFFLINE) { + dev_err(dev, "can't change firmware while running\n"); + ret = -EBUSY; + goto out; + } + + len = strcspn(fw_name, "\n"); + if (!len) { + dev_err(dev, "can't provide empty string for firmware name\n"); + ret = -EINVAL; + goto out; + } + + p = kstrndup(fw_name, len, GFP_KERNEL); + if (!p) { + ret = -ENOMEM; + goto out; + } + + kfree_const(rproc->firmware); + rproc->firmware = p; + +out: + mutex_unlock(&rproc->lock); + return ret; +} +EXPORT_SYMBOL(rproc_set_firmware); + +static int rproc_validate(struct rproc *rproc) +{ + switch (rproc->state) { + case RPROC_OFFLINE: + /* + * An offline processor without a start() + * function makes no sense. + */ + if (!rproc->ops->start) + return -EINVAL; + break; + case RPROC_DETACHED: + /* + * A remote processor in a detached state without an + * attach() function makes not sense. + */ + if (!rproc->ops->attach) + return -EINVAL; + /* + * When attaching to a remote processor the device memory + * is already available and as such there is no need to have a + * cached table. 
+ */ + if (rproc->cached_table) + return -EINVAL; + break; + default: + /* + * When adding a remote processor, the state of the device + * can be offline or detached, nothing else. + */ + return -EINVAL; + } + + return 0; +} + +/** * rproc_add() - register a remote processor * @rproc: the remote processor handle to register * @@ -1962,8 +2272,6 @@ EXPORT_SYMBOL(rproc_get_by_phandle); * This is called by the platform-specific rproc implementation, whenever * a new remote processor device is probed. * - * Returns 0 on success and an appropriate error code otherwise. - * * Note: this function initiates an asynchronous firmware loading * context, which will look for virtio devices supported by the rproc's * firmware. @@ -1971,16 +2279,29 @@ EXPORT_SYMBOL(rproc_get_by_phandle); * If found, those virtio devices will be created and added, so as a result * of registering this remote processor, additional virtio drivers might be * probed. + * + * Return: 0 on success and an appropriate error code otherwise */ int rproc_add(struct rproc *rproc) { struct device *dev = &rproc->dev; int ret; - ret = device_add(dev); + ret = rproc_validate(rproc); if (ret < 0) return ret; + /* add char device for this remoteproc */ + ret = rproc_char_device_add(rproc); + if (ret < 0) + return ret; + + ret = device_add(dev); + if (ret < 0) { + put_device(dev); + goto rproc_remove_cdev; + } + dev_info(dev, "%s is available\n", rproc->name); /* create debugfs entries */ @@ -1990,7 +2311,7 @@ int rproc_add(struct rproc *rproc) if (rproc->auto_boot) { ret = rproc_trigger_auto_boot(rproc); if (ret < 0) - return ret; + goto rproc_remove_dev; } /* expose to rproc_get_by_phandle users */ @@ -1999,6 +2320,13 @@ int rproc_add(struct rproc *rproc) mutex_unlock(&rproc_list_mutex); return 0; + +rproc_remove_dev: + rproc_delete_debug_dir(rproc); + device_del(dev); +rproc_remove_cdev: + rproc_char_device_remove(rproc); + return ret; } EXPORT_SYMBOL(rproc_add); @@ -2015,7 +2343,7 @@ static void devm_rproc_remove(void *rproc) * This function performs like rproc_add() but the registered rproc device will * automatically be removed on driver detach. * - * Returns: 0 on success, negative errno on failure + * Return: 0 on success, negative errno on failure */ int devm_rproc_add(struct device *dev, struct rproc *rproc) { @@ -2047,7 +2375,7 @@ static void rproc_type_release(struct device *dev) idr_destroy(&rproc->notifyids); if (rproc->index >= 0) - ida_simple_remove(&rproc_dev_index, rproc->index); + ida_free(&rproc_dev_index, rproc->index); kfree_const(rproc->firmware); kfree_const(rproc->name); @@ -2088,6 +2416,10 @@ static int rproc_alloc_ops(struct rproc *rproc, const struct rproc_ops *ops) if (!rproc->ops) return -ENOMEM; + /* Default to rproc_coredump if no coredump function is specified */ + if (!rproc->ops->coredump) + rproc->ops->coredump = rproc_coredump; + if (rproc->ops->load) return 0; @@ -2119,10 +2451,10 @@ static int rproc_alloc_ops(struct rproc *rproc, const struct rproc_ops *ops) * implementations should then call rproc_add() to complete * the registration of the remote processor. * - * On success the new rproc is returned, and on failure, NULL. - * * Note: _never_ directly deallocate @rproc, even if it was not registered * yet. Instead, when you need to unroll rproc_alloc(), use rproc_free(). 
+ * + * Return: new rproc pointer on success, and NULL on failure */ struct rproc *rproc_alloc(struct device *dev, const char *name, const struct rproc_ops *ops, @@ -2149,6 +2481,13 @@ struct rproc *rproc_alloc(struct device *dev, const char *name, rproc->dev.driver_data = rproc; idr_init(&rproc->notifyids); + /* Assign a unique device index and name */ + rproc->index = ida_alloc(&rproc_dev_index, GFP_KERNEL); + if (rproc->index < 0) { + dev_err(dev, "ida_alloc failed: %d\n", rproc->index); + goto put_device; + } + rproc->name = kstrdup_const(name, GFP_KERNEL); if (!rproc->name) goto put_device; @@ -2159,13 +2498,6 @@ struct rproc *rproc_alloc(struct device *dev, const char *name, if (rproc_alloc_ops(rproc, ops)) goto put_device; - /* Assign a unique device index and name */ - rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL); - if (rproc->index < 0) { - dev_err(dev, "ida_simple_get failed: %d\n", rproc->index); - goto put_device; - } - dev_set_name(&rproc->dev, "remoteproc%d", rproc->index); atomic_set(&rproc->power, 0); @@ -2183,9 +2515,6 @@ struct rproc *rproc_alloc(struct device *dev, const char *name, rproc->state = RPROC_OFFLINE; - pm_runtime_no_callbacks(&rproc->dev); - pm_runtime_enable(&rproc->dev); - return rproc; put_device: @@ -2205,7 +2534,6 @@ EXPORT_SYMBOL(rproc_alloc); */ void rproc_free(struct rproc *rproc) { - pm_runtime_disable(&rproc->dev); put_device(&rproc->dev); } EXPORT_SYMBOL(rproc_free); @@ -2221,7 +2549,11 @@ EXPORT_SYMBOL(rproc_free); */ void rproc_put(struct rproc *rproc) { - module_put(rproc->dev.parent->driver->owner); + if (rproc->dev.parent->driver) + module_put(rproc->dev.parent->driver->owner); + else + module_put(rproc->dev.parent->parent->driver->owner); + put_device(&rproc->dev); } EXPORT_SYMBOL(rproc_put); @@ -2239,17 +2571,15 @@ EXPORT_SYMBOL(rproc_put); * of the outstanding reference created by rproc_alloc. To decrement that * one last refcount, one still needs to call rproc_free(). * - * Returns 0 on success and -EINVAL if @rproc isn't valid. + * Return: 0 on success and -EINVAL if @rproc isn't valid */ int rproc_del(struct rproc *rproc) { if (!rproc) return -EINVAL; - /* if rproc is marked always-on, rproc_add() booted it */ /* TODO: make sure this works with rproc->power > 1 */ - if (rproc->auto_boot) - rproc_shutdown(rproc); + rproc_shutdown(rproc); mutex_lock(&rproc->lock); rproc->state = RPROC_DELETED; @@ -2266,6 +2596,7 @@ int rproc_del(struct rproc *rproc) synchronize_rcu(); device_del(&rproc->dev); + rproc_char_device_remove(rproc); return 0; } @@ -2287,7 +2618,7 @@ static void devm_rproc_free(struct device *dev, void *res) * This function performs like rproc_alloc() but the acquired rproc device will * automatically be released on driver detach. * - * Returns: new rproc instance, or NULL on failure + * Return: new rproc instance, or NULL on failure */ struct rproc *devm_rproc_alloc(struct device *dev, const char *name, const struct rproc_ops *ops, @@ -2339,7 +2670,7 @@ EXPORT_SYMBOL(rproc_remove_subdev); * rproc_get_by_child() - acquire rproc handle of @dev's ancestor * @dev: child device to find ancestor of * - * Returns the ancestor rproc instance, or NULL if not found. 
+ * Return: the ancestor rproc instance, or NULL if not found */ struct rproc *rproc_get_by_child(struct device *dev) { @@ -2376,8 +2707,7 @@ void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type) dev_err(&rproc->dev, "crash detected in %s: type %s\n", rproc->name, rproc_crash_to_string(type)); - /* create a new task to handle the error */ - schedule_work(&rproc->crash_handler); + queue_work(rproc_recovery_wq, &rproc->crash_handler); } EXPORT_SYMBOL(rproc_report_crash); @@ -2390,7 +2720,11 @@ static int rproc_panic_handler(struct notifier_block *nb, unsigned long event, rcu_read_lock(); list_for_each_entry_rcu(rproc, &rproc_list, node) { - if (!rproc->ops->panic || rproc->state != RPROC_RUNNING) + if (!rproc->ops->panic) + continue; + + if (rproc->state != RPROC_RUNNING && + rproc->state != RPROC_ATTACHED) continue; d = rproc->ops->panic(rproc); @@ -2422,8 +2756,16 @@ static void __exit rproc_exit_panic(void) static int __init remoteproc_init(void) { + rproc_recovery_wq = alloc_workqueue("rproc_recovery_wq", + WQ_UNBOUND | WQ_FREEZABLE, 0); + if (!rproc_recovery_wq) { + pr_err("remoteproc: creation of rproc_recovery_wq failed\n"); + return -ENOMEM; + } + rproc_init_sysfs(); rproc_init_debugfs(); + rproc_init_cdev(); rproc_init_panic(); return 0; @@ -2434,11 +2776,14 @@ static void __exit remoteproc_exit(void) { ida_destroy(&rproc_dev_index); + if (!rproc_recovery_wq) + return; + rproc_exit_panic(); rproc_exit_debugfs(); rproc_exit_sysfs(); + destroy_workqueue(rproc_recovery_wq); } module_exit(remoteproc_exit); -MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Generic Remote Processor Framework"); |
