Diffstat (limited to 'drivers/firmware/arm_ffa/driver.c')
-rw-r--r-- | drivers/firmware/arm_ffa/driver.c | 490
1 file changed, 366 insertions(+), 124 deletions(-)
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index f2556a8e9401..2c2ec3c35f15 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -26,6 +26,7 @@
 #include <linux/arm_ffa.h>
 #include <linux/bitfield.h>
 #include <linux/cpuhotplug.h>
+#include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/hashtable.h>
 #include <linux/interrupt.h>
@@ -53,11 +54,8 @@
 #define PACK_TARGET_INFO(s, r)	\
	(FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r)))

-/*
- * Keeping RX TX buffer size as 4K for now
- * 64K may be preferred to keep it min a page in 64K PAGE_SIZE config
- */
-#define RXTX_BUFFER_SIZE	SZ_4K
+#define RXTX_MAP_MIN_BUFSZ_MASK	GENMASK(1, 0)
+#define RXTX_MAP_MIN_BUFSZ(x)	((x) & RXTX_MAP_MIN_BUFSZ_MASK)

 #define FFA_MAX_NOTIFICATIONS		64

@@ -75,6 +73,7 @@ static const int ffa_linux_errmap[] = {
	-EAGAIN,	/* FFA_RET_RETRY */
	-ECANCELED,	/* FFA_RET_ABORTED */
	-ENODATA,	/* FFA_RET_NO_DATA */
+	-EAGAIN,	/* FFA_RET_NOT_READY */
 };

 static inline int ffa_to_linux_errno(int errno)
@@ -97,15 +96,18 @@ struct ffa_drv_info {
	struct mutex tx_lock; /* lock to protect Tx buffer */
	void *rx_buffer;
	void *tx_buffer;
+	size_t rxtx_bufsz;
	bool mem_ops_native;
+	bool msg_direct_req2_supp;
	bool bitmap_created;
	bool notif_enabled;
	unsigned int sched_recv_irq;
+	unsigned int notif_pend_irq;
	unsigned int cpuhp_state;
	struct ffa_pcpu_irq __percpu *irq_pcpu;
	struct workqueue_struct *notif_pcpu_wq;
	struct work_struct notif_pcpu_work;
-	struct work_struct irq_work;
+	struct work_struct sched_recv_irq_work;
	struct xarray partition_info;
	DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS));
	struct mutex notify_lock; /* lock to protect notifier hashtable */
@@ -210,6 +212,32 @@ static int ffa_rxtx_unmap(u16 vm_id)
	return 0;
 }

+static int ffa_features(u32 func_feat_id, u32 input_props,
+			u32 *if_props_1, u32 *if_props_2)
+{
+	ffa_value_t id;
+
+	if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) {
+		pr_err("%s: Invalid Parameters: %x, %x", __func__,
+		       func_feat_id, input_props);
+		return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS);
+	}
+
+	invoke_ffa_fn((ffa_value_t){
+		.a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props,
+		}, &id);
+
+	if (id.a0 == FFA_ERROR)
+		return ffa_to_linux_errno((int)id.a2);
+
+	if (if_props_1)
+		*if_props_1 = id.a2;
+	if (if_props_2)
+		*if_props_2 = id.a3;
+
+	return 0;
+}
+
 #define PARTITION_INFO_GET_RETURN_COUNT_ONLY	BIT(0)

 /* buffer must be sizeof(struct ffa_partition_info) * num_partitions */
@@ -259,17 +287,75 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
	return count;
 }

+#define LAST_INDEX_MASK		GENMASK(15, 0)
+#define CURRENT_INDEX_MASK	GENMASK(31, 16)
+#define UUID_INFO_TAG_MASK	GENMASK(47, 32)
+#define PARTITION_INFO_SZ_MASK	GENMASK(63, 48)
+#define PARTITION_COUNT(x)	((u16)(FIELD_GET(LAST_INDEX_MASK, (x))) + 1)
+#define CURRENT_INDEX(x)	((u16)(FIELD_GET(CURRENT_INDEX_MASK, (x))))
+#define UUID_INFO_TAG(x)	((u16)(FIELD_GET(UUID_INFO_TAG_MASK, (x))))
+#define PARTITION_INFO_SZ(x)	((u16)(FIELD_GET(PARTITION_INFO_SZ_MASK, (x))))
+static int
+__ffa_partition_info_get_regs(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
+			      struct ffa_partition_info *buffer, int num_parts)
+{
+	u16 buf_sz, start_idx, cur_idx, count = 0, prev_idx = 0, tag = 0;
+	ffa_value_t partition_info;
+
+	do {
+		start_idx = prev_idx ? prev_idx + 1 : 0;
+
+		invoke_ffa_fn((ffa_value_t){
+			      .a0 = FFA_PARTITION_INFO_GET_REGS,
+			      .a1 = (u64)uuid1 << 32 | uuid0,
+			      .a2 = (u64)uuid3 << 32 | uuid2,
+			      .a3 = start_idx | tag << 16,
+			      }, &partition_info);
+
+		if (partition_info.a0 == FFA_ERROR)
+			return ffa_to_linux_errno((int)partition_info.a2);
+
+		if (!count)
+			count = PARTITION_COUNT(partition_info.a2);
+		if (!buffer || !num_parts) /* count only */
+			return count;
+
+		cur_idx = CURRENT_INDEX(partition_info.a2);
+		tag = UUID_INFO_TAG(partition_info.a2);
+		buf_sz = PARTITION_INFO_SZ(partition_info.a2);
+		if (buf_sz > sizeof(*buffer))
+			buf_sz = sizeof(*buffer);
+
+		memcpy(buffer + prev_idx * buf_sz, &partition_info.a3,
+		       (cur_idx - start_idx + 1) * buf_sz);
+		prev_idx = cur_idx;
+
+	} while (cur_idx < (count - 1));
+
+	return count;
+}
+
 /* buffer is allocated and caller must free the same if returned count > 0 */
 static int
 ffa_partition_probe(const uuid_t *uuid, struct ffa_partition_info **buffer)
 {
	int count;
	u32 uuid0_4[4];
+	bool reg_mode = false;
	struct ffa_partition_info *pbuf;

+	if (!ffa_features(FFA_PARTITION_INFO_GET_REGS, 0, NULL, NULL))
+		reg_mode = true;
+
	export_uuid((u8 *)uuid0_4, uuid);
-	count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2],
-					 uuid0_4[3], NULL, 0);
+	if (reg_mode)
+		count = __ffa_partition_info_get_regs(uuid0_4[0], uuid0_4[1],
+						      uuid0_4[2], uuid0_4[3],
+						      NULL, 0);
+	else
+		count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1],
+						 uuid0_4[2], uuid0_4[3],
+						 NULL, 0);
	if (count <= 0)
		return count;
@@ -277,8 +363,14 @@ ffa_partition_probe(const uuid_t *uuid, struct ffa_partition_info **buffer)
	if (!pbuf)
		return -ENOMEM;

-	count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2],
-					 uuid0_4[3], pbuf, count);
+	if (reg_mode)
+		count = __ffa_partition_info_get_regs(uuid0_4[0], uuid0_4[1],
+						      uuid0_4[2], uuid0_4[3],
+						      pbuf, count);
+	else
+		count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1],
+						 uuid0_4[2], uuid0_4[3],
+						 pbuf, count);
	if (count <= 0)
		kfree(pbuf);
	else
@@ -304,6 +396,18 @@ static int ffa_id_get(u16 *vm_id)
	return 0;
 }

+static inline void ffa_msg_send_wait_for_completion(ffa_value_t *ret)
+{
+	while (ret->a0 == FFA_INTERRUPT || ret->a0 == FFA_YIELD) {
+		if (ret->a0 == FFA_YIELD)
+			fsleep(1000);
+
+		invoke_ffa_fn((ffa_value_t){
+			      .a0 = FFA_RUN, .a1 = ret->a1,
+			      }, ret);
+	}
+}
+
 static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
				   struct ffa_send_direct_data *data)
 {
@@ -324,10 +428,7 @@ static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
		      .a6 = data->data3, .a7 = data->data4,
		      }, &ret);

-	while (ret.a0 == FFA_INTERRUPT)
-		invoke_ffa_fn((ffa_value_t){
-			      .a0 = FFA_RUN, .a1 = ret.a1,
-			      }, &ret);
+	ffa_msg_send_wait_for_completion(&ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);
@@ -344,6 +445,69 @@ static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
	return -EINVAL;
 }

+static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz)
+{
+	u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
+	struct ffa_indirect_msg_hdr *msg;
+	ffa_value_t ret;
+	int retval = 0;
+
+	if (sz > (drv_info->rxtx_bufsz - sizeof(*msg)))
+		return -ERANGE;
+
+	mutex_lock(&drv_info->tx_lock);
+
+	msg = drv_info->tx_buffer;
+	msg->flags = 0;
+	msg->res0 = 0;
+	msg->offset = sizeof(*msg);
+	msg->send_recv_id = src_dst_ids;
+	msg->size = sz;
+	memcpy((u8 *)msg + msg->offset, buf, sz);
+
+	/* flags = 0, sender VMID = 0 works for both physical/virtual NS */
+	invoke_ffa_fn((ffa_value_t){
+		      .a0 = FFA_MSG_SEND2, .a1 = 0, .a2 = 0
+		      }, &ret);
+
+	if (ret.a0 == FFA_ERROR)
+		retval = ffa_to_linux_errno((int)ret.a2);
+
+	mutex_unlock(&drv_info->tx_lock);
+	return retval;
+}
+
+static int ffa_msg_send_direct_req2(u16 src_id, u16 dst_id, const uuid_t *uuid,
+				    struct ffa_send_direct_data2 *data)
+{
+	u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
+	union {
+		uuid_t uuid;
+		__le64 regs[2];
+	} uuid_regs = { .uuid = *uuid };
+	ffa_value_t ret, args = {
+		.a0 = FFA_MSG_SEND_DIRECT_REQ2,
+		.a1 = src_dst_ids,
+		.a2 = le64_to_cpu(uuid_regs.regs[0]),
+		.a3 = le64_to_cpu(uuid_regs.regs[1]),
+	};
+	memcpy((void *)&args + offsetof(ffa_value_t, a4), data, sizeof(*data));
+
+	invoke_ffa_fn(args, &ret);
+
+	ffa_msg_send_wait_for_completion(&ret);
+
+	if (ret.a0 == FFA_ERROR)
+		return ffa_to_linux_errno((int)ret.a2);
+
+	if (ret.a0 == FFA_MSG_SEND_DIRECT_RESP2) {
+		memcpy(data, (void *)&ret + offsetof(ffa_value_t, a4), sizeof(*data));
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
 static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz,
			      u32 frag_len, u32 len, u64 *handle)
 {
@@ -528,9 +692,10 @@ static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args)
 {
	int ret;
	void *buffer;
+	size_t rxtx_bufsz = drv_info->rxtx_bufsz;

	if (!args->use_txbuf) {
-		buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
+		buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL);
		if (!buffer)
			return -ENOMEM;
	} else {
@@ -538,12 +703,12 @@ static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args)
		mutex_lock(&drv_info->tx_lock);
	}

-	ret = ffa_setup_and_transmit(func_id, buffer, RXTX_BUFFER_SIZE, args);
+	ret = ffa_setup_and_transmit(func_id, buffer, rxtx_bufsz, args);

	if (args->use_txbuf)
		mutex_unlock(&drv_info->tx_lock);
	else
-		free_pages_exact(buffer, RXTX_BUFFER_SIZE);
+		free_pages_exact(buffer, rxtx_bufsz);

	return ret < 0 ? ret : 0;
 }
@@ -564,32 +729,6 @@ static int ffa_memory_reclaim(u64 g_handle, u32 flags)
	return 0;
 }

-static int ffa_features(u32 func_feat_id, u32 input_props,
-			u32 *if_props_1, u32 *if_props_2)
-{
-	ffa_value_t id;
-
-	if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) {
-		pr_err("%s: Invalid Parameters: %x, %x", __func__,
-		       func_feat_id, input_props);
-		return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS);
-	}
-
-	invoke_ffa_fn((ffa_value_t){
-		.a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props,
-		}, &id);
-
-	if (id.a0 == FFA_ERROR)
-		return ffa_to_linux_errno((int)id.a2);
-
-	if (if_props_1)
-		*if_props_1 = id.a2;
-	if (if_props_2)
-		*if_props_2 = id.a3;
-
-	return 0;
-}
-
 static int ffa_notification_bitmap_create(void)
 {
	ffa_value_t ret;
@@ -790,7 +929,7 @@ static void ffa_notification_info_get(void)

		part_id = packed_id_list[ids_processed++];

-		if (!ids_count[list]) { /* Global Notification */
+		if (ids_count[list] == 1) { /* Global Notification */
			__do_sched_recv_cb(part_id, 0, false);
			continue;
		}
@@ -825,11 +964,15 @@ static int ffa_run(struct ffa_device *dev, u16 vcpu)
	return 0;
 }

-static void ffa_set_up_mem_ops_native_flag(void)
+static void ffa_drvinfo_flags_init(void)
 {
	if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) ||
	    !ffa_features(FFA_FN_NATIVE(MEM_SHARE), 0, NULL, NULL))
		drv_info->mem_ops_native = true;
+
+	if (!ffa_features(FFA_MSG_SEND_DIRECT_REQ2, 0, NULL, NULL) ||
+	    !ffa_features(FFA_MSG_SEND_DIRECT_RESP2, 0, NULL, NULL))
+		drv_info->msg_direct_req2_supp = true;
 }

 static u32 ffa_api_version_get(void)
@@ -870,6 +1013,21 @@ static int ffa_sync_send_receive(struct ffa_device *dev,
				       dev->mode_32bit, data);
 }

+static int ffa_indirect_msg_send(struct ffa_device *dev, void *buf, size_t sz)
+{
+	return ffa_msg_send2(drv_info->vm_id, dev->vm_id, buf, sz);
+}
+
+static int ffa_sync_send_receive2(struct ffa_device *dev, const uuid_t *uuid,
+				  struct ffa_send_direct_data2 *data)
+{
+	if (!drv_info->msg_direct_req2_supp)
+		return -EOPNOTSUPP;
+
+	return ffa_msg_send_direct_req2(drv_info->vm_id, dev->vm_id,
+					uuid, data);
+}
+
 static int ffa_memory_share(struct ffa_mem_ops_args *args)
 {
	if (drv_info->mem_ops_native)
@@ -1108,7 +1266,7 @@ static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
	}
 }

-static void notif_pcpu_irq_work_fn(struct work_struct *work)
+static void notif_get_and_handle(void *unused)
 {
	int rc;
	struct ffa_notify_bitmaps bitmaps;
@@ -1131,10 +1289,17 @@ ffa_self_notif_handle(u16 vcpu, bool is_per_vcpu, void *cb_data)
	struct ffa_drv_info *info = cb_data;

	if (!is_per_vcpu)
-		notif_pcpu_irq_work_fn(&info->notif_pcpu_work);
+		notif_get_and_handle(info);
	else
-		queue_work_on(vcpu, info->notif_pcpu_wq,
-			      &info->notif_pcpu_work);
+		smp_call_function_single(vcpu, notif_get_and_handle, info, 0);
+}
+
+static void notif_pcpu_irq_work_fn(struct work_struct *work)
+{
+	struct ffa_drv_info *info = container_of(work, struct ffa_drv_info,
+						 notif_pcpu_work);
+
+	ffa_self_notif_handle(smp_processor_id(), true, info);
 }

 static const struct ffa_info_ops ffa_drv_info_ops = {
@@ -1145,6 +1310,8 @@ static const struct ffa_info_ops ffa_drv_info_ops = {
 static const struct ffa_msg_ops ffa_drv_msg_ops = {
	.mode_32bit_set = ffa_mode_32bit_set,
	.sync_send_receive = ffa_sync_send_receive,
+	.indirect_send = ffa_indirect_msg_send,
+	.sync_send_receive2 = ffa_sync_send_receive2,
 };

 static const struct ffa_mem_ops ffa_drv_mem_ops = {
@@ -1178,14 +1345,6 @@ void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
	int count, idx;
	struct ffa_partition_info *pbuf, *tpbuf;

-	/*
-	 * FF-A v1.1 provides UUID for each partition as part of the discovery
-	 * API, the discovered UUID must be populated in the device's UUID and
-	 * there is no need to copy the same from the driver table.
-	 */
-	if (drv_info->version > FFA_VERSION_1_0)
-		return;
-
	count = ffa_partition_probe(uuid, &pbuf);
	if (count <= 0)
		return;
@@ -1196,14 +1355,48 @@
	kfree(pbuf);
 }

+static int
+ffa_bus_notifier(struct notifier_block *nb, unsigned long action, void *data)
+{
+	struct device *dev = data;
+	struct ffa_device *fdev = to_ffa_dev(dev);
+
+	if (action == BUS_NOTIFY_BIND_DRIVER) {
+		struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver);
+		const struct ffa_device_id *id_table = ffa_drv->id_table;
+
+		/*
+		 * FF-A v1.1 provides UUID for each partition as part of the
+		 * discovery API, the discovered UUID must be populated in the
+		 * device's UUID and there is no need to workaround by copying
+		 * the same from the driver table.
+		 */
+		if (uuid_is_null(&fdev->uuid))
+			ffa_device_match_uuid(fdev, &id_table->uuid);
+
+		return NOTIFY_OK;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block ffa_bus_nb = {
+	.notifier_call = ffa_bus_notifier,
+};
+
 static int ffa_setup_partitions(void)
 {
	int count, idx, ret;
-	uuid_t uuid;
	struct ffa_device *ffa_dev;
	struct ffa_dev_part_info *info;
	struct ffa_partition_info *pbuf, *tpbuf;

+	if (drv_info->version == FFA_VERSION_1_0) {
+		ret = bus_register_notifier(&ffa_bus_type, &ffa_bus_nb);
+		if (ret)
+			pr_err("Failed to register FF-A bus notifiers\n");
+	}
+
	count = ffa_partition_probe(&uuid_null, &pbuf);
	if (count <= 0) {
		pr_info("%s: No partitions found, error %d\n", __func__, count);
@@ -1212,15 +1405,13 @@ static int ffa_setup_partitions(void)

	xa_init(&drv_info->partition_info);
	for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) {
-		import_uuid(&uuid, (u8 *)tpbuf->uuid);
-
		/* Note that if the UUID will be uuid_null, that will require
-		 * ffa_device_match() to find the UUID of this partition id
+		 * ffa_bus_notifier() to find the UUID of this partition id
		 * with help of ffa_device_match_uuid(). FF-A v1.1 and above
		 * provides UUID here for each partition as part of the
		 * discovery API and the same is passed.
		 */
-		ffa_dev = ffa_device_register(&uuid, tpbuf->id, &ffa_drv_ops);
+		ffa_dev = ffa_device_register(tpbuf, &ffa_drv_ops);
		if (!ffa_dev) {
			pr_err("%s: failed to register partition ID 0x%x\n",
			       __func__, tpbuf->id);
@@ -1252,8 +1443,6 @@ static int ffa_setup_partitions(void)
	/* Allocate for the host */
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
-		pr_err("%s: failed to alloc Host partition ID 0x%x. Abort.\n",
-		       __func__, drv_info->vm_id);
		/* Already registered devices are freed on bus_exit */
		ffa_partitions_cleanup();
		return -ENOMEM;
@@ -1291,12 +1480,23 @@ static void ffa_partitions_cleanup(void)
 #define FFA_FEAT_SCHEDULE_RECEIVER_INT		(2)
 #define FFA_FEAT_MANAGED_EXIT_INT		(3)

-static irqreturn_t irq_handler(int irq, void *irq_data)
+static irqreturn_t ffa_sched_recv_irq_handler(int irq, void *irq_data)
+{
+	struct ffa_pcpu_irq *pcpu = irq_data;
+	struct ffa_drv_info *info = pcpu->info;
+
+	queue_work(info->notif_pcpu_wq, &info->sched_recv_irq_work);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t notif_pend_irq_handler(int irq, void *irq_data)
 {
	struct ffa_pcpu_irq *pcpu = irq_data;
	struct ffa_drv_info *info = pcpu->info;

-	queue_work(info->notif_pcpu_wq, &info->irq_work);
+	queue_work_on(smp_processor_id(), info->notif_pcpu_wq,
+		      &info->notif_pcpu_work);

	return IRQ_HANDLED;
 }
@@ -1306,15 +1506,23 @@ static void ffa_sched_recv_irq_work_fn(struct work_struct *work)
 {
	ffa_notification_info_get();
 }

-static int ffa_sched_recv_irq_map(void)
+static int ffa_irq_map(u32 id)
 {
-	int ret, irq, sr_intid;
+	char *err_str;
+	int ret, irq, intid;
+
+	if (id == FFA_FEAT_NOTIFICATION_PENDING_INT)
+		err_str = "Notification Pending Interrupt";
+	else if (id == FFA_FEAT_SCHEDULE_RECEIVER_INT)
+		err_str = "Schedule Receiver Interrupt";
+	else
+		err_str = "Unknown ID";

-	/* The returned sr_intid is assumed to be SGI donated to NS world */
-	ret = ffa_features(FFA_FEAT_SCHEDULE_RECEIVER_INT, 0, &sr_intid, NULL);
+	/* The returned intid is assumed to be SGI donated to NS world */
+	ret = ffa_features(id, 0, &intid, NULL);
	if (ret < 0) {
		if (ret != -EOPNOTSUPP)
-			pr_err("Failed to retrieve scheduler Rx interrupt\n");
+			pr_err("Failed to retrieve FF-A %s %u\n", err_str, id);
		return ret;
	}
@@ -1329,12 +1537,12 @@
		oirq.np = gic;
		oirq.args_count = 1;
-		oirq.args[0] = sr_intid;
+		oirq.args[0] = intid;
		irq = irq_create_of_mapping(&oirq);
		of_node_put(gic);
 #ifdef CONFIG_ACPI
	} else {
-		irq = acpi_register_gsi(NULL, sr_intid, ACPI_EDGE_SENSITIVE,
+		irq = acpi_register_gsi(NULL, intid, ACPI_EDGE_SENSITIVE,
					ACPI_ACTIVE_HIGH);
 #endif
	}
@@ -1347,23 +1555,28 @@
	return irq;
 }

-static void ffa_sched_recv_irq_unmap(void)
+static void ffa_irq_unmap(unsigned int irq)
 {
-	if (drv_info->sched_recv_irq) {
-		irq_dispose_mapping(drv_info->sched_recv_irq);
-		drv_info->sched_recv_irq = 0;
-	}
+	if (!irq)
+		return;
+	irq_dispose_mapping(irq);
 }

 static int ffa_cpuhp_pcpu_irq_enable(unsigned int cpu)
 {
-	enable_percpu_irq(drv_info->sched_recv_irq, IRQ_TYPE_NONE);
+	if (drv_info->sched_recv_irq)
+		enable_percpu_irq(drv_info->sched_recv_irq, IRQ_TYPE_NONE);
+	if (drv_info->notif_pend_irq)
+		enable_percpu_irq(drv_info->notif_pend_irq, IRQ_TYPE_NONE);
	return 0;
 }

 static int ffa_cpuhp_pcpu_irq_disable(unsigned int cpu)
 {
-	disable_percpu_irq(drv_info->sched_recv_irq);
+	if (drv_info->sched_recv_irq)
+		disable_percpu_irq(drv_info->sched_recv_irq);
+	if (drv_info->notif_pend_irq)
+		disable_percpu_irq(drv_info->notif_pend_irq);
	return 0;
 }
@@ -1382,13 +1595,16 @@ static void ffa_uninit_pcpu_irq(void)
	if (drv_info->sched_recv_irq)
		free_percpu_irq(drv_info->sched_recv_irq, drv_info->irq_pcpu);

+	if (drv_info->notif_pend_irq)
+		free_percpu_irq(drv_info->notif_pend_irq, drv_info->irq_pcpu);
+
	if (drv_info->irq_pcpu) {
		free_percpu(drv_info->irq_pcpu);
		drv_info->irq_pcpu = NULL;
	}
 }

-static int ffa_init_pcpu_irq(unsigned int irq)
+static int ffa_init_pcpu_irq(void)
 {
	struct ffa_pcpu_irq __percpu *irq_pcpu;
	int ret, cpu;
@@ -1402,13 +1618,31 @@

	drv_info->irq_pcpu = irq_pcpu;

-	ret = request_percpu_irq(irq, irq_handler, "ARM-FFA", irq_pcpu);
-	if (ret) {
-		pr_err("Error registering notification IRQ %d: %d\n", irq, ret);
-		return ret;
+	if (drv_info->sched_recv_irq) {
+		ret = request_percpu_irq(drv_info->sched_recv_irq,
+					 ffa_sched_recv_irq_handler,
+					 "ARM-FFA-SRI", irq_pcpu);
+		if (ret) {
+			pr_err("Error registering percpu SRI nIRQ %d : %d\n",
+			       drv_info->sched_recv_irq, ret);
+			drv_info->sched_recv_irq = 0;
+			return ret;
+		}
+	}
+
+	if (drv_info->notif_pend_irq) {
+		ret = request_percpu_irq(drv_info->notif_pend_irq,
+					 notif_pend_irq_handler,
+					 "ARM-FFA-NPI", irq_pcpu);
+		if (ret) {
+			pr_err("Error registering percpu NPI nIRQ %d : %d\n",
+			       drv_info->notif_pend_irq, ret);
+			drv_info->notif_pend_irq = 0;
+			return ret;
+		}
	}

-	INIT_WORK(&drv_info->irq_work, ffa_sched_recv_irq_work_fn);
+	INIT_WORK(&drv_info->sched_recv_irq_work, ffa_sched_recv_irq_work_fn);
	INIT_WORK(&drv_info->notif_pcpu_work, notif_pcpu_irq_work_fn);
	drv_info->notif_pcpu_wq = create_workqueue("ffa_pcpu_irq_notification");
	if (!drv_info->notif_pcpu_wq)
@@ -1428,7 +1662,10 @@
 static void ffa_notifications_cleanup(void)
 {
	ffa_uninit_pcpu_irq();
-	ffa_sched_recv_irq_unmap();
+	ffa_irq_unmap(drv_info->sched_recv_irq);
+	drv_info->sched_recv_irq = 0;
+	ffa_irq_unmap(drv_info->notif_pend_irq);
+	drv_info->notif_pend_irq = 0;

	if (drv_info->bitmap_created) {
		ffa_notification_bitmap_destroy();
@@ -1439,30 +1676,31 @@
 static void ffa_notifications_setup(void)
 {
-	int ret, irq;
+	int ret;

	ret = ffa_features(FFA_NOTIFICATION_BITMAP_CREATE, 0, NULL, NULL);
-	if (ret) {
-		pr_info("Notifications not supported, continuing with it ..\n");
-		return;
-	}
+	if (!ret) {
+		ret = ffa_notification_bitmap_create();
+		if (ret) {
+			pr_err("Notification bitmap create error %d\n", ret);
+			return;
+		}

-	ret = ffa_notification_bitmap_create();
-	if (ret) {
-		pr_info("Notification bitmap create error %d\n", ret);
-		return;
+		drv_info->bitmap_created = true;
	}

-	drv_info->bitmap_created = true;
-	irq = ffa_sched_recv_irq_map();
-	if (irq <= 0) {
-		ret = irq;
-		goto cleanup;
-	}
+	ret = ffa_irq_map(FFA_FEAT_SCHEDULE_RECEIVER_INT);
+	if (ret > 0)
+		drv_info->sched_recv_irq = ret;
+
+	ret = ffa_irq_map(FFA_FEAT_NOTIFICATION_PENDING_INT);
+	if (ret > 0)
+		drv_info->notif_pend_irq = ret;

-	drv_info->sched_recv_irq = irq;
+	if (!drv_info->sched_recv_irq && !drv_info->notif_pend_irq)
+		goto cleanup;

-	ret = ffa_init_pcpu_irq(irq);
+	ret = ffa_init_pcpu_irq();
	if (ret)
		goto cleanup;
@@ -1479,20 +1717,16 @@ cleanup:
 static int __init ffa_init(void)
 {
	int ret;
+	u32 buf_sz;
+	size_t rxtx_bufsz = SZ_4K;

	ret = ffa_transport_init(&invoke_ffa_fn);
	if (ret)
		return ret;

-	ret = arm_ffa_bus_init();
-	if (ret)
-		return ret;
-
	drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL);
-	if (!drv_info) {
-		ret = -ENOMEM;
-		goto ffa_bus_exit;
-	}
+	if (!drv_info)
+		return -ENOMEM;

	ret = ffa_version_check(&drv_info->version);
	if (ret)
@@ -1504,13 +1738,24 @@ static int __init ffa_init(void)
		goto free_drv_info;
	}

-	drv_info->rx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
+	ret = ffa_features(FFA_FN_NATIVE(RXTX_MAP), 0, &buf_sz, NULL);
+	if (!ret) {
+		if (RXTX_MAP_MIN_BUFSZ(buf_sz) == 1)
+			rxtx_bufsz = SZ_64K;
+		else if (RXTX_MAP_MIN_BUFSZ(buf_sz) == 2)
+			rxtx_bufsz = SZ_16K;
+		else
+			rxtx_bufsz = SZ_4K;
+	}
+
+	drv_info->rxtx_bufsz = rxtx_bufsz;
+	drv_info->rx_buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL);
	if (!drv_info->rx_buffer) {
		ret = -ENOMEM;
		goto free_pages;
	}

-	drv_info->tx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
+	drv_info->tx_buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL);
	if (!drv_info->tx_buffer) {
		ret = -ENOMEM;
		goto free_pages;
@@ -1518,7 +1763,7 @@ static int __init ffa_init(void)
	ret = ffa_rxtx_map(virt_to_phys(drv_info->tx_buffer),
			   virt_to_phys(drv_info->rx_buffer),
-			   RXTX_BUFFER_SIZE / FFA_PAGE_SIZE);
+			   rxtx_bufsz / FFA_PAGE_SIZE);
	if (ret) {
		pr_err("failed to register FFA RxTx buffers\n");
		goto free_pages;
	}
@@ -1527,7 +1772,7 @@ static int __init ffa_init(void)
	mutex_init(&drv_info->rx_lock);
	mutex_init(&drv_info->tx_lock);

-	ffa_set_up_mem_ops_native_flag();
+	ffa_drvinfo_flags_init();

	ffa_notifications_setup();

@@ -1548,25 +1793,22 @@ cleanup_notifs:
	ffa_notifications_cleanup();
 free_pages:
	if (drv_info->tx_buffer)
-		free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
-	free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
+		free_pages_exact(drv_info->tx_buffer, rxtx_bufsz);
+	free_pages_exact(drv_info->rx_buffer, rxtx_bufsz);
 free_drv_info:
	kfree(drv_info);
-ffa_bus_exit:
-	arm_ffa_bus_exit();
	return ret;
 }
-subsys_initcall(ffa_init);
+module_init(ffa_init);

 static void __exit ffa_exit(void)
 {
	ffa_notifications_cleanup();
	ffa_partitions_cleanup();
	ffa_rxtx_unmap(drv_info->vm_id);
-	free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
-	free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
+	free_pages_exact(drv_info->tx_buffer, drv_info->rxtx_bufsz);
+	free_pages_exact(drv_info->rx_buffer, drv_info->rxtx_bufsz);
	kfree(drv_info);
-	arm_ffa_bus_exit();
 }
 module_exit(ffa_exit);
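For context, the sketch below is not part of the commit: it is a minimal, hypothetical FF-A client driver showing how the two messaging ops this change adds, indirect_send (FFA_MSG_SEND2) and sync_send_receive2 (FFA_MSG_SEND_DIRECT_REQ2/RESP2), might be exercised from a probe routine. The driver name, UUID and payload are placeholders, and it assumes the struct ffa_send_direct_data2 definition that accompanies this series in include/linux/arm_ffa.h.

/* Hypothetical client, for illustration only -- not part of this patch. */
#include <linux/arm_ffa.h>
#include <linux/module.h>
#include <linux/uuid.h>

static int example_ffa_probe(struct ffa_device *ffa_dev)
{
	const struct ffa_ops *ops = ffa_dev->ops;
	struct ffa_send_direct_data2 req2 = {};
	u8 msg[] = "hello";
	int ret;

	/*
	 * Indirect message via FFA_MSG_SEND2; returns -ERANGE if the payload
	 * does not fit in the (now dynamically sized) Tx buffer.
	 */
	ret = ops->msg_ops->indirect_send(ffa_dev, msg, sizeof(msg));
	if (ret)
		dev_warn(&ffa_dev->dev, "indirect send failed: %d\n", ret);

	/*
	 * Extended direct request; returns -EOPNOTSUPP if DIRECT_REQ2/RESP2
	 * are not advertised via FFA_FEATURES.
	 */
	ret = ops->msg_ops->sync_send_receive2(ffa_dev, &ffa_dev->uuid, &req2);
	if (ret == -EOPNOTSUPP)
		dev_info(&ffa_dev->dev, "FFA_MSG_SEND_DIRECT_REQ2 not supported\n");

	return 0;
}

static void example_ffa_remove(struct ffa_device *ffa_dev)
{
}

/* Placeholder UUID; a real driver matches the partition's service UUID. */
static const struct ffa_device_id example_ffa_id_table[] = {
	{ UUID_INIT(0x12345678, 0x1234, 0x1234,
		    0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x12, 0x34) },
	{}
};

static struct ffa_driver example_ffa_driver = {
	.name = "example_ffa",
	.probe = example_ffa_probe,
	.remove = example_ffa_remove,
	.id_table = example_ffa_id_table,
};
module_ffa_driver(example_ffa_driver);
MODULE_LICENSE("GPL");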