Diffstat (limited to 'drivers/tee')
-rw-r--r--  drivers/tee/Kconfig                  |  10
-rw-r--r--  drivers/tee/Makefile                 |   3
-rw-r--r--  drivers/tee/amdtee/amdtee_if.h       |  10
-rw-r--r--  drivers/tee/amdtee/amdtee_private.h  |   2
-rw-r--r--  drivers/tee/amdtee/call.c            |  34
-rw-r--r--  drivers/tee/amdtee/core.c            |  56
-rw-r--r--  drivers/tee/amdtee/shm_pool.c        |   4
-rw-r--r--  drivers/tee/optee/Kconfig            |  23
-rw-r--r--  drivers/tee/optee/Makefile           |   1
-rw-r--r--  drivers/tee/optee/call.c             | 165
-rw-r--r--  drivers/tee/optee/core.c             | 159
-rw-r--r--  drivers/tee/optee/device.c           |  29
-rw-r--r--  drivers/tee/optee/ffa_abi.c          | 314
-rw-r--r--  drivers/tee/optee/notif.c            |  11
-rw-r--r--  drivers/tee/optee/optee_ffa.h        |  51
-rw-r--r--  drivers/tee/optee/optee_msg.h        |  96
-rw-r--r--  drivers/tee/optee/optee_private.h    | 128
-rw-r--r--  drivers/tee/optee/optee_rpc_cmd.h    |  36
-rw-r--r--  drivers/tee/optee/optee_smc.h        |  63
-rw-r--r--  drivers/tee/optee/protmem.c          | 335
-rw-r--r--  drivers/tee/optee/rpc.c              | 189
-rw-r--r--  drivers/tee/optee/smc_abi.c          | 514
-rw-r--r--  drivers/tee/optee/supp.c             |  35
-rw-r--r--  drivers/tee/qcomtee/Kconfig          |  13
-rw-r--r--  drivers/tee/qcomtee/Makefile         |   9
-rw-r--r--  drivers/tee/qcomtee/async.c          | 182
-rw-r--r--  drivers/tee/qcomtee/call.c           | 820
-rw-r--r--  drivers/tee/qcomtee/core.c           | 915
-rw-r--r--  drivers/tee/qcomtee/mem_obj.c        | 169
-rw-r--r--  drivers/tee/qcomtee/primordial_obj.c | 113
-rw-r--r--  drivers/tee/qcomtee/qcomtee.h        | 185
-rw-r--r--  drivers/tee/qcomtee/qcomtee_msg.h    | 304
-rw-r--r--  drivers/tee/qcomtee/qcomtee_object.h | 316
-rw-r--r--  drivers/tee/qcomtee/shm.c            | 150
-rw-r--r--  drivers/tee/qcomtee/user_obj.c       | 692
-rw-r--r--  drivers/tee/tee_core.c               | 401
-rw-r--r--  drivers/tee/tee_heap.c               | 500
-rw-r--r--  drivers/tee/tee_private.h            |  53
-rw-r--r--  drivers/tee/tee_shm.c                | 335
-rw-r--r--  drivers/tee/tee_shm_pool.c           |   2
-rw-r--r--  drivers/tee/tstee/Kconfig            |  11
-rw-r--r--  drivers/tee/tstee/Makefile           |   3
-rw-r--r--  drivers/tee/tstee/core.c             | 480
-rw-r--r--  drivers/tee/tstee/tstee_private.h    |  92
44 files changed, 7512 insertions, 501 deletions
diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig
index 73a147202e88..98c3ad083940 100644
--- a/drivers/tee/Kconfig
+++ b/drivers/tee/Kconfig
@@ -3,8 +3,7 @@
menuconfig TEE
tristate "Trusted Execution Environment support"
depends on HAVE_ARM_SMCCC || COMPILE_TEST || CPU_SUP_AMD
- select CRYPTO
- select CRYPTO_SHA1
+ select CRYPTO_LIB_SHA1
select DMA_SHARED_BUFFER
select GENERIC_ALLOCATOR
help
@@ -13,7 +12,14 @@ menuconfig TEE
if TEE
+config TEE_DMABUF_HEAPS
+ bool
+ depends on HAS_DMA && DMABUF_HEAPS
+ default y
+
source "drivers/tee/optee/Kconfig"
source "drivers/tee/amdtee/Kconfig"
+source "drivers/tee/tstee/Kconfig"
+source "drivers/tee/qcomtee/Kconfig"
endif
diff --git a/drivers/tee/Makefile b/drivers/tee/Makefile
index 68da044afbfa..3239b91dee96 100644
--- a/drivers/tee/Makefile
+++ b/drivers/tee/Makefile
@@ -1,7 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TEE) += tee.o
tee-objs += tee_core.o
+tee-objs += tee_heap.o
tee-objs += tee_shm.o
tee-objs += tee_shm_pool.o
obj-$(CONFIG_OPTEE) += optee/
obj-$(CONFIG_AMDTEE) += amdtee/
+obj-$(CONFIG_ARM_TSTEE) += tstee/
+obj-$(CONFIG_QCOMTEE) += qcomtee/
diff --git a/drivers/tee/amdtee/amdtee_if.h b/drivers/tee/amdtee/amdtee_if.h
index ff48c3e47375..e2014e21530a 100644
--- a/drivers/tee/amdtee/amdtee_if.h
+++ b/drivers/tee/amdtee/amdtee_if.h
@@ -118,16 +118,18 @@ struct tee_cmd_unmap_shared_mem {
/**
* struct tee_cmd_load_ta - load Trusted Application (TA) binary into TEE
- * @low_addr: [in] bits [31:0] of the physical address of the TA binary
- * @hi_addr: [in] bits [63:32] of the physical address of the TA binary
- * @size: [in] size of TA binary in bytes
- * @ta_handle: [out] return handle of the loaded TA
+ * @low_addr: [in] bits [31:0] of the physical address of the TA binary
+ * @hi_addr: [in] bits [63:32] of the physical address of the TA binary
+ * @size: [in] size of TA binary in bytes
+ * @ta_handle: [out] return handle of the loaded TA
+ * @return_origin: [out] origin of return code after TEE processing
*/
struct tee_cmd_load_ta {
u32 low_addr;
u32 hi_addr;
u32 size;
u32 ta_handle;
+ u32 return_origin;
};
/**
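The widened kernel-doc above keeps the usual 32/32 split of a 64-bit physical address across low_addr/hi_addr. A minimal caller-side sketch under that reading (variable names hypothetical; __psp_pa() is the PSP address helper used elsewhere in this driver):

	struct tee_cmd_load_ta cmd = {};
	phys_addr_t pa = __psp_pa(ta_buf);	/* ta_buf: assumed TA buffer */

	cmd.low_addr = lower_32_bits(pa);
	cmd.hi_addr = upper_32_bits(pa);
	cmd.size = ta_size;			/* assumed size of the binary */
	/* ta_handle and return_origin are written back by the TEE */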
diff --git a/drivers/tee/amdtee/amdtee_private.h b/drivers/tee/amdtee/amdtee_private.h
index 6d0f7062bb87..d87050033894 100644
--- a/drivers/tee/amdtee/amdtee_private.h
+++ b/drivers/tee/amdtee/amdtee_private.h
@@ -9,7 +9,7 @@
#include <linux/mutex.h>
#include <linux/spinlock.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include <linux/kref.h>
#include <linux/types.h>
#include "amdtee_if.h"
diff --git a/drivers/tee/amdtee/call.c b/drivers/tee/amdtee/call.c
index cec6e70f0ac9..4c21b02be4af 100644
--- a/drivers/tee/amdtee/call.c
+++ b/drivers/tee/amdtee/call.c
@@ -5,10 +5,10 @@
#include <linux/device.h>
#include <linux/tee.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include <linux/psp-tee.h>
#include <linux/slab.h>
-#include <linux/psp-sev.h>
+#include <linux/psp.h>
#include "amdtee_if.h"
#include "amdtee_private.h"
@@ -423,19 +423,23 @@ int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
if (ret) {
arg->ret_origin = TEEC_ORIGIN_COMMS;
arg->ret = TEEC_ERROR_COMMUNICATION;
- } else if (arg->ret == TEEC_SUCCESS) {
- ret = get_ta_refcount(load_cmd.ta_handle);
- if (!ret) {
- arg->ret_origin = TEEC_ORIGIN_COMMS;
- arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
-
- /* Unload the TA on error */
- unload_cmd.ta_handle = load_cmd.ta_handle;
- psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA,
- (void *)&unload_cmd,
- sizeof(unload_cmd), &ret);
- } else {
- set_session_id(load_cmd.ta_handle, 0, &arg->session);
+ } else {
+ arg->ret_origin = load_cmd.return_origin;
+
+ if (arg->ret == TEEC_SUCCESS) {
+ ret = get_ta_refcount(load_cmd.ta_handle);
+ if (!ret) {
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+
+ /* Unload the TA on error */
+ unload_cmd.ta_handle = load_cmd.ta_handle;
+ psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA,
+ (void *)&unload_cmd,
+ sizeof(unload_cmd), &ret);
+ } else {
+ set_session_id(load_cmd.ta_handle, 0, &arg->session);
+ }
}
}
mutex_unlock(&ta_refcount_mutex);
diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
index 297dc62bca29..fb39d9a19c69 100644
--- a/drivers/tee/amdtee/core.c
+++ b/drivers/tee/amdtee/core.c
@@ -3,20 +3,22 @@
* Copyright 2019 Advanced Micro Devices, Inc.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
#include <linux/io.h>
+#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/psp-tee.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/device.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include <linux/types.h>
-#include <linux/mm.h>
#include <linux/uaccess.h>
-#include <linux/firmware.h>
+
#include "amdtee_private.h"
-#include "../tee_private.h"
-#include <linux/psp-tee.h>
static struct amdtee_driver_data *drv_data;
static DEFINE_MUTEX(session_list_mutex);
@@ -217,12 +219,12 @@ unlock:
return rc;
}
+/* mutex must be held by caller */
static void destroy_session(struct kref *ref)
{
struct amdtee_session *sess = container_of(ref, struct amdtee_session,
refcount);
- mutex_lock(&session_list_mutex);
list_del(&sess->list_node);
mutex_unlock(&session_list_mutex);
kfree(sess);
@@ -267,35 +269,36 @@ int amdtee_open_session(struct tee_context *ctx,
goto out;
}
+ /* Open session with loaded TA */
+ handle_open_session(arg, &session_info, param);
+ if (arg->ret != TEEC_SUCCESS) {
+ pr_err("open_session failed %d\n", arg->ret);
+ handle_unload_ta(ta_handle);
+ kref_put_mutex(&sess->refcount, destroy_session,
+ &session_list_mutex);
+ goto out;
+ }
+
/* Find an empty session index for the given TA */
spin_lock(&sess->lock);
i = find_first_zero_bit(sess->sess_mask, TEE_NUM_SESSIONS);
- if (i < TEE_NUM_SESSIONS)
+ if (i < TEE_NUM_SESSIONS) {
+ sess->session_info[i] = session_info;
+ set_session_id(ta_handle, i, &arg->session);
set_bit(i, sess->sess_mask);
+ }
spin_unlock(&sess->lock);
if (i >= TEE_NUM_SESSIONS) {
pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
+ handle_close_session(ta_handle, session_info);
handle_unload_ta(ta_handle);
- kref_put(&sess->refcount, destroy_session);
+ kref_put_mutex(&sess->refcount, destroy_session,
+ &session_list_mutex);
rc = -ENOMEM;
goto out;
}
- /* Open session with loaded TA */
- handle_open_session(arg, &session_info, param);
- if (arg->ret != TEEC_SUCCESS) {
- pr_err("open_session failed %d\n", arg->ret);
- spin_lock(&sess->lock);
- clear_bit(i, sess->sess_mask);
- spin_unlock(&sess->lock);
- handle_unload_ta(ta_handle);
- kref_put(&sess->refcount, destroy_session);
- goto out;
- }
-
- sess->session_info[i] = session_info;
- set_session_id(ta_handle, i, &arg->session);
out:
free_pages((u64)ta, get_order(ta_size));
return rc;
@@ -332,7 +335,7 @@ int amdtee_close_session(struct tee_context *ctx, u32 session)
handle_close_session(ta_handle, session_info);
handle_unload_ta(ta_handle);
- kref_put(&sess->refcount, destroy_session);
+ kref_put_mutex(&sess->refcount, destroy_session, &session_list_mutex);
return 0;
}
@@ -458,7 +461,7 @@ static int __init amdtee_driver_init(void)
rc = psp_check_tee_status();
if (rc) {
- pr_err("amd-tee driver: tee not present\n");
+ pr_err("tee not present\n");
return rc;
}
@@ -494,7 +497,6 @@ static int __init amdtee_driver_init(void)
drv_data->amdtee = amdtee;
- pr_info("amd-tee driver initialization successful\n");
return 0;
err_device_unregister:
@@ -510,7 +512,7 @@ err_kfree_drv_data:
kfree(drv_data);
drv_data = NULL;
- pr_err("amd-tee driver initialization failed\n");
+ pr_err("initialization failed\n");
return rc;
}
module_init(amdtee_driver_init);
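The destroy_session() rework above adopts the kref_put_mutex() pattern: the mutex is taken only when the final reference is dropped, and the release callback is entered with it held and must unlock it. A minimal sketch of the pattern with hypothetical names:

	struct my_obj {
		struct list_head list_node;
		struct kref refcount;
	};

	static DEFINE_MUTEX(obj_list_mutex);

	/* Called by kref_put_mutex() with obj_list_mutex already held */
	static void obj_release(struct kref *ref)
	{
		struct my_obj *obj = container_of(ref, struct my_obj, refcount);

		list_del(&obj->list_node);
		mutex_unlock(&obj_list_mutex);
		kfree(obj);
	}

	static void obj_put(struct my_obj *obj)
	{
		/* Locks obj_list_mutex only if this is the final put */
		kref_put_mutex(&obj->refcount, obj_release, &obj_list_mutex);
	}

This closes the window in the old code, where destroy_session() took the lock itself only after the refcount had already hit zero.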
diff --git a/drivers/tee/amdtee/shm_pool.c b/drivers/tee/amdtee/shm_pool.c
index f87f96a291c9..6346e0bc8a64 100644
--- a/drivers/tee/amdtee/shm_pool.c
+++ b/drivers/tee/amdtee/shm_pool.c
@@ -4,8 +4,8 @@
*/
#include <linux/slab.h>
-#include <linux/tee_drv.h>
-#include <linux/psp-sev.h>
+#include <linux/tee_core.h>
+#include <linux/psp.h>
#include "amdtee_private.h"
static int pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig
index f121c224e682..50d2051f7f20 100644
--- a/drivers/tee/optee/Kconfig
+++ b/drivers/tee/optee/Kconfig
@@ -4,6 +4,29 @@ config OPTEE
tristate "OP-TEE"
depends on HAVE_ARM_SMCCC
depends on MMU
+ depends on RPMB || !RPMB
help
This implements the OP-TEE Trusted Execution Environment (TEE)
driver.
+
+config OPTEE_INSECURE_LOAD_IMAGE
+ bool "Load OP-TEE image as firmware"
+ default n
+ depends on OPTEE && ARM64
+ help
+ This loads the BL32 image for OP-TEE as firmware when the driver is
+ probed. This returns -EPROBE_DEFER until the firmware is loadable from
+ the filesystem which is determined by checking the system_state until
+ it is in SYSTEM_RUNNING. This also requires enabling the corresponding
+ option in Trusted Firmware for Arm. The documentation there explains
+ the security threat associated with enabling this as well as
+ mitigations at the firmware and platform level.
+ https://trustedfirmware-a.readthedocs.io/en/latest/threat_model/threat_model.html
+
+ Additional documentation on kernel security risks are at
+ Documentation/tee/op-tee.rst.
+
+config OPTEE_STATIC_PROTMEM_POOL
+ bool
+ depends on HAS_IOMEM && TEE_DMABUF_HEAPS
+ default y
diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile
index a6eff388d300..ad7049c1c107 100644
--- a/drivers/tee/optee/Makefile
+++ b/drivers/tee/optee/Makefile
@@ -4,6 +4,7 @@ optee-objs += core.o
optee-objs += call.o
optee-objs += notif.o
optee-objs += rpc.o
+optee-objs += protmem.o
optee-objs += supp.o
optee-objs += device.o
optee-objs += smc_abi.o
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index 290b1bb0e9cd..16eb953e14bb 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -1,13 +1,13 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2015-2021, Linaro Limited
+ * Copyright (c) 2015-2021, 2023 Linaro Limited
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include <linux/types.h>
#include "optee_private.h"
@@ -39,9 +39,29 @@ struct optee_shm_arg_entry {
DECLARE_BITMAP(map, MAX_ARG_COUNT_PER_ENTRY);
};
+void optee_cq_init(struct optee_call_queue *cq, int thread_count)
+{
+ mutex_init(&cq->mutex);
+ INIT_LIST_HEAD(&cq->waiters);
+
+ /*
+ * If cq->total_thread_count is 0 then we're not trying to keep
+ * track of how many free threads we have, instead we're relying on
+ * the secure world to tell us when we're out of thread and have to
+ * wait for another thread to become available.
+ */
+ cq->total_thread_count = thread_count;
+ cq->free_thread_count = thread_count;
+}
+
void optee_cq_wait_init(struct optee_call_queue *cq,
- struct optee_call_waiter *w)
+ struct optee_call_waiter *w, bool sys_thread)
{
+ unsigned int free_thread_threshold;
+ bool need_wait = false;
+
+ memset(w, 0, sizeof(*w));
+
/*
* We're preparing to make a call to secure world. In case we can't
* allocate a thread in secure world we'll end up waiting in
@@ -60,8 +80,38 @@ void optee_cq_wait_init(struct optee_call_queue *cq,
*/
init_completion(&w->c);
list_add_tail(&w->list_node, &cq->waiters);
+ w->sys_thread = sys_thread;
+
+ if (cq->total_thread_count) {
+ if (sys_thread || !cq->sys_thread_req_count)
+ free_thread_threshold = 0;
+ else
+ free_thread_threshold = 1;
+
+ if (cq->free_thread_count > free_thread_threshold)
+ cq->free_thread_count--;
+ else
+ need_wait = true;
+ }
mutex_unlock(&cq->mutex);
+
+ while (need_wait) {
+ optee_cq_wait_for_completion(cq, w);
+ mutex_lock(&cq->mutex);
+
+ if (sys_thread || !cq->sys_thread_req_count)
+ free_thread_threshold = 0;
+ else
+ free_thread_threshold = 1;
+
+ if (cq->free_thread_count > free_thread_threshold) {
+ cq->free_thread_count--;
+ need_wait = false;
+ }
+
+ mutex_unlock(&cq->mutex);
+ }
}
void optee_cq_wait_for_completion(struct optee_call_queue *cq,
@@ -83,6 +133,14 @@ static void optee_cq_complete_one(struct optee_call_queue *cq)
{
struct optee_call_waiter *w;
+ /* Wake a waiting system session if any, prior to a normal session */
+ list_for_each_entry(w, &cq->waiters, list_node) {
+ if (w->sys_thread && !completion_done(&w->c)) {
+ complete(&w->c);
+ return;
+ }
+ }
+
list_for_each_entry(w, &cq->waiters, list_node) {
if (!completion_done(&w->c)) {
complete(&w->c);
@@ -104,6 +162,8 @@ void optee_cq_wait_final(struct optee_call_queue *cq,
/* Get out of the list */
list_del(&w->list_node);
+ cq->free_thread_count++;
+
/* Wake up one eventual waiting task */
optee_cq_complete_one(cq);
@@ -119,6 +179,28 @@ void optee_cq_wait_final(struct optee_call_queue *cq,
mutex_unlock(&cq->mutex);
}
+/* Count registered system sessions to reserve a system thread or not */
+static bool optee_cq_incr_sys_thread_count(struct optee_call_queue *cq)
+{
+ if (cq->total_thread_count <= 1)
+ return false;
+
+ mutex_lock(&cq->mutex);
+ cq->sys_thread_req_count++;
+ mutex_unlock(&cq->mutex);
+
+ return true;
+}
+
+static void optee_cq_decr_sys_thread_count(struct optee_call_queue *cq)
+{
+ mutex_lock(&cq->mutex);
+ cq->sys_thread_req_count--;
+ /* If there's someone waiting, let it resume */
+ optee_cq_complete_one(cq);
+ mutex_unlock(&cq->mutex);
+}
+
/* Requires the filpstate mutex to be held */
static struct optee_session *find_session(struct optee_context_data *ctxdata,
u32 session_id)
@@ -328,7 +410,8 @@ int optee_open_session(struct tee_context *ctx,
goto out;
}
- if (optee->ops->do_call_with_arg(ctx, shm, offs)) {
+ if (optee->ops->do_call_with_arg(ctx, shm, offs,
+ sess->use_sys_thread)) {
msg_arg->ret = TEEC_ERROR_COMMUNICATION;
msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
}
@@ -360,7 +443,29 @@ out:
return rc;
}
-int optee_close_session_helper(struct tee_context *ctx, u32 session)
+int optee_system_session(struct tee_context *ctx, u32 session)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_context_data *ctxdata = ctx->data;
+ struct optee_session *sess;
+ int rc = -EINVAL;
+
+ mutex_lock(&ctxdata->mutex);
+
+ sess = find_session(ctxdata, session);
+ if (sess && (sess->use_sys_thread ||
+ optee_cq_incr_sys_thread_count(&optee->call_queue))) {
+ sess->use_sys_thread = true;
+ rc = 0;
+ }
+
+ mutex_unlock(&ctxdata->mutex);
+
+ return rc;
+}
+
+int optee_close_session_helper(struct tee_context *ctx, u32 session,
+ bool system_thread)
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_shm_arg_entry *entry;
@@ -374,10 +479,13 @@ int optee_close_session_helper(struct tee_context *ctx, u32 session)
msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
msg_arg->session = session;
- optee->ops->do_call_with_arg(ctx, shm, offs);
+ optee->ops->do_call_with_arg(ctx, shm, offs, system_thread);
optee_free_msg_arg(ctx, entry, offs);
+ if (system_thread)
+ optee_cq_decr_sys_thread_count(&optee->call_queue);
+
return 0;
}
@@ -385,6 +493,7 @@ int optee_close_session(struct tee_context *ctx, u32 session)
{
struct optee_context_data *ctxdata = ctx->data;
struct optee_session *sess;
+ bool system_thread;
/* Check that the session is valid and remove it from the list */
mutex_lock(&ctxdata->mutex);
@@ -394,9 +503,10 @@ int optee_close_session(struct tee_context *ctx, u32 session)
mutex_unlock(&ctxdata->mutex);
if (!sess)
return -EINVAL;
+ system_thread = sess->use_sys_thread;
kfree(sess);
- return optee_close_session_helper(ctx, session);
+ return optee_close_session_helper(ctx, session, system_thread);
}
int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
@@ -408,12 +518,15 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
struct optee_msg_arg *msg_arg;
struct optee_session *sess;
struct tee_shm *shm;
+ bool system_thread;
u_int offs;
int rc;
/* Check that the session is valid */
mutex_lock(&ctxdata->mutex);
sess = find_session(ctxdata, arg->session);
+ if (sess)
+ system_thread = sess->use_sys_thread;
mutex_unlock(&ctxdata->mutex);
if (!sess)
return -EINVAL;
@@ -432,7 +545,7 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
if (rc)
goto out;
- if (optee->ops->do_call_with_arg(ctx, shm, offs)) {
+ if (optee->ops->do_call_with_arg(ctx, shm, offs, system_thread)) {
msg_arg->ret = TEEC_ERROR_COMMUNICATION;
msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
}
@@ -457,12 +570,15 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
struct optee_shm_arg_entry *entry;
struct optee_msg_arg *msg_arg;
struct optee_session *sess;
+ bool system_thread;
struct tee_shm *shm;
u_int offs;
/* Check that the session is valid */
mutex_lock(&ctxdata->mutex);
sess = find_session(ctxdata, session);
+ if (sess)
+ system_thread = sess->use_sys_thread;
mutex_unlock(&ctxdata->mutex);
if (!sess)
return -EINVAL;
@@ -474,7 +590,7 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
msg_arg->session = session;
msg_arg->cancel_id = cancel_id;
- optee->ops->do_call_with_arg(ctx, shm, offs);
+ optee->ops->do_call_with_arg(ctx, shm, offs, system_thread);
optee_free_msg_arg(ctx, entry, offs);
return 0;
@@ -488,7 +604,7 @@ static bool is_normal_memory(pgprot_t p)
#elif defined(CONFIG_ARM64)
return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
#else
-#error "Unuspported architecture"
+#error "Unsupported architecture"
#endif
}
@@ -524,3 +640,32 @@ int optee_check_mem_type(unsigned long start, size_t num_pages)
return rc;
}
+
+static int simple_call_with_arg(struct tee_context *ctx, u32 cmd)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_shm_arg_entry *entry;
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm;
+ u_int offs;
+
+ msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
+
+ msg_arg->cmd = cmd;
+ optee->ops->do_call_with_arg(ctx, shm, offs, false);
+
+ optee_free_msg_arg(ctx, entry, offs);
+ return 0;
+}
+
+int optee_do_bottom_half(struct tee_context *ctx)
+{
+ return simple_call_with_arg(ctx, OPTEE_MSG_CMD_DO_BOTTOM_HALF);
+}
+
+int optee_stop_async_notif(struct tee_context *ctx)
+{
+ return simple_call_with_arg(ctx, OPTEE_MSG_CMD_STOP_ASYNC_NOTIF);
+}
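The system_thread flag threaded through the calls above is set per session. A rough in-kernel usage sketch, assuming the tee_client_system_session() wrapper introduced alongside this series (which ends up in optee_system_session() above):

	struct tee_ioctl_open_session_arg arg = {};
	int rc;

	/* fill arg.uuid, arg.clnt_login, ... as usual */
	rc = tee_client_open_session(ctx, &arg, NULL);
	if (rc < 0 || arg.ret != TEEC_SUCCESS)
		return -EINVAL;

	/* Reserve a secure-world thread for this session from now on */
	tee_client_system_session(ctx, arg.session);

Subsequent tee_client_invoke_func() calls on arg.session then enter OP-TEE with system_thread == true.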
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 2a258bd3b6b5..5b62139714ce 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -9,74 +9,58 @@
#include <linux/crash_dump.h>
#include <linux/errno.h>
#include <linux/io.h>
-#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/rpmb.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include <linux/types.h>
-#include <linux/workqueue.h>
#include "optee_private.h"
-int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
- size_t size, size_t align,
- int (*shm_register)(struct tee_context *ctx,
- struct tee_shm *shm,
- struct page **pages,
- size_t num_pages,
- unsigned long start))
-{
- unsigned int order = get_order(size);
- struct page *page;
- int rc = 0;
+struct blocking_notifier_head optee_rpmb_intf_added =
+ BLOCKING_NOTIFIER_INIT(optee_rpmb_intf_added);
- /*
- * Ignore alignment since this is already going to be page aligned
- * and there's no need for any larger alignment.
- */
- page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
- if (!page)
- return -ENOMEM;
+static int rpmb_add_dev(struct device *dev)
+{
+ blocking_notifier_call_chain(&optee_rpmb_intf_added, 0,
+ to_rpmb_dev(dev));
- shm->kaddr = page_address(page);
- shm->paddr = page_to_phys(page);
- shm->size = PAGE_SIZE << order;
+ return 0;
+}
- if (shm_register) {
- unsigned int nr_pages = 1 << order, i;
- struct page **pages;
+static struct class_interface rpmb_class_intf = {
+ .add_dev = rpmb_add_dev,
+};
- pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
- if (!pages) {
- rc = -ENOMEM;
- goto err;
- }
+void optee_bus_scan_rpmb(struct work_struct *work)
+{
+ struct optee *optee = container_of(work, struct optee,
+ rpmb_scan_bus_work);
+ int ret;
+
+ if (!optee->rpmb_scan_bus_done) {
+ ret = optee_enumerate_devices(PTA_CMD_GET_DEVICES_RPMB);
+ optee->rpmb_scan_bus_done = !ret;
+ if (ret && ret != -ENODEV)
+ pr_info("Scanning for RPMB device: ret %d\n", ret);
+ }
+}
- for (i = 0; i < nr_pages; i++)
- pages[i] = page + i;
+int optee_rpmb_intf_rdev(struct notifier_block *intf, unsigned long action,
+ void *data)
+{
+ struct optee *optee = container_of(intf, struct optee, rpmb_intf);
- rc = shm_register(shm->ctx, shm, pages, nr_pages,
- (unsigned long)shm->kaddr);
- kfree(pages);
- if (rc)
- goto err;
- }
+ schedule_work(&optee->rpmb_scan_bus_work);
return 0;
-
-err:
- free_pages((unsigned long)shm->kaddr, order);
- return rc;
}
-void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
- int (*shm_unregister)(struct tee_context *ctx,
- struct tee_shm *shm))
+int optee_set_dma_mask(struct optee *optee, u_int pa_width)
{
- if (shm_unregister)
- shm_unregister(shm->ctx, shm);
- free_pages((unsigned long)shm->kaddr, get_order(shm->size));
- shm->kaddr = NULL;
+ u64 mask = DMA_BIT_MASK(min(64, pa_width));
+
+ return dma_coerce_mask_and_coherent(&optee->teedev->dev, mask);
}
static void optee_bus_scan(struct work_struct *work)
@@ -84,6 +68,34 @@ static void optee_bus_scan(struct work_struct *work)
WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP));
}
+static ssize_t rpmb_routing_model_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct optee *optee = dev_get_drvdata(dev);
+ const char *s;
+
+ if (optee->in_kernel_rpmb_routing)
+ s = "kernel";
+ else
+ s = "user";
+
+ return sysfs_emit(buf, "%s\n", s);
+}
+static DEVICE_ATTR_RO(rpmb_routing_model);
+
+static struct attribute *optee_dev_attrs[] = {
+ &dev_attr_rpmb_routing_model.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(optee_dev);
+
+void optee_set_dev_group(struct optee *optee)
+{
+ tee_device_set_dev_groups(optee->teedev, optee_dev_groups);
+ tee_device_set_dev_groups(optee->supp_teedev, optee_dev_groups);
+}
+
int optee_open(struct tee_context *ctx, bool cap_memref_null)
{
struct optee_context_data *ctxdata;
@@ -110,12 +122,7 @@ int optee_open(struct tee_context *ctx, bool cap_memref_null)
if (!optee->scan_bus_done) {
INIT_WORK(&optee->scan_bus_work, optee_bus_scan);
- optee->scan_bus_wq = create_workqueue("optee_bus_scan");
- if (!optee->scan_bus_wq) {
- kfree(ctxdata);
- return -ECHILD;
- }
- queue_work(optee->scan_bus_wq, &optee->scan_bus_work);
+ schedule_work(&optee->scan_bus_work);
optee->scan_bus_done = true;
}
}
@@ -129,7 +136,8 @@ int optee_open(struct tee_context *ctx, bool cap_memref_null)
static void optee_release_helper(struct tee_context *ctx,
int (*close_session)(struct tee_context *ctx,
- u32 session))
+ u32 session,
+ bool system_thread))
{
struct optee_context_data *ctxdata = ctx->data;
struct optee_session *sess;
@@ -141,7 +149,7 @@ static void optee_release_helper(struct tee_context *ctx,
list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list,
list_node) {
list_del(&sess->list_node);
- close_session(ctx, sess->session_id);
+ close_session(ctx, sess->session_id, sess->use_sys_thread);
kfree(sess);
}
kfree(ctxdata);
@@ -158,15 +166,15 @@ void optee_release_supp(struct tee_context *ctx)
struct optee *optee = tee_get_drvdata(ctx->teedev);
optee_release_helper(ctx, optee_close_session_helper);
- if (optee->scan_bus_wq) {
- destroy_workqueue(optee->scan_bus_wq);
- optee->scan_bus_wq = NULL;
- }
+
optee_supp_release(&optee->supp);
}
void optee_remove_common(struct optee *optee)
{
+ blocking_notifier_chain_unregister(&optee_rpmb_intf_added,
+ &optee->rpmb_intf);
+ cancel_work_sync(&optee->rpmb_scan_bus_work);
/* Unregister OP-TEE specific client devices on TEE bus */
optee_unregister_devices();
@@ -183,13 +191,18 @@ void optee_remove_common(struct optee *optee)
tee_shm_pool_free(optee->pool);
optee_supp_uninit(&optee->supp);
mutex_destroy(&optee->call_queue.mutex);
+ rpmb_dev_put(optee->rpmb_dev);
+ mutex_destroy(&optee->rpmb_dev_mutex);
}
static int smc_abi_rc;
static int ffa_abi_rc;
+static bool intf_is_regged;
static int __init optee_core_init(void)
{
+ int rc;
+
/*
* The kernel may have crashed at the same time that all available
* secure world threads were suspended and we cannot reschedule the
@@ -200,18 +213,36 @@ static int __init optee_core_init(void)
if (is_kdump_kernel())
return -ENODEV;
+ if (IS_REACHABLE(CONFIG_RPMB)) {
+ rc = rpmb_interface_register(&rpmb_class_intf);
+ if (rc)
+ return rc;
+ intf_is_regged = true;
+ }
+
smc_abi_rc = optee_smc_abi_register();
ffa_abi_rc = optee_ffa_abi_register();
/* If both failed there's no point with this module */
- if (smc_abi_rc && ffa_abi_rc)
+ if (smc_abi_rc && ffa_abi_rc) {
+ if (IS_REACHABLE(CONFIG_RPMB)) {
+ rpmb_interface_unregister(&rpmb_class_intf);
+ intf_is_regged = false;
+ }
return smc_abi_rc;
+ }
+
return 0;
}
module_init(optee_core_init);
static void __exit optee_core_exit(void)
{
+ if (IS_REACHABLE(CONFIG_RPMB) && intf_is_regged) {
+ rpmb_interface_unregister(&rpmb_class_intf);
+ intf_is_regged = false;
+ }
+
if (!smc_abi_rc)
optee_smc_abi_unregister();
if (!ffa_abi_rc)
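The RPMB wiring above combines two stock kernel mechanisms: a struct class_interface whose ->add_dev() hook runs once for every RPMB device (already present or hot-added), and a blocking notifier chain that fans that event out to each probed optee instance, which defers the bus rescan to a work item. A condensed sketch of the same shape, with hypothetical names:

	struct my_drv {
		struct notifier_block nb;
		struct work_struct scan_work;
	};

	static BLOCKING_NOTIFIER_HEAD(dev_added_chain);

	/* class_interface hook: fires per device on the class */
	static int my_add_dev(struct device *dev)
	{
		blocking_notifier_call_chain(&dev_added_chain, 0, dev);
		return 0;
	}

	/* per-instance listener: defer the heavy lifting to process context */
	static int my_notifier_call(struct notifier_block *nb,
				    unsigned long action, void *data)
	{
		struct my_drv *drv = container_of(nb, struct my_drv, nb);

		schedule_work(&drv->scan_work);
		return NOTIFY_OK;
	}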
diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
index 64f0e047c23d..950b4661d5df 100644
--- a/drivers/tee/optee/device.c
+++ b/drivers/tee/optee/device.c
@@ -7,7 +7,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include <linux/uuid.h>
#include "optee_private.h"
@@ -43,6 +43,13 @@ static int get_devices(struct tee_context *ctx, u32 session,
ret = tee_client_invoke_func(ctx, &inv_arg, param);
if ((ret < 0) || ((inv_arg.ret != TEEC_SUCCESS) &&
(inv_arg.ret != TEEC_ERROR_SHORT_BUFFER))) {
+ /*
+ * TEE_ERROR_STORAGE_NOT_AVAILABLE is returned when getting
+ * the list of device TAs that depends on RPMB but a usable
+ * RPMB device isn't found.
+ */
+ if (inv_arg.ret == TEE_ERROR_STORAGE_NOT_AVAILABLE)
+ return -ENODEV;
pr_err("PTA_CMD_GET_DEVICES invoke function err: %x\n",
inv_arg.ret);
return -EINVAL;
@@ -60,7 +67,16 @@ static void optee_release_device(struct device *dev)
kfree(optee_device);
}
-static int optee_register_device(const uuid_t *device_uuid)
+static ssize_t need_supplicant_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+static DEVICE_ATTR_RO(need_supplicant);
+
+static int optee_register_device(const uuid_t *device_uuid, u32 func)
{
struct tee_client_device *optee_device = NULL;
int rc;
@@ -81,9 +97,14 @@ static int optee_register_device(const uuid_t *device_uuid)
if (rc) {
pr_err("device registration failed, err: %d\n", rc);
put_device(&optee_device->dev);
+ return rc;
}
- return rc;
+ if (func == PTA_CMD_GET_DEVICES_SUPP)
+ device_create_file(&optee_device->dev,
+ &dev_attr_need_supplicant);
+
+ return 0;
}
static int __optee_enumerate_devices(u32 func)
@@ -142,7 +163,7 @@ static int __optee_enumerate_devices(u32 func)
num_devices = shm_size / sizeof(uuid_t);
for (idx = 0; idx < num_devices; idx++) {
- rc = optee_register_device(&device_uuid[idx]);
+ rc = optee_register_device(&device_uuid[idx], func);
if (rc)
goto out_shm;
}
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
index 0828240f27e6..bf8390789ecf 100644
--- a/drivers/tee/optee/ffa_abi.c
+++ b/drivers/tee/optee/ffa_abi.c
@@ -1,17 +1,18 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2021, Linaro Limited
+ * Copyright (c) 2021, 2023 Linaro Limited
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/arm_ffa.h>
#include <linux/errno.h>
+#include <linux/rpmb.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include <linux/types.h>
#include "optee_private.h"
#include "optee_ffa.h"
@@ -374,14 +375,14 @@ static int optee_ffa_shm_unregister_supp(struct tee_context *ctx,
static int pool_ffa_op_alloc(struct tee_shm_pool *pool,
struct tee_shm *shm, size_t size, size_t align)
{
- return optee_pool_op_alloc_helper(pool, shm, size, align,
- optee_ffa_shm_register);
+ return tee_dyn_shm_alloc_helper(shm, size, align,
+ optee_ffa_shm_register);
}
static void pool_ffa_op_free(struct tee_shm_pool *pool,
struct tee_shm *shm)
{
- optee_pool_op_free_helper(pool, shm, optee_ffa_shm_unregister);
+ tee_dyn_shm_free_helper(shm, optee_ffa_shm_unregister);
}
static void pool_ffa_op_destroy_pool(struct tee_shm_pool *pool)
@@ -528,7 +529,8 @@ static void optee_handle_ffa_rpc(struct tee_context *ctx, struct optee *optee,
static int optee_ffa_yielding_call(struct tee_context *ctx,
struct ffa_send_direct_data *data,
- struct optee_msg_arg *rpc_arg)
+ struct optee_msg_arg *rpc_arg,
+ bool system_thread)
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
@@ -541,7 +543,7 @@ static int optee_ffa_yielding_call(struct tee_context *ctx,
int rc;
/* Initialize waiter */
- optee_cq_wait_init(&optee->call_queue, &w);
+ optee_cq_wait_init(&optee->call_queue, &w, system_thread);
while (true) {
rc = msg_ops->sync_send_receive(ffa_dev, data);
if (rc)
@@ -604,6 +606,7 @@ done:
* @ctx: calling context
* @shm: shared memory holding the message to pass to secure world
* @offs: offset of the message in @shm
+ * @system_thread: true if caller requests TEE system thread support
*
* Does a FF-A call to OP-TEE in secure world and handles eventual resulting
* Remote Procedure Calls (RPC) from OP-TEE.
@@ -612,7 +615,8 @@ done:
*/
static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
- struct tee_shm *shm, u_int offs)
+ struct tee_shm *shm, u_int offs,
+ bool system_thread)
{
struct ffa_send_direct_data data = {
.data0 = OPTEE_FFA_YIELDING_CALL_WITH_ARG,
@@ -642,7 +646,125 @@ static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
if (IS_ERR(rpc_arg))
return PTR_ERR(rpc_arg);
- return optee_ffa_yielding_call(ctx, &data, rpc_arg);
+ return optee_ffa_yielding_call(ctx, &data, rpc_arg, system_thread);
+}
+
+static int do_call_lend_protmem(struct optee *optee, u64 cookie, u32 use_case)
+{
+ struct optee_shm_arg_entry *entry;
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm;
+ u_int offs;
+ int rc;
+
+ msg_arg = optee_get_msg_arg(optee->ctx, 1, &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_ASSIGN_PROTMEM;
+ msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ msg_arg->params[0].u.value.a = cookie;
+ msg_arg->params[0].u.value.b = use_case;
+
+ rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false);
+ if (rc)
+ goto out;
+ if (msg_arg->ret != TEEC_SUCCESS) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+out:
+ optee_free_msg_arg(optee->ctx, entry, offs);
+ return rc;
+}
+
+static int optee_ffa_lend_protmem(struct optee *optee, struct tee_shm *protmem,
+ u32 *mem_attrs, unsigned int ma_count,
+ u32 use_case)
+{
+ struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops;
+ const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops;
+ struct ffa_send_direct_data data;
+ struct ffa_mem_region_attributes *mem_attr;
+ struct ffa_mem_ops_args args = {
+ .use_txbuf = true,
+ .tag = use_case,
+ };
+ struct page *page;
+ struct scatterlist sgl;
+ unsigned int n;
+ int rc;
+
+ mem_attr = kcalloc(ma_count, sizeof(*mem_attr), GFP_KERNEL);
+ for (n = 0; n < ma_count; n++) {
+ mem_attr[n].receiver = mem_attrs[n] & U16_MAX;
+ mem_attr[n].attrs = mem_attrs[n] >> 16;
+ }
+ args.attrs = mem_attr;
+ args.nattrs = ma_count;
+
+ page = phys_to_page(protmem->paddr);
+ sg_init_table(&sgl, 1);
+ sg_set_page(&sgl, page, protmem->size, 0);
+
+ args.sg = &sgl;
+ rc = mem_ops->memory_lend(&args);
+ kfree(mem_attr);
+ if (rc)
+ return rc;
+
+ rc = do_call_lend_protmem(optee, args.g_handle, use_case);
+ if (rc)
+ goto err_reclaim;
+
+ rc = optee_shm_add_ffa_handle(optee, protmem, args.g_handle);
+ if (rc)
+ goto err_unreg;
+
+ protmem->sec_world_id = args.g_handle;
+
+ return 0;
+
+err_unreg:
+ data = (struct ffa_send_direct_data){
+ .data0 = OPTEE_FFA_RELEASE_PROTMEM,
+ .data1 = (u32)args.g_handle,
+ .data2 = (u32)(args.g_handle >> 32),
+ };
+ msg_ops->sync_send_receive(ffa_dev, &data);
+err_reclaim:
+ mem_ops->memory_reclaim(args.g_handle, 0);
+ return rc;
+}
+
+static int optee_ffa_reclaim_protmem(struct optee *optee,
+ struct tee_shm *protmem)
+{
+ struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops;
+ const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops;
+ u64 global_handle = protmem->sec_world_id;
+ struct ffa_send_direct_data data = {
+ .data0 = OPTEE_FFA_RELEASE_PROTMEM,
+ .data1 = (u32)global_handle,
+ .data2 = (u32)(global_handle >> 32)
+ };
+ int rc;
+
+ optee_shm_rem_ffa_handle(optee, global_handle);
+ protmem->sec_world_id = 0;
+
+ rc = msg_ops->sync_send_receive(ffa_dev, &data);
+ if (rc)
+ pr_err("Release SHM id 0x%llx rc %d\n", global_handle, rc);
+
+ rc = mem_ops->memory_reclaim(global_handle, 0);
+ if (rc)
+ pr_err("mem_reclaim: 0x%llx %d", global_handle, rc);
+
+ return rc;
}
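protmem.c, which consumes these two hooks, is outside this excerpt; conceptually a protected-memory pool brackets a buffer's lifetime with them. A rough sketch under that assumption (helper names hypothetical):

	static int protmem_pool_lend(struct optee *optee, struct tee_shm *shm,
				     u32 *mem_attrs, unsigned int ma_count)
	{
		/*
		 * Hand the physically contiguous range to secure world; it
		 * becomes inaccessible to the kernel until reclaimed.
		 */
		return optee->ops->lend_protmem(optee, shm, mem_attrs, ma_count,
						OPTEE_MSG_PROTMEM_SECURE_VIDEO_PLAY);
	}

	static void protmem_pool_reclaim(struct optee *optee, struct tee_shm *shm)
	{
		/* Make the range normal-world accessible again before reuse */
		optee->ops->reclaim_protmem(optee, shm);
	}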
/*
@@ -653,11 +775,13 @@ static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
* with a matching configuration.
*/
-static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
+static bool optee_ffa_api_is_compatible(struct ffa_device *ffa_dev,
const struct ffa_ops *ops)
{
const struct ffa_msg_ops *msg_ops = ops->msg_ops;
- struct ffa_send_direct_data data = { OPTEE_FFA_GET_API_VERSION };
+ struct ffa_send_direct_data data = {
+ .data0 = OPTEE_FFA_GET_API_VERSION,
+ };
int rc;
msg_ops->mode_32bit_set(ffa_dev);
@@ -674,7 +798,9 @@ static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
return false;
}
- data = (struct ffa_send_direct_data){ OPTEE_FFA_GET_OS_VERSION };
+ data = (struct ffa_send_direct_data){
+ .data0 = OPTEE_FFA_GET_OS_VERSION,
+ };
rc = msg_ops->sync_send_receive(ffa_dev, &data);
if (rc) {
pr_err("Unexpected error %d\n", rc);
@@ -692,9 +818,12 @@ static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
const struct ffa_ops *ops,
u32 *sec_caps,
- unsigned int *rpc_param_count)
+ unsigned int *rpc_param_count,
+ unsigned int *max_notif_value)
{
- struct ffa_send_direct_data data = { OPTEE_FFA_EXCHANGE_CAPABILITIES };
+ struct ffa_send_direct_data data = {
+ .data0 = OPTEE_FFA_EXCHANGE_CAPABILITIES,
+ };
int rc;
rc = ops->msg_ops->sync_send_receive(ffa_dev, &data);
@@ -709,10 +838,48 @@ static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
*rpc_param_count = (u8)data.data1;
*sec_caps = data.data2;
+ if (data.data3)
+ *max_notif_value = data.data3;
+ else
+ *max_notif_value = OPTEE_DEFAULT_MAX_NOTIF_VALUE;
return true;
}
+static void notif_work_fn(struct work_struct *work)
+{
+ struct optee_ffa *optee_ffa = container_of(work, struct optee_ffa,
+ notif_work);
+ struct optee *optee = container_of(optee_ffa, struct optee, ffa);
+
+ optee_do_bottom_half(optee->ctx);
+}
+
+static void notif_callback(int notify_id, void *cb_data)
+{
+ struct optee *optee = cb_data;
+
+ if (notify_id == optee->ffa.bottom_half_value)
+ queue_work(optee->ffa.notif_wq, &optee->ffa.notif_work);
+ else
+ optee_notif_send(optee, notify_id);
+}
+
+static int enable_async_notif(struct optee *optee)
+{
+ struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ struct ffa_send_direct_data data = {
+ .data0 = OPTEE_FFA_ENABLE_ASYNC_NOTIF,
+ .data1 = optee->ffa.bottom_half_value,
+ };
+ int rc;
+
+ rc = ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &data);
+ if (rc)
+ return rc;
+ return data.data0;
+}
+
static void optee_ffa_get_version(struct tee_device *teedev,
struct tee_ioctl_version_data *vers)
{
@@ -770,12 +937,20 @@ static const struct optee_ops optee_ffa_ops = {
.do_call_with_arg = optee_ffa_do_call_with_arg,
.to_msg_param = optee_ffa_to_msg_param,
.from_msg_param = optee_ffa_from_msg_param,
+ .lend_protmem = optee_ffa_lend_protmem,
+ .reclaim_protmem = optee_ffa_reclaim_protmem,
};
static void optee_ffa_remove(struct ffa_device *ffa_dev)
{
struct optee *optee = ffa_dev_get_drvdata(ffa_dev);
+ u32 bottom_half_id = optee->ffa.bottom_half_value;
+ if (bottom_half_id != U32_MAX) {
+ ffa_dev->ops->notifier_ops->notify_relinquish(ffa_dev,
+ bottom_half_id);
+ destroy_workqueue(optee->ffa.notif_wq);
+ }
optee_remove_common(optee);
mutex_destroy(&optee->ffa.mutex);
@@ -784,9 +959,82 @@ static void optee_ffa_remove(struct ffa_device *ffa_dev)
kfree(optee);
}
+static int optee_ffa_async_notif_init(struct ffa_device *ffa_dev,
+ struct optee *optee)
+{
+ bool is_per_vcpu = false;
+ u32 notif_id = 0;
+ int rc;
+
+ INIT_WORK(&optee->ffa.notif_work, notif_work_fn);
+ optee->ffa.notif_wq = create_workqueue("optee_notification");
+ if (!optee->ffa.notif_wq) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ while (true) {
+ rc = ffa_dev->ops->notifier_ops->notify_request(ffa_dev,
+ is_per_vcpu,
+ notif_callback,
+ optee,
+ notif_id);
+ if (!rc)
+ break;
+ /*
+ * -EACCES means that the notification ID was
+ * already bound, try the next one as long as we
+ * haven't reached the max. Any other error is a
+ * permanent error, so skip asynchronous
+ * notifications in that case.
+ */
+ if (rc != -EACCES)
+ goto err_wq;
+ notif_id++;
+ if (notif_id >= OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE)
+ goto err_wq;
+ }
+ optee->ffa.bottom_half_value = notif_id;
+
+ rc = enable_async_notif(optee);
+ if (rc < 0)
+ goto err_rel;
+
+ return 0;
+err_rel:
+ ffa_dev->ops->notifier_ops->notify_relinquish(ffa_dev, notif_id);
+err_wq:
+ destroy_workqueue(optee->ffa.notif_wq);
+err:
+ optee->ffa.bottom_half_value = U32_MAX;
+
+ return rc;
+}
+
+static int optee_ffa_protmem_pool_init(struct optee *optee, u32 sec_caps)
+{
+ enum tee_dma_heap_id id = TEE_DMA_HEAP_SECURE_VIDEO_PLAY;
+ struct tee_protmem_pool *pool;
+ int rc = 0;
+
+ if (sec_caps & OPTEE_FFA_SEC_CAP_PROTMEM) {
+ pool = optee_protmem_alloc_dyn_pool(optee, id);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ rc = tee_device_register_dma_heap(optee->teedev, id, pool);
+ if (rc)
+ pool->ops->destroy_pool(pool);
+ }
+
+ return rc;
+}
+
static int optee_ffa_probe(struct ffa_device *ffa_dev)
{
+ const struct ffa_notifier_ops *notif_ops;
const struct ffa_ops *ffa_ops;
+ unsigned int max_notif_value;
unsigned int rpc_param_count;
struct tee_shm_pool *pool;
struct tee_device *teedev;
@@ -797,12 +1045,13 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
int rc;
ffa_ops = ffa_dev->ops;
+ notif_ops = ffa_ops->notifier_ops;
- if (!optee_ffa_api_is_compatbile(ffa_dev, ffa_ops))
+ if (!optee_ffa_api_is_compatible(ffa_dev, ffa_ops))
return -EINVAL;
if (!optee_ffa_exchange_caps(ffa_dev, ffa_ops, &sec_caps,
- &rpc_param_count))
+ &rpc_param_count, &max_notif_value))
return -EINVAL;
if (sec_caps & OPTEE_FFA_SEC_CAP_ARG_OFFSET)
arg_cache_flags |= OPTEE_SHM_ARG_SHARED;
@@ -820,13 +1069,18 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
optee->ops = &optee_ffa_ops;
optee->ffa.ffa_dev = ffa_dev;
+ optee->ffa.bottom_half_value = U32_MAX;
optee->rpc_param_count = rpc_param_count;
+ if (IS_REACHABLE(CONFIG_RPMB) &&
+ (sec_caps & OPTEE_FFA_SEC_CAP_RPMB_PROBE))
+ optee->in_kernel_rpmb_routing = true;
+
teedev = tee_device_alloc(&optee_ffa_clnt_desc, NULL, optee->pool,
optee);
if (IS_ERR(teedev)) {
rc = PTR_ERR(teedev);
- goto err_free_pool;
+ goto err_free_shm_pool;
}
optee->teedev = teedev;
@@ -838,6 +1092,8 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
}
optee->supp_teedev = teedev;
+ optee_set_dev_group(optee);
+
rc = tee_device_register(optee->teedev);
if (rc)
goto err_unreg_supp_teedev;
@@ -850,10 +1106,10 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
if (rc)
goto err_unreg_supp_teedev;
mutex_init(&optee->ffa.mutex);
- mutex_init(&optee->call_queue.mutex);
- INIT_LIST_HEAD(&optee->call_queue.waiters);
+ optee_cq_init(&optee->call_queue, 0);
optee_supp_init(&optee->supp);
optee_shm_arg_cache_init(optee, arg_cache_flags);
+ mutex_init(&optee->rpmb_dev_mutex);
ffa_dev_set_drvdata(ffa_dev, optee);
ctx = teedev_open(optee->teedev);
if (IS_ERR(ctx)) {
@@ -864,21 +1120,39 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE);
if (rc)
goto err_close_ctx;
+ if (sec_caps & OPTEE_FFA_SEC_CAP_ASYNC_NOTIF) {
+ rc = optee_ffa_async_notif_init(ffa_dev, optee);
+ if (rc < 0)
+ pr_err("Failed to initialize async notifications: %d",
+ rc);
+ }
+
+ if (optee_ffa_protmem_pool_init(optee, sec_caps))
+ pr_info("Protected memory service not available\n");
rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
if (rc)
goto err_unregister_devices;
+ INIT_WORK(&optee->rpmb_scan_bus_work, optee_bus_scan_rpmb);
+ optee->rpmb_intf.notifier_call = optee_rpmb_intf_rdev;
+ blocking_notifier_chain_register(&optee_rpmb_intf_added,
+ &optee->rpmb_intf);
pr_info("initialized driver\n");
return 0;
err_unregister_devices:
optee_unregister_devices();
+ if (optee->ffa.bottom_half_value != U32_MAX)
+ notif_ops->notify_relinquish(ffa_dev,
+ optee->ffa.bottom_half_value);
optee_notif_uninit(optee);
err_close_ctx:
teedev_close_context(ctx);
err_rhashtable_free:
rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL);
+ rpmb_dev_put(optee->rpmb_dev);
+ mutex_destroy(&optee->rpmb_dev_mutex);
optee_supp_uninit(&optee->supp);
mutex_destroy(&optee->call_queue.mutex);
mutex_destroy(&optee->ffa.mutex);
@@ -886,7 +1160,7 @@ err_unreg_supp_teedev:
tee_device_unregister(optee->supp_teedev);
err_unreg_teedev:
tee_device_unregister(optee->teedev);
-err_free_pool:
+err_free_shm_pool:
tee_shm_pool_free(pool);
err_free_optee:
kfree(optee);
diff --git a/drivers/tee/optee/notif.c b/drivers/tee/optee/notif.c
index 05212842b0a5..1970880c796f 100644
--- a/drivers/tee/optee/notif.c
+++ b/drivers/tee/optee/notif.c
@@ -9,7 +9,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include "optee_private.h"
struct notif_entry {
@@ -29,7 +29,7 @@ static bool have_key(struct optee *optee, u_int key)
return false;
}
-int optee_notif_wait(struct optee *optee, u_int key)
+int optee_notif_wait(struct optee *optee, u_int key, u32 timeout)
{
unsigned long flags;
struct notif_entry *entry;
@@ -70,7 +70,12 @@ int optee_notif_wait(struct optee *optee, u_int key)
* Unlock temporarily and wait for completion.
*/
spin_unlock_irqrestore(&optee->notif.lock, flags);
- wait_for_completion(&entry->c);
+ if (timeout != 0) {
+ if (!wait_for_completion_timeout(&entry->c, timeout))
+ rc = -ETIMEDOUT;
+ } else {
+ wait_for_completion(&entry->c);
+ }
spin_lock_irqsave(&optee->notif.lock, flags);
list_del(&entry->link);
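Since the hunk passes the value straight into wait_for_completion_timeout(), the timeout at this level is in jiffies, with 0 preserving the old indefinite wait. A hypothetical caller sketch:

	/* Wait at most two seconds for notification key 42 */
	rc = optee_notif_wait(optee, 42, msecs_to_jiffies(2000));
	if (rc == -ETIMEDOUT)
		pr_warn("notification 42 timed out\n");

	/* timeout == 0 behaves exactly as before: wait forever */
	rc = optee_notif_wait(optee, 42, 0);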
diff --git a/drivers/tee/optee/optee_ffa.h b/drivers/tee/optee/optee_ffa.h
index 97266243deaa..cc257e7956a3 100644
--- a/drivers/tee/optee/optee_ffa.h
+++ b/drivers/tee/optee/optee_ffa.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (c) 2019-2021, Linaro Limited
+ * Copyright (c) 2019-2021, 2023 Linaro Limited
*/
/*
@@ -73,7 +73,7 @@
*
* Call register usage:
* w3: Service ID, OPTEE_FFA_EXCHANGE_CAPABILITIES
- * w4-w7: Note used (MBZ)
+ * w4-w7: Not used (MBZ)
*
* Return register usage:
* w3: Error code, 0 on success
@@ -81,15 +81,21 @@
* as the second MSG arg struct for
* OPTEE_FFA_YIELDING_CALL_WITH_ARG.
* Bit[31:8]: Reserved (MBZ)
- * w5: Bitfield of secure world capabilities OPTEE_FFA_SEC_CAP_* below,
- * unused bits MBZ.
- * w6-w7: Not used (MBZ)
+ * w5: Bitfield of OP-TEE capabilities OPTEE_FFA_SEC_CAP_*
+ * w6: The maximum secure world notification number
+ * w7: Not used (MBZ)
*/
/*
* Secure world supports giving an offset into the argument shared memory
* object, see also OPTEE_FFA_YIELDING_CALL_WITH_ARG
*/
#define OPTEE_FFA_SEC_CAP_ARG_OFFSET BIT(0)
+/* OP-TEE supports asynchronous notification via FF-A */
+#define OPTEE_FFA_SEC_CAP_ASYNC_NOTIF BIT(1)
+/* OP-TEE supports probing for RPMB device if needed */
+#define OPTEE_FFA_SEC_CAP_RPMB_PROBE BIT(2)
+/* OP-TEE supports Protected Memory for secure data path */
+#define OPTEE_FFA_SEC_CAP_PROTMEM BIT(3)
#define OPTEE_FFA_EXCHANGE_CAPABILITIES OPTEE_FFA_BLOCKING_CALL(2)
@@ -104,11 +110,44 @@
*
* Return register usage:
* w3: Error code, 0 on success
- * w4-w7: Note used (MBZ)
+ * w4-w7: Not used (MBZ)
*/
#define OPTEE_FFA_UNREGISTER_SHM OPTEE_FFA_BLOCKING_CALL(3)
/*
+ * Inform OP-TEE that the normal world is able to receive asynchronous
+ * notifications.
+ *
+ * Call register usage:
+ * w3: Service ID, OPTEE_FFA_ENABLE_ASYNC_NOTIF
+ * w4: Notification value to request bottom half processing, should be
+ * less than OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE
+ * w5-w7: Not used (MBZ)
+ *
+ * Return register usage:
+ * w3: Error code, 0 on success
+ * w4-w7: Not used (MBZ)
+ */
+#define OPTEE_FFA_ENABLE_ASYNC_NOTIF OPTEE_FFA_BLOCKING_CALL(5)
+
+#define OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE 64
+
+/*
+ * Release protected memory
+ *
+ * Call register usage:
+ * w3: Service ID, OPTEE_FFA_RELEASE_PROTMEM
+ * w4: Shared memory handle, lower bits
+ * w5: Shared memory handle, higher bits
+ * w6-w7: Not used (MBZ)
+ *
+ * Return register usage:
+ * w3: Error code, 0 on success
+ * w4-w7: Not used (MBZ)
+ */
+#define OPTEE_FFA_RELEASE_PROTMEM OPTEE_FFA_BLOCKING_CALL(8)
+
+/*
* Call with struct optee_msg_arg as argument in the supplied shared memory
* with a zero internal offset and normal cached memory attributes.
* Register usage:
diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
index 70e9cc2ee96b..838e1d4a22f0 100644
--- a/drivers/tee/optee/optee_msg.h
+++ b/drivers/tee/optee/optee_msg.h
@@ -133,13 +133,13 @@ struct optee_msg_param_rmem {
};
/**
- * struct optee_msg_param_fmem - ffa memory reference parameter
+ * struct optee_msg_param_fmem - FF-A memory reference parameter
* @offs_lower: Lower bits of offset into shared memory reference
* @offs_upper: Upper bits of offset into shared memory reference
* @internal_offs: Internal offset into the first page of shared memory
* reference
* @size: Size of the buffer
- * @global_id: Global identifier of Shared memory
+ * @global_id: Global identifier of the shared memory
*/
struct optee_msg_param_fmem {
u32 offs_low;
@@ -165,7 +165,7 @@ struct optee_msg_param_value {
* @attr: attributes
* @tmem: parameter by temporary memory reference
* @rmem: parameter by registered memory reference
- * @fmem: parameter by ffa registered memory reference
+ * @fmem: parameter by FF-A registered memory reference
* @value: parameter by opaque value
* @octets: parameter by octet string
*
@@ -241,11 +241,23 @@ struct optee_msg_arg {
* 384fb3e0-e7f8-11e3-af63-0002a5d5c51b.
* Represented in 4 32-bit words in OPTEE_MSG_UID_0, OPTEE_MSG_UID_1,
* OPTEE_MSG_UID_2, OPTEE_MSG_UID_3.
+ *
+ * In the case where the OP-TEE image is loaded by the kernel, this will
+ * initially return an alternate UID to reflect that we are communicating with
+ * the TF-A image loading service at that time instead of OP-TEE. That UID is:
+ * a3fbeab1-1246-315d-c7c4-06b9c03cbea4.
+ * Represented in 4 32-bit words in OPTEE_MSG_IMAGE_LOAD_UID_0,
+ * OPTEE_MSG_IMAGE_LOAD_UID_1, OPTEE_MSG_IMAGE_LOAD_UID_2,
+ * OPTEE_MSG_IMAGE_LOAD_UID_3.
*/
#define OPTEE_MSG_UID_0 0x384fb3e0
#define OPTEE_MSG_UID_1 0xe7f811e3
#define OPTEE_MSG_UID_2 0xaf630002
#define OPTEE_MSG_UID_3 0xa5d5c51b
+#define OPTEE_MSG_IMAGE_LOAD_UID_0 0xa3fbeab1
+#define OPTEE_MSG_IMAGE_LOAD_UID_1 0x1246315d
+#define OPTEE_MSG_IMAGE_LOAD_UID_2 0xc7c406b9
+#define OPTEE_MSG_IMAGE_LOAD_UID_3 0xc03cbea4
#define OPTEE_MSG_FUNCID_CALLS_UID 0xFF01
/*
@@ -285,6 +297,18 @@ struct optee_msg_arg {
#define OPTEE_MSG_FUNCID_GET_OS_REVISION 0x0001
/*
+ * Values used in OPTEE_MSG_CMD_LEND_PROTMEM below
+ * OPTEE_MSG_PROTMEM_RESERVED Reserved
+ * OPTEE_MSG_PROTMEM_SECURE_VIDEO_PLAY Secure Video Playback
+ * OPTEE_MSG_PROTMEM_TRUSTED_UI Trusted UI
+ * OPTEE_MSG_PROTMEM_SECURE_VIDEO_RECORD Secure Video Recording
+ */
+#define OPTEE_MSG_PROTMEM_RESERVED 0
+#define OPTEE_MSG_PROTMEM_SECURE_VIDEO_PLAY 1
+#define OPTEE_MSG_PROTMEM_TRUSTED_UI 2
+#define OPTEE_MSG_PROTMEM_SECURE_VIDEO_RECORD 3
+
+/*
* Do a secure call with struct optee_msg_arg as argument
* The OPTEE_MSG_CMD_* below defines what goes in struct optee_msg_arg::cmd
*
@@ -325,15 +349,63 @@ struct optee_msg_arg {
 * OPTEE_MSG_CMD_STOP_ASYNC_NOTIF informs secure world that from now on
 * the normal world is unable to process asynchronous notifications.
 * Typically used when the driver is shut down.
+ *
+ * OPTEE_MSG_CMD_LEND_PROTMEM lends protected memory. The passed normal
+ * physical memory is protected from normal world access. The memory
+ * should be unmapped prior to this call since it becomes inaccessible
+ * during the request.
+ * Parameters are passed as:
+ * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_VALUE_INPUT
+ * [in] param[0].u.value.a OPTEE_MSG_PROTMEM_* defined above
+ * [in] param[1].attr OPTEE_MSG_ATTR_TYPE_TMEM_INPUT
+ * [in] param[1].u.tmem.buf_ptr physical address
+ * [in] param[1].u.tmem.size size
+ * [in] param[1].u.tmem.shm_ref holds protected memory reference
+ *
+ * OPTEE_MSG_CMD_RECLAIM_PROTMEM reclaims a previously lent protected
+ * memory reference. The physical memory is accessible by the normal world
+ * after this function has returned and can be mapped again. The information
+ * is passed as:
+ * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_VALUE_INPUT
+ * [in] param[0].u.value.a holds protected memory cookie
+ *
+ * OPTEE_MSG_CMD_GET_PROTMEM_CONFIG get configuration for a specific
+ * protected memory use case. Parameters are passed as:
+ * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_VALUE_INOUT
+ * [in] param[0].value.a OPTEE_MSG_PROTMEM_*
+ * [in] param[1].attr OPTEE_MSG_ATTR_TYPE_{R,F}MEM_OUTPUT
+ * [in] param[1].u.{r,f}mem Buffer or NULL
+ * [in] param[1].u.{r,f}mem.size Provided size of buffer or 0 for query
+ * output for the protected use case:
+ * [out] param[0].value.a Minimal size of protected memory
+ * [out] param[0].value.b Required alignment of size and start of
+ * protected memory
+ * [out] param[0].value.c PA width, max 64
+ * [out] param[1].{r,f}mem.size Size of output data
+ * [out] param[1].{r,f}mem If non-NULL, contains an array of
+ * uint32_t memory attributes that must be
+ * included when lending memory for this
+ * use case
+ *
+ * OPTEE_MSG_CMD_ASSIGN_PROTMEM assigns use-case to protected memory
+ * previously lent using the FFA_LEND framework ABI. Parameters are passed
+ * as:
+ * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_VALUE_INPUT
+ * [in] param[0].u.value.a holds protected memory cookie
+ * [in] param[0].u.value.b OPTEE_MSG_PROTMEM_* defined above
*/
-#define OPTEE_MSG_CMD_OPEN_SESSION 0
-#define OPTEE_MSG_CMD_INVOKE_COMMAND 1
-#define OPTEE_MSG_CMD_CLOSE_SESSION 2
-#define OPTEE_MSG_CMD_CANCEL 3
-#define OPTEE_MSG_CMD_REGISTER_SHM 4
-#define OPTEE_MSG_CMD_UNREGISTER_SHM 5
-#define OPTEE_MSG_CMD_DO_BOTTOM_HALF 6
-#define OPTEE_MSG_CMD_STOP_ASYNC_NOTIF 7
-#define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004
+#define OPTEE_MSG_CMD_OPEN_SESSION 0
+#define OPTEE_MSG_CMD_INVOKE_COMMAND 1
+#define OPTEE_MSG_CMD_CLOSE_SESSION 2
+#define OPTEE_MSG_CMD_CANCEL 3
+#define OPTEE_MSG_CMD_REGISTER_SHM 4
+#define OPTEE_MSG_CMD_UNREGISTER_SHM 5
+#define OPTEE_MSG_CMD_DO_BOTTOM_HALF 6
+#define OPTEE_MSG_CMD_STOP_ASYNC_NOTIF 7
+#define OPTEE_MSG_CMD_LEND_PROTMEM 8
+#define OPTEE_MSG_CMD_RECLAIM_PROTMEM 9
+#define OPTEE_MSG_CMD_GET_PROTMEM_CONFIG 10
+#define OPTEE_MSG_CMD_ASSIGN_PROTMEM 11
+#define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004
#endif /* _OPTEE_MSG_H */
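A sketch of the query form of OPTEE_MSG_CMD_GET_PROTMEM_CONFIG documented above, using the same msg-arg helpers that appear elsewhere in this diff; a zero-sized output buffer first asks secure world for the required size (error handling omitted, and the exact flow in protmem.c may differ):

	msg_arg->cmd = OPTEE_MSG_CMD_GET_PROTMEM_CONFIG;
	msg_arg->num_params = 2;
	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INOUT;
	msg_arg->params[0].u.value.a = OPTEE_MSG_PROTMEM_SECURE_VIDEO_PLAY;
	msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT;
	msg_arg->params[1].u.rmem.size = 0;	/* query required buffer size */

	rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false);
	/*
	 * On success: params[0].u.value.a holds the minimal protected-memory
	 * size, .b the required alignment and .c the PA width (max 64).
	 */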
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index 04ae58892608..db9ea673fbca 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -1,15 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2015-2021, Linaro Limited
+ * Copyright (c) 2015-2021, 2023 Linaro Limited
*/
#ifndef OPTEE_PRIVATE_H
#define OPTEE_PRIVATE_H
#include <linux/arm-smccc.h>
+#include <linux/notifier.h>
#include <linux/rhashtable.h>
+#include <linux/rpmb.h>
#include <linux/semaphore.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include <linux/types.h>
#include "optee_msg.h"
@@ -20,12 +22,17 @@
/* Some Global Platform error codes used in this driver */
#define TEEC_SUCCESS 0x00000000
#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006
+#define TEEC_ERROR_ITEM_NOT_FOUND 0xFFFF0008
#define TEEC_ERROR_NOT_SUPPORTED 0xFFFF000A
#define TEEC_ERROR_COMMUNICATION 0xFFFF000E
#define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C
#define TEEC_ERROR_BUSY 0xFFFF000D
#define TEEC_ERROR_SHORT_BUFFER 0xFFFF0010
+/* API Return Codes are from the GP TEE Internal Core API Specification */
+#define TEE_ERROR_TIMEOUT 0xFFFF3001
+#define TEE_ERROR_STORAGE_NOT_AVAILABLE 0xF0100003
+
#define TEEC_ORIGIN_COMMS 0x00000002
/*
@@ -40,15 +47,33 @@ typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long,
struct arm_smccc_res *);
+/*
+ * struct optee_call_waiter - TEE entry may need to wait for a free TEE thread
+ * @list_node Reference in waiters list
+ * @c Waiting completion reference
+ * @sys_thread True if waiter belongs to a system thread
+ */
struct optee_call_waiter {
struct list_head list_node;
struct completion c;
+ bool sys_thread;
};
+/*
+ * struct optee_call_queue - OP-TEE call queue management
+ * @mutex Serializes access to this struct
+ * @waiters List of threads waiting to enter OP-TEE
+ * @total_thread_count Overall number of thread contexts in OP-TEE or 0
+ * @free_thread_count Number of free thread contexts in OP-TEE
+ * @sys_thread_req_count Number of registered system thread sessions
+ */
struct optee_call_queue {
/* Serializes access to this struct */
struct mutex mutex;
struct list_head waiters;
+ int total_thread_count;
+ int free_thread_count;
+ int sys_thread_req_count;
};
struct optee_notif {
@@ -94,26 +119,54 @@ struct optee_supp {
struct completion reqs_c;
};
+/*
+ * struct optee_pcpu - per cpu notif private struct passed to work functions
+ * @optee optee device reference
+ */
+struct optee_pcpu {
+ struct optee *optee;
+};
+
+/*
+ * struct optee_smc - optee smc communication struct
+ * @invoke_fn handler function to invoke secure monitor
+ * @memremaped_shm virtual address of memory in shared memory pool
+ * @sec_caps: secure world capabilities defined by
+ * OPTEE_SMC_SEC_CAP_* in optee_smc.h
+ * @notif_irq interrupt used as async notification by OP-TEE or 0
+ * @optee_pcpu per_cpu optee instance for per cpu work or NULL
+ * @notif_pcpu_wq workqueue for per cpu asynchronous notification or NULL
+ * @notif_pcpu_work work for per cpu asynchronous notification
+ * @notif_cpuhp_state CPU hotplug state assigned for pcpu interrupt management
+ */
struct optee_smc {
optee_invoke_fn *invoke_fn;
void *memremaped_shm;
u32 sec_caps;
unsigned int notif_irq;
+ struct optee_pcpu __percpu *optee_pcpu;
+ struct workqueue_struct *notif_pcpu_wq;
+ struct work_struct notif_pcpu_work;
+ unsigned int notif_cpuhp_state;
};
/**
* struct optee_ffa_data - FFA communication struct
* @ffa_dev FFA device, contains the destination id, the id of
* OP-TEE in secure world
- * @ffa_ops FFA operations
+ * @bottom_half_value Notification ID used for bottom half signalling or
+ * U32_MAX if unused
* @mutex Serializes access to @global_ids
* @global_ids FF-A shared memory global handle translation
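+ * @notif_wq	workqueue for asynchronous notification processing
+ * @notif_work	work queued to process asynchronous notifications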
*/
struct optee_ffa {
struct ffa_device *ffa_dev;
+ u32 bottom_half_value;
/* Serializes access to @global_ids */
struct mutex mutex;
struct rhashtable global_ids;
+ struct workqueue_struct *notif_wq;
+ struct work_struct notif_work;
};
struct optee;
@@ -123,20 +176,30 @@ struct optee;
* @do_call_with_arg: enters OP-TEE in secure world
* @to_msg_param: converts from struct tee_param to OPTEE_MSG parameters
* @from_msg_param: converts from OPTEE_MSG parameters to struct tee_param
+ * @lend_protmem:	lends physically contiguous memory as protected
+ *			memory, inaccessible by the kernel
+ * @reclaim_protmem:	reclaims protected memory previously lent with
+ *			@lend_protmem() and makes it accessible by the
+ *			kernel again
*
* These OPs are only supposed to be used internally in the OP-TEE driver
- * as a way of abstracting the different methogs of entering OP-TEE in
+ * as a way of abstracting the different methods of entering OP-TEE in
* secure world.
*/
struct optee_ops {
int (*do_call_with_arg)(struct tee_context *ctx,
- struct tee_shm *shm_arg, u_int offs);
+ struct tee_shm *shm_arg, u_int offs,
+ bool system_thread);
int (*to_msg_param)(struct optee *optee,
struct optee_msg_param *msg_params,
size_t num_params, const struct tee_param *params);
int (*from_msg_param)(struct optee *optee, struct tee_param *params,
size_t num_params,
const struct optee_msg_param *msg_params);
+ int (*lend_protmem)(struct optee *optee, struct tee_shm *protmem,
+ u32 *mem_attr, unsigned int ma_count,
+ u32 use_case);
+ int (*reclaim_protmem)(struct optee *optee, struct tee_shm *protmem);
};
/**
@@ -152,9 +215,14 @@ struct optee_ops {
* @notif: notification synchronization struct
* @supp: supplicant synchronization struct for RPC to supplicant
* @pool: shared memory pool
+ * @rpmb_dev_mutex:	mutex protecting @rpmb_dev
+ * @rpmb_dev: current RPMB device or NULL
+ * @rpmb_scan_bus_done	flag if device registration of RPMB dependent devices
+ *			was already done
+ * @rpmb_scan_bus_work	workq to scan for an RPMB device and to scan the
+ *			optee bus and register RPMB dependent optee drivers
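+ * @rpmb_intf		notifier block registered on the RPMB interface
+ *			added notifier chain
+ * @in_kernel_rpmb_routing
+ *			true if RPMB frames are routed in-kernel instead of
+ *			via tee-supplicant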
* @rpc_param_count: If > 0 number of RPC parameters to make room for
 * @scan_bus_done	flag if device registration was already done.
- * @scan_bus_wq workqueue to scan optee bus and register optee drivers
* @scan_bus_work workq to scan optee bus and register optee drivers
*/
struct optee {
@@ -171,15 +239,22 @@ struct optee {
struct optee_notif notif;
struct optee_supp supp;
struct tee_shm_pool *pool;
+ /* Protects rpmb_dev pointer */
+ struct mutex rpmb_dev_mutex;
+ struct rpmb_dev *rpmb_dev;
+ struct notifier_block rpmb_intf;
unsigned int rpc_param_count;
- bool scan_bus_done;
- struct workqueue_struct *scan_bus_wq;
+ bool scan_bus_done;
+ bool rpmb_scan_bus_done;
+ bool in_kernel_rpmb_routing;
struct work_struct scan_bus_work;
+ struct work_struct rpmb_scan_bus_work;
};
struct optee_session {
struct list_head list_node;
u32 session_id;
+ bool use_sys_thread;
};
struct optee_context_data {
@@ -206,19 +281,23 @@ struct optee_call_ctx {
size_t num_entries;
};
+extern struct blocking_notifier_head optee_rpmb_intf_added;
+
+int optee_set_dma_mask(struct optee *optee, u_int pa_width);
+
int optee_notif_init(struct optee *optee, u_int max_key);
void optee_notif_uninit(struct optee *optee);
-int optee_notif_wait(struct optee *optee, u_int key);
+int optee_notif_wait(struct optee *optee, u_int key, u32 timeout);
int optee_notif_send(struct optee *optee, u_int key);
u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
struct tee_param *param);
-int optee_supp_read(struct tee_context *ctx, void __user *buf, size_t len);
-int optee_supp_write(struct tee_context *ctx, void __user *buf, size_t len);
void optee_supp_init(struct optee_supp *supp);
void optee_supp_uninit(struct optee_supp *supp);
void optee_supp_release(struct optee_supp *supp);
+struct tee_protmem_pool *optee_protmem_alloc_dyn_pool(struct optee *optee,
+ enum tee_dma_heap_id id);
int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
struct tee_param *param);
@@ -228,7 +307,9 @@ int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
int optee_open_session(struct tee_context *ctx,
struct tee_ioctl_open_session_arg *arg,
struct tee_param *param);
-int optee_close_session_helper(struct tee_context *ctx, u32 session);
+int optee_system_session(struct tee_context *ctx, u32 session);
+int optee_close_session_helper(struct tee_context *ctx, u32 session,
+ bool system_thread);
int optee_close_session(struct tee_context *ctx, u32 session);
int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
struct tee_param *param);
@@ -236,21 +317,14 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
#define PTA_CMD_GET_DEVICES 0x0
#define PTA_CMD_GET_DEVICES_SUPP 0x1
+#define PTA_CMD_GET_DEVICES_RPMB 0x2
int optee_enumerate_devices(u32 func);
void optee_unregister_devices(void);
+void optee_bus_scan_rpmb(struct work_struct *work);
+int optee_rpmb_intf_rdev(struct notifier_block *intf, unsigned long action,
+ void *data);
-int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
- size_t size, size_t align,
- int (*shm_register)(struct tee_context *ctx,
- struct tee_shm *shm,
- struct page **pages,
- size_t num_pages,
- unsigned long start));
-void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
- int (*shm_unregister)(struct tee_context *ctx,
- struct tee_shm *shm));
-
-
+void optee_set_dev_group(struct optee *optee);
void optee_remove_common(struct optee *optee);
int optee_open(struct tee_context *ctx, bool cap_memref_null);
void optee_release(struct tee_context *ctx);
@@ -276,8 +350,9 @@ static inline void optee_to_msg_param_value(struct optee_msg_param *mp,
mp->u.value.c = p->u.value.c;
}
+void optee_cq_init(struct optee_call_queue *cq, int thread_count);
void optee_cq_wait_init(struct optee_call_queue *cq,
- struct optee_call_waiter *w);
+ struct optee_call_waiter *w, bool sys_thread);
void optee_cq_wait_for_completion(struct optee_call_queue *cq,
struct optee_call_waiter *w);
void optee_cq_wait_final(struct optee_call_queue *cq,
@@ -301,6 +376,9 @@ void optee_rpc_cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm);
void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee,
struct optee_msg_arg *arg);
+int optee_do_bottom_half(struct tee_context *ctx);
+int optee_stop_async_notif(struct tee_context *ctx);
+
/*
* Small helpers
*/
diff --git a/drivers/tee/optee/optee_rpc_cmd.h b/drivers/tee/optee/optee_rpc_cmd.h
index f3f06e0994a7..87a59cc03480 100644
--- a/drivers/tee/optee/optee_rpc_cmd.h
+++ b/drivers/tee/optee/optee_rpc_cmd.h
@@ -41,6 +41,7 @@
* Waiting on notification
* [in] value[0].a OPTEE_RPC_NOTIFICATION_WAIT
* [in] value[0].b notification value
+ * [in] value[0].c timeout in milliseconds or 0 if no timeout
*
* Sending a synchronous notification
* [in] value[0].a OPTEE_RPC_NOTIFICATION_SEND
@@ -103,4 +104,39 @@
/* I2C master control flags */
#define OPTEE_RPC_I2C_FLAGS_TEN_BIT BIT(0)
+/*
+ * Reset RPMB probing
+ *
+ * Releases a possibly already used RPMB device and starts over searching
+ * for RPMB devices. Returns the kind of shared memory to use in subsequent
+ * OPTEE_RPC_CMD_RPMB_PROBE_NEXT and OPTEE_RPC_CMD_RPMB calls.
+ *
+ * [out] value[0].a OPTEE_RPC_SHM_TYPE_*, the parameter for
+ * OPTEE_RPC_CMD_SHM_ALLOC
+ */
+#define OPTEE_RPC_CMD_RPMB_PROBE_RESET 22
+
+/*
+ * Probe next RPMB device
+ *
+ * [out] value[0].a Type of RPMB device, OPTEE_RPC_RPMB_*
+ * [out] value[0].b EXT CSD-slice 168 "RPMB Size"
+ * [out] value[0].c EXT CSD-slice 222 "Reliable Write Sector Count"
+ * [out] memref[1] Buffer with the raw CID
+ */
+#define OPTEE_RPC_CMD_RPMB_PROBE_NEXT 23
+
+/* Type of RPMB device */
+#define OPTEE_RPC_RPMB_EMMC 0
+#define OPTEE_RPC_RPMB_UFS 1
+#define OPTEE_RPC_RPMB_NVME 2
+
+/*
+ * Replay Protected Memory Block access
+ *
+ * [in] memref[0] Frames to device
+ * [out] memref[1] Frames from device
+ */
+#define OPTEE_RPC_CMD_RPMB_FRAMES 24
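+
+/*
+ * The in-kernel RPC handlers in rpc.c implement the probe flow as:
+ * one OPTEE_RPC_CMD_RPMB_PROBE_RESET, then OPTEE_RPC_CMD_RPMB_PROBE_NEXT
+ * repeated until it returns TEEC_ERROR_ITEM_NOT_FOUND, and finally
+ * OPTEE_RPC_CMD_RPMB_FRAMES against the most recently probed device.
+ */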
+
#endif /*__OPTEE_RPC_CMD_H*/
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
index 73b5e7760d10..accf76a99288 100644
--- a/drivers/tee/optee/optee_smc.h
+++ b/drivers/tee/optee/optee_smc.h
@@ -105,6 +105,30 @@ struct optee_smc_call_get_os_revision_result {
};
/*
+ * Load the Trusted OS from optee/tee.bin via the Linux firmware interface.
+ *
+ * WARNING: Use this cautiously as it could lead to insecure loading of the
+ * Trusted OS.
+ * This SMC instructs EL3 to load a binary and execute it as the Trusted OS.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_LOAD_IMAGE
+ * a1 Upper 32bit of a 64bit size for the payload
+ * a2 Lower 32bit of a 64bit size for the payload
+ * a3 Upper 32bit of the physical address for the payload
+ * a4 Lower 32bit of the physical address for the payload
+ *
+ * The payload is in the OP-TEE image format.
+ *
+ * Returns result in a0, 0 on success and an error code otherwise.
+ */
+#define OPTEE_SMC_FUNCID_LOAD_IMAGE 2
+#define OPTEE_SMC_CALL_LOAD_IMAGE \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_TRUSTED_OS_END, \
+ OPTEE_SMC_FUNCID_LOAD_IMAGE)
+
+/*
* Call with struct optee_msg_arg as argument
*
* When called with OPTEE_SMC_CALL_WITH_RPC_ARG or
@@ -240,7 +264,6 @@ struct optee_smc_get_shm_config_result {
#define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM BIT(0)
/* Secure world can communicate via previously unregistered shared memory */
#define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM BIT(1)
-
/*
* Secure world supports commands "register/unregister shared memory",
* secure world accepts command buffers located in any parts of non-secure RAM
@@ -254,6 +277,12 @@ struct optee_smc_get_shm_config_result {
#define OPTEE_SMC_SEC_CAP_ASYNC_NOTIF BIT(5)
/* Secure world supports pre-allocating RPC arg struct */
#define OPTEE_SMC_SEC_CAP_RPC_ARG BIT(6)
+/* Secure world supports probing for RPMB device if needed */
+#define OPTEE_SMC_SEC_CAP_RPMB_PROBE BIT(7)
+/* Secure world supports protected memory */
+#define OPTEE_SMC_SEC_CAP_PROTMEM BIT(8)
+/* Secure world supports dynamic protected memory */
+#define OPTEE_SMC_SEC_CAP_DYNAMIC_PROTMEM BIT(9)
#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9
#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
@@ -425,6 +454,38 @@ struct optee_smc_disable_shm_cache_result {
/* See OPTEE_SMC_CALL_WITH_REGD_ARG above */
#define OPTEE_SMC_FUNCID_CALL_WITH_REGD_ARG 19
+/*
+ * Get protected memory config
+ *
+ * Returns the protected memory config.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_GET_PROTMEM_CONFIG
+ * a1-6	Not used, must be zero
+ * a7 Hypervisor Client ID register
+ *
+ * Have config return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 Physical address of start of protected memory
+ * a2 Size of protected memory
+ * a3 PA width, max 64
+ * a4-7 Preserved
+ *
+ * Not available register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-3 Not used
+ * a4-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_GET_PROTMEM_CONFIG 20
+#define OPTEE_SMC_GET_PROTMEM_CONFIG \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_PROTMEM_CONFIG)
+
+struct optee_smc_get_protmem_config_result {
+ unsigned long status;
+ unsigned long start;
+ unsigned long size;
+ unsigned long pa_width;
+};
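+
+/*
+ * A typical caller reads the result through the same union pattern used
+ * for the other result structs in this driver, for example:
+ *
+ *	union {
+ *		struct arm_smccc_res smccc;
+ *		struct optee_smc_get_protmem_config_result result;
+ *	} res;
+ *
+ *	invoke_fn(OPTEE_SMC_GET_PROTMEM_CONFIG, 0, 0, 0, 0, 0, 0, 0,
+ *		  &res.smccc);
+ *	if (res.result.status == OPTEE_SMC_RETURN_OK)
+ *		... use res.result.start, .size and .pa_width ...
+ */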
/*
* Resume from RPC (for example after processing a foreign interrupt)
diff --git a/drivers/tee/optee/protmem.c b/drivers/tee/optee/protmem.c
new file mode 100644
index 000000000000..2eba48d5ac73
--- /dev/null
+++ b/drivers/tee/optee/protmem.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Linaro Limited
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/errno.h>
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tee_core.h>
+#include <linux/types.h>
+#include "optee_private.h"
+
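+/*
+ * struct optee_protmem_dyn_pool - dynamically lent protected memory pool
+ * @pool:		base protected memory pool handed to the TEE core
+ * @gen_pool:		allocator for the lent range, NULL while torn down
+ * @optee:		OP-TEE driver instance owning this pool
+ * @page_count:		number of pages to lend to secure world
+ * @mem_attrs:		memory attributes reported by secure world
+ * @mem_attr_count:	number of entries in @mem_attrs
+ * @refcount:		number of outstanding allocations, the backing
+ *			memory is reclaimed when it drops to zero
+ * @use_case:		protected memory use case, an enum tee_dma_heap_id
+ *			value
+ * @protmem:		tee_shm covering the memory lent to secure world
+ * @mutex:		protects initializing and tearing down this struct
+ */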
+struct optee_protmem_dyn_pool {
+ struct tee_protmem_pool pool;
+ struct gen_pool *gen_pool;
+ struct optee *optee;
+ size_t page_count;
+ u32 *mem_attrs;
+ u_int mem_attr_count;
+ refcount_t refcount;
+ u32 use_case;
+ struct tee_shm *protmem;
+ /* Protects when initializing and tearing down this struct */
+ struct mutex mutex;
+};
+
+static struct optee_protmem_dyn_pool *
+to_protmem_dyn_pool(struct tee_protmem_pool *pool)
+{
+ return container_of(pool, struct optee_protmem_dyn_pool, pool);
+}
+
+static int init_dyn_protmem(struct optee_protmem_dyn_pool *rp)
+{
+ int rc;
+
+ rp->protmem = tee_shm_alloc_dma_mem(rp->optee->ctx, rp->page_count);
+ if (IS_ERR(rp->protmem)) {
+ rc = PTR_ERR(rp->protmem);
+ goto err_null_protmem;
+ }
+
+ /*
+	 * TODO: unmap the memory range since the physical memory will
+	 * become inaccessible after the lend_protmem() call.
+ *
+ * If the platform supports a hypervisor at EL2, it will unmap the
+ * intermediate physical memory for us and stop cache pre-fetch of
+ * the memory.
+ */
+ rc = rp->optee->ops->lend_protmem(rp->optee, rp->protmem,
+ rp->mem_attrs,
+ rp->mem_attr_count, rp->use_case);
+ if (rc)
+ goto err_put_shm;
+ rp->protmem->flags |= TEE_SHM_DYNAMIC;
+
+ rp->gen_pool = gen_pool_create(PAGE_SHIFT, -1);
+ if (!rp->gen_pool) {
+ rc = -ENOMEM;
+ goto err_reclaim;
+ }
+
+ rc = gen_pool_add(rp->gen_pool, rp->protmem->paddr,
+ rp->protmem->size, -1);
+ if (rc)
+ goto err_free_pool;
+
+ refcount_set(&rp->refcount, 1);
+ return 0;
+
+err_free_pool:
+ gen_pool_destroy(rp->gen_pool);
+ rp->gen_pool = NULL;
+err_reclaim:
+ rp->optee->ops->reclaim_protmem(rp->optee, rp->protmem);
+err_put_shm:
+ tee_shm_put(rp->protmem);
+err_null_protmem:
+ rp->protmem = NULL;
+ return rc;
+}
+
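+/*
+ * The pool is initialized lazily on first allocation: a nonzero refcount
+ * means another allocation already holds the pool alive and a plain
+ * increment is enough. Otherwise rp->mutex serializes against concurrent
+ * initialization and teardown, and rp->gen_pool tells which side of that
+ * race we are on.
+ */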
+static int get_dyn_protmem(struct optee_protmem_dyn_pool *rp)
+{
+ int rc = 0;
+
+ if (!refcount_inc_not_zero(&rp->refcount)) {
+ mutex_lock(&rp->mutex);
+ if (rp->gen_pool) {
+ /*
+ * Another thread has already initialized the pool
+ * before us, or the pool was just about to be torn
+ * down. Either way we only need to increase the
+ * refcount and we're done.
+ */
+ refcount_inc(&rp->refcount);
+ } else {
+ rc = init_dyn_protmem(rp);
+ }
+ mutex_unlock(&rp->mutex);
+ }
+
+ return rc;
+}
+
+static void release_dyn_protmem(struct optee_protmem_dyn_pool *rp)
+{
+ gen_pool_destroy(rp->gen_pool);
+ rp->gen_pool = NULL;
+
+ rp->optee->ops->reclaim_protmem(rp->optee, rp->protmem);
+ rp->protmem->flags &= ~TEE_SHM_DYNAMIC;
+
+ WARN(refcount_read(&rp->protmem->refcount) != 1, "Unexpected refcount");
+ tee_shm_put(rp->protmem);
+ rp->protmem = NULL;
+}
+
+static void put_dyn_protmem(struct optee_protmem_dyn_pool *rp)
+{
+ if (refcount_dec_and_test(&rp->refcount)) {
+ mutex_lock(&rp->mutex);
+ if (rp->gen_pool)
+ release_dyn_protmem(rp);
+ mutex_unlock(&rp->mutex);
+ }
+}
+
+static int protmem_pool_op_dyn_alloc(struct tee_protmem_pool *pool,
+ struct sg_table *sgt, size_t size,
+ size_t *offs)
+{
+ struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool);
+ size_t sz = ALIGN(size, PAGE_SIZE);
+ phys_addr_t pa;
+ int rc;
+
+ rc = get_dyn_protmem(rp);
+ if (rc)
+ return rc;
+
+ pa = gen_pool_alloc(rp->gen_pool, sz);
+ if (!pa) {
+ rc = -ENOMEM;
+ goto err_put;
+ }
+
+ rc = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (rc)
+ goto err_free;
+
+	sg_set_page(sgt->sgl, phys_to_page(pa), sz, 0);
+ *offs = pa - rp->protmem->paddr;
+
+ return 0;
+err_free:
+	gen_pool_free(rp->gen_pool, pa, sz);
+err_put:
+ put_dyn_protmem(rp);
+
+ return rc;
+}
+
+static void protmem_pool_op_dyn_free(struct tee_protmem_pool *pool,
+ struct sg_table *sgt)
+{
+ struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool);
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sgtable_sg(sgt, sg, i)
+ gen_pool_free(rp->gen_pool, sg_phys(sg), sg->length);
+ sg_free_table(sgt);
+ put_dyn_protmem(rp);
+}
+
+static int protmem_pool_op_dyn_update_shm(struct tee_protmem_pool *pool,
+ struct sg_table *sgt, size_t offs,
+ struct tee_shm *shm,
+ struct tee_shm **parent_shm)
+{
+ struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool);
+
+ *parent_shm = rp->protmem;
+
+ return 0;
+}
+
+static void pool_op_dyn_destroy_pool(struct tee_protmem_pool *pool)
+{
+ struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool);
+
+ mutex_destroy(&rp->mutex);
+ kfree(rp);
+}
+
+static struct tee_protmem_pool_ops protmem_pool_ops_dyn = {
+ .alloc = protmem_pool_op_dyn_alloc,
+ .free = protmem_pool_op_dyn_free,
+ .update_shm = protmem_pool_op_dyn_update_shm,
+ .destroy_pool = pool_op_dyn_destroy_pool,
+};
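+
+/*
+ * A minimal usage sketch of the ops above (illustrative only, based on
+ * the signatures; the real caller is the TEE core DMA-heap code): each
+ * alloc() is paired with a free() on the same sg_table, and update_shm()
+ * reports the parent tee_shm backing the allocation.
+ *
+ *	struct tee_shm *parent;
+ *	struct sg_table sgt;
+ *	size_t offs;
+ *
+ *	if (!pool->ops->alloc(pool, &sgt, size, &offs)) {
+ *		pool->ops->update_shm(pool, &sgt, offs, shm, &parent);
+ *		...
+ *		pool->ops->free(pool, &sgt);
+ *	}
+ */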
+
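+/*
+ * Queries secure world for the protected memory configuration of
+ * @use_case. Intended to be called twice: first with @mem_attrs == NULL
+ * to learn the attribute count (reported with TEEC_ERROR_SHORT_BUFFER,
+ * mapped to -ENOSPC), then again with a buffer large enough for
+ * @ma_count entries.
+ */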
+static int get_protmem_config(struct optee *optee, u32 use_case,
+ size_t *min_size, u_int *pa_width,
+ u32 *mem_attrs, u_int *ma_count)
+{
+ struct tee_param params[2] = {
+ [0] = {
+ .attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT,
+ .u.value.a = use_case,
+ },
+ [1] = {
+ .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT,
+ },
+ };
+ struct optee_shm_arg_entry *entry;
+ struct tee_shm *shm_param = NULL;
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm;
+ u_int offs;
+ int rc;
+
+ if (mem_attrs && *ma_count) {
+ params[1].u.memref.size = *ma_count * sizeof(*mem_attrs);
+ shm_param = tee_shm_alloc_priv_buf(optee->ctx,
+ params[1].u.memref.size);
+ if (IS_ERR(shm_param))
+ return PTR_ERR(shm_param);
+ params[1].u.memref.shm = shm_param;
+ }
+
+ msg_arg = optee_get_msg_arg(optee->ctx, ARRAY_SIZE(params), &entry,
+ &shm, &offs);
+ if (IS_ERR(msg_arg)) {
+ rc = PTR_ERR(msg_arg);
+ goto out_free_shm;
+ }
+ msg_arg->cmd = OPTEE_MSG_CMD_GET_PROTMEM_CONFIG;
+
+ rc = optee->ops->to_msg_param(optee, msg_arg->params,
+ ARRAY_SIZE(params), params);
+ if (rc)
+ goto out_free_msg;
+
+ rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false);
+ if (rc)
+ goto out_free_msg;
+ if (msg_arg->ret && msg_arg->ret != TEEC_ERROR_SHORT_BUFFER) {
+ rc = -EINVAL;
+ goto out_free_msg;
+ }
+
+ rc = optee->ops->from_msg_param(optee, params, ARRAY_SIZE(params),
+ msg_arg->params);
+ if (rc)
+ goto out_free_msg;
+
+ if (!msg_arg->ret && mem_attrs &&
+ *ma_count < params[1].u.memref.size / sizeof(*mem_attrs)) {
+ rc = -EINVAL;
+ goto out_free_msg;
+ }
+
+ *min_size = params[0].u.value.a;
+ *pa_width = params[0].u.value.c;
+ *ma_count = params[1].u.memref.size / sizeof(*mem_attrs);
+
+ if (msg_arg->ret == TEEC_ERROR_SHORT_BUFFER) {
+ rc = -ENOSPC;
+ goto out_free_msg;
+ }
+
+ if (mem_attrs)
+ memcpy(mem_attrs, tee_shm_get_va(shm_param, 0),
+ params[1].u.memref.size);
+
+out_free_msg:
+ optee_free_msg_arg(optee->ctx, entry, offs);
+out_free_shm:
+ if (shm_param)
+ tee_shm_free(shm_param);
+ return rc;
+}
+
+struct tee_protmem_pool *optee_protmem_alloc_dyn_pool(struct optee *optee,
+ enum tee_dma_heap_id id)
+{
+ struct optee_protmem_dyn_pool *rp;
+ size_t min_size;
+ u_int pa_width;
+ int rc;
+
+ rp = kzalloc(sizeof(*rp), GFP_KERNEL);
+ if (!rp)
+ return ERR_PTR(-ENOMEM);
+ rp->use_case = id;
+
+ rc = get_protmem_config(optee, id, &min_size, &pa_width, NULL,
+ &rp->mem_attr_count);
+ if (rc) {
+ if (rc != -ENOSPC)
+ goto err;
+ rp->mem_attrs = kcalloc(rp->mem_attr_count,
+ sizeof(*rp->mem_attrs), GFP_KERNEL);
+ if (!rp->mem_attrs) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ rc = get_protmem_config(optee, id, &min_size, &pa_width,
+ rp->mem_attrs, &rp->mem_attr_count);
+ if (rc)
+ goto err_kfree_eps;
+ }
+
+ rc = optee_set_dma_mask(optee, pa_width);
+ if (rc)
+ goto err_kfree_eps;
+
+ rp->pool.ops = &protmem_pool_ops_dyn;
+ rp->optee = optee;
+ rp->page_count = min_size / PAGE_SIZE;
+ mutex_init(&rp->mutex);
+
+ return &rp->pool;
+
+err_kfree_eps:
+ kfree(rp->mem_attrs);
+err:
+ kfree(rp);
+ return ERR_PTR(rc);
+}
diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
index e69bc6380683..ebbbd42b0e3e 100644
--- a/drivers/tee/optee/rpc.c
+++ b/drivers/tee/optee/rpc.c
@@ -7,8 +7,9 @@
#include <linux/delay.h>
#include <linux/i2c.h>
+#include <linux/rpmb.h>
#include <linux/slab.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include "optee_private.h"
#include "optee_rpc_cmd.h"
@@ -130,6 +131,8 @@ static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
static void handle_rpc_func_cmd_wq(struct optee *optee,
struct optee_msg_arg *arg)
{
+ int rc = 0;
+
if (arg->num_params != 1)
goto bad;
@@ -139,7 +142,8 @@ static void handle_rpc_func_cmd_wq(struct optee *optee,
switch (arg->params[0].u.value.a) {
case OPTEE_RPC_NOTIFICATION_WAIT:
- if (optee_notif_wait(optee, arg->params[0].u.value.b))
+		rc = optee_notif_wait(optee, arg->params[0].u.value.b,
+				      arg->params[0].u.value.c);
+ if (rc)
goto bad;
break;
case OPTEE_RPC_NOTIFICATION_SEND:
@@ -153,7 +157,10 @@ static void handle_rpc_func_cmd_wq(struct optee *optee,
arg->ret = TEEC_SUCCESS;
return;
bad:
- arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ if (rc == -ETIMEDOUT)
+ arg->ret = TEE_ERROR_TIMEOUT;
+ else
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
}
static void handle_rpc_func_cmd_wait(struct optee_msg_arg *arg)
@@ -255,6 +262,154 @@ void optee_rpc_cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm)
optee_supp_thrd_req(ctx, OPTEE_RPC_CMD_SHM_FREE, 1, &param);
}
+static void handle_rpc_func_rpmb_probe_reset(struct tee_context *ctx,
+ struct optee *optee,
+ struct optee_msg_arg *arg)
+{
+ struct tee_param params[1];
+
+ if (arg->num_params != ARRAY_SIZE(params) ||
+ optee->ops->from_msg_param(optee, params, arg->num_params,
+ arg->params) ||
+ params[0].attr != TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ params[0].u.value.a = OPTEE_RPC_SHM_TYPE_KERNEL;
+ params[0].u.value.b = 0;
+ params[0].u.value.c = 0;
+ if (optee->ops->to_msg_param(optee, arg->params,
+ arg->num_params, params)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ mutex_lock(&optee->rpmb_dev_mutex);
+ rpmb_dev_put(optee->rpmb_dev);
+ optee->rpmb_dev = NULL;
+ mutex_unlock(&optee->rpmb_dev_mutex);
+
+ arg->ret = TEEC_SUCCESS;
+}
+
+static int rpmb_type_to_rpc_type(enum rpmb_type rtype)
+{
+ switch (rtype) {
+ case RPMB_TYPE_EMMC:
+ return OPTEE_RPC_RPMB_EMMC;
+ case RPMB_TYPE_UFS:
+ return OPTEE_RPC_RPMB_UFS;
+ case RPMB_TYPE_NVME:
+ return OPTEE_RPC_RPMB_NVME;
+ default:
+ return -1;
+ }
+}
+
+static int rpc_rpmb_match(struct device *dev, const void *data)
+{
+ struct rpmb_dev *rdev = to_rpmb_dev(dev);
+
+ return rpmb_type_to_rpc_type(rdev->descr.type) >= 0;
+}
+
+static void handle_rpc_func_rpmb_probe_next(struct tee_context *ctx,
+ struct optee *optee,
+ struct optee_msg_arg *arg)
+{
+ struct rpmb_dev *rdev;
+ struct tee_param params[2];
+ void *buf;
+
+ if (arg->num_params != ARRAY_SIZE(params) ||
+ optee->ops->from_msg_param(optee, params, arg->num_params,
+ arg->params) ||
+ params[0].attr != TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT ||
+ params[1].attr != TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+ buf = tee_shm_get_va(params[1].u.memref.shm,
+ params[1].u.memref.shm_offs);
+ if (IS_ERR(buf)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ mutex_lock(&optee->rpmb_dev_mutex);
+ rdev = rpmb_dev_find_device(NULL, optee->rpmb_dev, rpc_rpmb_match);
+ rpmb_dev_put(optee->rpmb_dev);
+ optee->rpmb_dev = rdev;
+ mutex_unlock(&optee->rpmb_dev_mutex);
+
+ if (!rdev) {
+ arg->ret = TEEC_ERROR_ITEM_NOT_FOUND;
+ return;
+ }
+
+ if (params[1].u.memref.size < rdev->descr.dev_id_len) {
+ arg->ret = TEEC_ERROR_SHORT_BUFFER;
+ return;
+ }
+ memcpy(buf, rdev->descr.dev_id, rdev->descr.dev_id_len);
+ params[1].u.memref.size = rdev->descr.dev_id_len;
+ params[0].u.value.a = rpmb_type_to_rpc_type(rdev->descr.type);
+ params[0].u.value.b = rdev->descr.capacity;
+ params[0].u.value.c = rdev->descr.reliable_wr_count;
+ if (optee->ops->to_msg_param(optee, arg->params,
+ arg->num_params, params)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ arg->ret = TEEC_SUCCESS;
+}
+
+static void handle_rpc_func_rpmb_frames(struct tee_context *ctx,
+ struct optee *optee,
+ struct optee_msg_arg *arg)
+{
+ struct tee_param params[2];
+ struct rpmb_dev *rdev;
+ void *p0, *p1;
+
+ mutex_lock(&optee->rpmb_dev_mutex);
+ rdev = rpmb_dev_get(optee->rpmb_dev);
+ mutex_unlock(&optee->rpmb_dev_mutex);
+ if (!rdev) {
+ arg->ret = TEEC_ERROR_ITEM_NOT_FOUND;
+ return;
+ }
+
+ if (arg->num_params != ARRAY_SIZE(params) ||
+ optee->ops->from_msg_param(optee, params, arg->num_params,
+ arg->params) ||
+ params[0].attr != TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT ||
+ params[1].attr != TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ goto out;
+ }
+
+ p0 = tee_shm_get_va(params[0].u.memref.shm,
+ params[0].u.memref.shm_offs);
+ p1 = tee_shm_get_va(params[1].u.memref.shm,
+ params[1].u.memref.shm_offs);
+ if (rpmb_route_frames(rdev, p0, params[0].u.memref.size, p1,
+ params[1].u.memref.size)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ goto out;
+ }
+ if (optee->ops->to_msg_param(optee, arg->params,
+ arg->num_params, params)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ goto out;
+ }
+ arg->ret = TEEC_SUCCESS;
+out:
+ rpmb_dev_put(rdev);
+}
+
void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee,
struct optee_msg_arg *arg)
{
@@ -271,6 +426,34 @@ void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee,
case OPTEE_RPC_CMD_I2C_TRANSFER:
handle_rpc_func_cmd_i2c_transfer(ctx, arg);
break;
+ /*
+ * optee->in_kernel_rpmb_routing true means that OP-TEE supports
+ * in-kernel RPMB routing _and_ that the RPMB subsystem is
+ * reachable. This is reported to user space with
+ * rpmb_routing_model=kernel in sysfs.
+ *
+ * rpmb_routing_model=kernel is also a promise to user space that
+ * RPMB access will not require supplicant support, hence the
+ * checks below.
+ */
+ case OPTEE_RPC_CMD_RPMB_PROBE_RESET:
+ if (optee->in_kernel_rpmb_routing)
+ handle_rpc_func_rpmb_probe_reset(ctx, optee, arg);
+ else
+ handle_rpc_supp_cmd(ctx, optee, arg);
+ break;
+ case OPTEE_RPC_CMD_RPMB_PROBE_NEXT:
+ if (optee->in_kernel_rpmb_routing)
+ handle_rpc_func_rpmb_probe_next(ctx, optee, arg);
+ else
+ handle_rpc_supp_cmd(ctx, optee, arg);
+ break;
+ case OPTEE_RPC_CMD_RPMB_FRAMES:
+ if (optee->in_kernel_rpmb_routing)
+ handle_rpc_func_rpmb_frames(ctx, optee, arg);
+ else
+ handle_rpc_supp_cmd(ctx, optee, arg);
+ break;
default:
handle_rpc_supp_cmd(ctx, optee, arg);
}
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index a1c1fa1a9c28..0be663fcd52b 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -1,26 +1,30 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2015-2021, Linaro Limited
+ * Copyright (c) 2015-2021, 2023 Linaro Limited
* Copyright (c) 2016, EPAM Systems
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/arm-smccc.h>
+#include <linux/cpuhotplug.h>
#include <linux/errno.h>
+#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irqdomain.h>
+#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/rpmb.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include "optee_private.h"
@@ -52,6 +56,23 @@
*/
#define OPTEE_MIN_STATIC_POOL_ALIGN 9 /* 512 bytes aligned */
+/* SMC ABI considers at most a single TEE firmware */
+static unsigned int pcpu_irq_num;
+
+static int optee_cpuhp_enable_pcpu_irq(unsigned int cpu)
+{
+ enable_percpu_irq(pcpu_irq_num, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static int optee_cpuhp_disable_pcpu_irq(unsigned int cpu)
+{
+ disable_percpu_irq(pcpu_irq_num);
+
+ return 0;
+}
+
/*
* 1. Convert between struct tee_param and struct optee_msg_param
*
@@ -263,7 +284,7 @@ static void optee_enable_shm_cache(struct optee *optee)
struct optee_call_waiter w;
/* We need to retry until secure world isn't busy. */
- optee_cq_wait_init(&optee->call_queue, &w);
+ optee_cq_wait_init(&optee->call_queue, &w, false);
while (true) {
struct arm_smccc_res res;
@@ -288,7 +309,7 @@ static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
struct optee_call_waiter w;
/* We need to retry until secure world isn't busy. */
- optee_cq_wait_init(&optee->call_queue, &w);
+ optee_cq_wait_init(&optee->call_queue, &w, false);
while (true) {
union {
struct arm_smccc_res smccc;
@@ -487,7 +508,7 @@ static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
(tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
- if (optee->ops->do_call_with_arg(ctx, shm_arg, 0) ||
+ if (optee->ops->do_call_with_arg(ctx, shm_arg, 0, false) ||
msg_arg->ret != TEEC_SUCCESS)
rc = -EINVAL;
@@ -530,7 +551,7 @@ static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
- if (optee->ops->do_call_with_arg(ctx, shm_arg, 0) ||
+ if (optee->ops->do_call_with_arg(ctx, shm_arg, 0, false) ||
msg_arg->ret != TEEC_SUCCESS)
rc = -EINVAL;
out:
@@ -572,19 +593,18 @@ static int pool_op_alloc(struct tee_shm_pool *pool,
* to be registered with OP-TEE.
*/
if (shm->flags & TEE_SHM_PRIV)
- return optee_pool_op_alloc_helper(pool, shm, size, align, NULL);
+ return tee_dyn_shm_alloc_helper(shm, size, align, NULL);
- return optee_pool_op_alloc_helper(pool, shm, size, align,
- optee_shm_register);
+ return tee_dyn_shm_alloc_helper(shm, size, align, optee_shm_register);
}
static void pool_op_free(struct tee_shm_pool *pool,
struct tee_shm *shm)
{
if (!(shm->flags & TEE_SHM_PRIV))
- optee_pool_op_free_helper(pool, shm, optee_shm_unregister);
+ tee_dyn_shm_free_helper(shm, optee_shm_unregister);
else
- optee_pool_op_free_helper(pool, shm, NULL);
+ tee_dyn_shm_free_helper(shm, NULL);
}
static void pool_op_destroy_pool(struct tee_shm_pool *pool)
@@ -658,10 +678,11 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
struct optee_msg_arg *arg,
struct optee_call_ctx *call_ctx)
{
- phys_addr_t pa;
struct tee_shm *shm;
size_t sz;
size_t n;
+ struct page **pages;
+ size_t page_count;
arg->ret_origin = TEEC_ORIGIN_COMMS;
@@ -696,32 +717,23 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
return;
}
- if (tee_shm_get_pa(shm, 0, &pa)) {
- arg->ret = TEEC_ERROR_BAD_PARAMETERS;
- goto bad;
- }
-
- sz = tee_shm_get_size(shm);
-
- if (tee_shm_is_dynamic(shm)) {
- struct page **pages;
+ /*
+ * If there are pages it's dynamically allocated shared memory (not
+ * from the reserved shared memory pool) and needs to be
+ * registered.
+ */
+ pages = tee_shm_get_pages(shm, &page_count);
+ if (pages) {
u64 *pages_list;
- size_t page_num;
- pages = tee_shm_get_pages(shm, &page_num);
- if (!pages || !page_num) {
- arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
- goto bad;
- }
-
- pages_list = optee_allocate_pages_list(page_num);
+ pages_list = optee_allocate_pages_list(page_count);
if (!pages_list) {
arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
goto bad;
}
call_ctx->pages_list = pages_list;
- call_ctx->num_entries = page_num;
+ call_ctx->num_entries = page_count;
arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
OPTEE_MSG_ATTR_NONCONTIG;
@@ -732,17 +744,22 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
(tee_shm_get_page_offset(shm) &
(OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
- arg->params[0].u.tmem.size = tee_shm_get_size(shm);
- arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
- optee_fill_pages_list(pages_list, pages, page_num,
+ optee_fill_pages_list(pages_list, pages, page_count,
tee_shm_get_page_offset(shm));
} else {
+ phys_addr_t pa;
+
+ if (tee_shm_get_pa(shm, 0, &pa)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ goto bad;
+ }
+
arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
arg->params[0].u.tmem.buf_ptr = pa;
- arg->params[0].u.tmem.size = sz;
- arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
}
+ arg->params[0].u.tmem.size = tee_shm_get_size(shm);
+ arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
arg->ret = TEEC_SUCCESS;
return;
@@ -786,6 +803,7 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
/**
* optee_handle_rpc() - handle RPC from secure world
* @ctx: context doing the RPC
+ * @rpc_arg: pointer to RPC arguments if any, or NULL if none
* @param: value of registers for the RPC
* @call_ctx: call context. Preserved during one OP-TEE invocation
*
@@ -858,6 +876,7 @@ static void optee_handle_rpc(struct tee_context *ctx,
* @ctx: calling context
* @shm: shared memory holding the message to pass to secure world
* @offs: offset of the message in @shm
+ * @system_thread: true if caller requests TEE system thread support
*
 * Does an SMC to OP-TEE in secure world and handles eventual resulting
* Remote Procedure Calls (RPC) from OP-TEE.
@@ -865,7 +884,8 @@ static void optee_handle_rpc(struct tee_context *ctx,
* Returns return code from secure world, 0 is OK
*/
static int optee_smc_do_call_with_arg(struct tee_context *ctx,
- struct tee_shm *shm, u_int offs)
+ struct tee_shm *shm, u_int offs,
+ bool system_thread)
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_call_waiter w;
@@ -906,7 +926,7 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx,
reg_pair_from_64(&param.a1, &param.a2, parg);
}
/* Initialize waiter */
- optee_cq_wait_init(&optee->call_queue, &w);
+ optee_cq_wait_init(&optee->call_queue, &w, system_thread);
while (true) {
struct arm_smccc_res res;
@@ -945,32 +965,68 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx,
return rc;
}
-static int simple_call_with_arg(struct tee_context *ctx, u32 cmd)
+static int optee_smc_lend_protmem(struct optee *optee, struct tee_shm *protmem,
+ u32 *mem_attrs, unsigned int ma_count,
+ u32 use_case)
{
struct optee_shm_arg_entry *entry;
struct optee_msg_arg *msg_arg;
struct tee_shm *shm;
u_int offs;
+ int rc;
- msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
+ msg_arg = optee_get_msg_arg(optee->ctx, 2, &entry, &shm, &offs);
if (IS_ERR(msg_arg))
return PTR_ERR(msg_arg);
- msg_arg->cmd = cmd;
- optee_smc_do_call_with_arg(ctx, shm, offs);
+ msg_arg->cmd = OPTEE_MSG_CMD_LEND_PROTMEM;
+ msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ msg_arg->params[0].u.value.a = use_case;
+ msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
+ msg_arg->params[1].u.tmem.buf_ptr = protmem->paddr;
+ msg_arg->params[1].u.tmem.size = protmem->size;
+ msg_arg->params[1].u.tmem.shm_ref = (u_long)protmem;
- optee_free_msg_arg(ctx, entry, offs);
- return 0;
-}
+ rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false);
+ if (rc)
+ goto out;
+ if (msg_arg->ret != TEEC_SUCCESS) {
+ rc = -EINVAL;
+ goto out;
+ }
+ protmem->sec_world_id = (u_long)protmem;
-static int optee_smc_do_bottom_half(struct tee_context *ctx)
-{
- return simple_call_with_arg(ctx, OPTEE_MSG_CMD_DO_BOTTOM_HALF);
+out:
+ optee_free_msg_arg(optee->ctx, entry, offs);
+ return rc;
}
-static int optee_smc_stop_async_notif(struct tee_context *ctx)
+static int optee_smc_reclaim_protmem(struct optee *optee,
+ struct tee_shm *protmem)
{
- return simple_call_with_arg(ctx, OPTEE_MSG_CMD_STOP_ASYNC_NOTIF);
+ struct optee_shm_arg_entry *entry;
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm;
+ u_int offs;
+ int rc;
+
+ msg_arg = optee_get_msg_arg(optee->ctx, 1, &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_RECLAIM_PROTMEM;
+ msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+ msg_arg->params[0].u.rmem.shm_ref = (u_long)protmem;
+
+ rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false);
+ if (rc)
+ goto out;
+ if (msg_arg->ret != TEEC_SUCCESS)
+ rc = -EINVAL;
+
+out:
+ optee_free_msg_arg(optee->ctx, entry, offs);
+ return rc;
}
/*
@@ -984,16 +1040,17 @@ static u32 get_async_notif_value(optee_invoke_fn *invoke_fn, bool *value_valid,
invoke_fn(OPTEE_SMC_GET_ASYNC_NOTIF_VALUE, 0, 0, 0, 0, 0, 0, 0, &res);
- if (res.a0)
+ if (res.a0) {
+ *value_valid = false;
return 0;
+ }
*value_valid = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID);
*value_pending = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING);
return res.a1;
}
-static irqreturn_t notif_irq_handler(int irq, void *dev_id)
+static irqreturn_t irq_handler(struct optee *optee)
{
- struct optee *optee = dev_id;
bool do_bottom_half = false;
bool value_valid;
bool value_pending;
@@ -1016,16 +1073,23 @@ static irqreturn_t notif_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static irqreturn_t notif_irq_handler(int irq, void *dev_id)
+{
+ struct optee *optee = dev_id;
+
+ return irq_handler(optee);
+}
+
static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
{
struct optee *optee = dev_id;
- optee_smc_do_bottom_half(optee->ctx);
+ optee_do_bottom_half(optee->ctx);
return IRQ_HANDLED;
}
-static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
+static int init_irq(struct optee *optee, u_int irq)
{
int rc;
@@ -1040,12 +1104,103 @@ static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
return 0;
}
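+
+/*
+ * Per-cpu notification interrupts cannot use a threaded handler, so the
+ * bottom half, which enters secure world, is deferred to the
+ * notif_pcpu_wq workqueue instead.
+ */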
+static irqreturn_t notif_pcpu_irq_handler(int irq, void *dev_id)
+{
+ struct optee_pcpu *pcpu = dev_id;
+ struct optee *optee = pcpu->optee;
+
+ if (irq_handler(optee) == IRQ_WAKE_THREAD)
+ queue_work(optee->smc.notif_pcpu_wq,
+ &optee->smc.notif_pcpu_work);
+
+ return IRQ_HANDLED;
+}
+
+static void notif_pcpu_irq_work_fn(struct work_struct *work)
+{
+ struct optee_smc *optee_smc = container_of(work, struct optee_smc,
+ notif_pcpu_work);
+ struct optee *optee = container_of(optee_smc, struct optee, smc);
+
+ optee_do_bottom_half(optee->ctx);
+}
+
+static int init_pcpu_irq(struct optee *optee, u_int irq)
+{
+ struct optee_pcpu __percpu *optee_pcpu;
+ int cpu, rc;
+
+ optee_pcpu = alloc_percpu(struct optee_pcpu);
+ if (!optee_pcpu)
+ return -ENOMEM;
+
+ for_each_present_cpu(cpu)
+ per_cpu_ptr(optee_pcpu, cpu)->optee = optee;
+
+ rc = request_percpu_irq(irq, notif_pcpu_irq_handler,
+ "optee_pcpu_notification", optee_pcpu);
+ if (rc)
+ goto err_free_pcpu;
+
+ INIT_WORK(&optee->smc.notif_pcpu_work, notif_pcpu_irq_work_fn);
+ optee->smc.notif_pcpu_wq = create_workqueue("optee_pcpu_notification");
+ if (!optee->smc.notif_pcpu_wq) {
+ rc = -EINVAL;
+ goto err_free_pcpu_irq;
+ }
+
+ optee->smc.optee_pcpu = optee_pcpu;
+ optee->smc.notif_irq = irq;
+
+ pcpu_irq_num = irq;
+ rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "optee/pcpu-notif:starting",
+ optee_cpuhp_enable_pcpu_irq,
+ optee_cpuhp_disable_pcpu_irq);
+ if (!rc)
+ rc = -EINVAL;
+ if (rc < 0)
+ goto err_free_pcpu_irq;
+
+ optee->smc.notif_cpuhp_state = rc;
+
+ return 0;
+
+err_free_pcpu_irq:
+ free_percpu_irq(irq, optee_pcpu);
+err_free_pcpu:
+ free_percpu(optee_pcpu);
+
+ return rc;
+}
+
+static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
+{
+ if (irq_is_percpu_devid(irq))
+ return init_pcpu_irq(optee, irq);
+ else
+ return init_irq(optee, irq);
+}
+
+static void uninit_pcpu_irq(struct optee *optee)
+{
+ cpuhp_remove_state(optee->smc.notif_cpuhp_state);
+
+ destroy_workqueue(optee->smc.notif_pcpu_wq);
+
+ free_percpu_irq(optee->smc.notif_irq, optee->smc.optee_pcpu);
+ free_percpu(optee->smc.optee_pcpu);
+}
+
static void optee_smc_notif_uninit_irq(struct optee *optee)
{
if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
- optee_smc_stop_async_notif(optee->ctx);
+ optee_stop_async_notif(optee->ctx);
if (optee->smc.notif_irq) {
- free_irq(optee->smc.notif_irq, optee);
+ if (irq_is_percpu_devid(optee->smc.notif_irq))
+ uninit_pcpu_irq(optee);
+ else
+ free_irq(optee->smc.notif_irq, optee);
+
irq_dispose_mapping(optee->smc.notif_irq);
}
}
@@ -1091,6 +1246,7 @@ static const struct tee_driver_ops optee_clnt_ops = {
.release = optee_release,
.open_session = optee_open_session,
.close_session = optee_close_session,
+ .system_session = optee_system_session,
.invoke_func = optee_invoke_func,
.cancel_req = optee_cancel_req,
.shm_register = optee_shm_register,
@@ -1124,6 +1280,8 @@ static const struct optee_ops optee_ops = {
.do_call_with_arg = optee_smc_do_call_with_arg,
.to_msg_param = optee_to_msg_param,
.from_msg_param = optee_from_msg_param,
+ .lend_protmem = optee_smc_lend_protmem,
+ .reclaim_protmem = optee_smc_reclaim_protmem,
};
static int enable_async_notif(optee_invoke_fn *invoke_fn)
@@ -1149,6 +1307,22 @@ static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
return false;
}
+#ifdef CONFIG_OPTEE_INSECURE_LOAD_IMAGE
+static bool optee_msg_api_uid_is_optee_image_load(optee_invoke_fn *invoke_fn)
+{
+ struct arm_smccc_res res;
+
+ invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0 == OPTEE_MSG_IMAGE_LOAD_UID_0 &&
+ res.a1 == OPTEE_MSG_IMAGE_LOAD_UID_1 &&
+ res.a2 == OPTEE_MSG_IMAGE_LOAD_UID_2 &&
+ res.a3 == OPTEE_MSG_IMAGE_LOAD_UID_3)
+ return true;
+ return false;
+}
+#endif
+
static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
{
union {
@@ -1164,8 +1338,9 @@ static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
&res.smccc);
if (res.result.build_id)
- pr_info("revision %lu.%lu (%08lx)", res.result.major,
- res.result.minor, res.result.build_id);
+ pr_info("revision %lu.%lu (%0*lx)", res.result.major,
+ res.result.minor, (int)sizeof(res.result.build_id) * 2,
+ res.result.build_id);
else
pr_info("revision %lu.%lu", res.result.major, res.result.minor);
}
@@ -1222,6 +1397,16 @@ static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
return true;
}
+static unsigned int optee_msg_get_thread_count(optee_invoke_fn *invoke_fn)
+{
+ struct arm_smccc_res res;
+
+ invoke_fn(OPTEE_SMC_GET_THREAD_COUNT, 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0)
+ return 0;
+ return res.a1;
+}
+
static struct tee_shm_pool *
optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
{
@@ -1315,7 +1500,7 @@ static optee_invoke_fn *get_invoke_func(struct device *dev)
* optee_remove is called by platform subsystem to alert the driver
* that it should release the device
*/
-static int optee_smc_remove(struct platform_device *pdev)
+static void optee_smc_remove(struct platform_device *pdev)
{
struct optee *optee = platform_get_drvdata(pdev);
@@ -1335,8 +1520,6 @@ static int optee_smc_remove(struct platform_device *pdev)
memunmap(optee->smc.memremaped_shm);
kfree(optee);
-
- return 0;
}
/* optee_shutdown - Device Removal Routine
@@ -1354,6 +1537,186 @@ static void optee_shutdown(struct platform_device *pdev)
optee_disable_shm_cache(optee);
}
+#ifdef CONFIG_OPTEE_INSECURE_LOAD_IMAGE
+
+#define OPTEE_FW_IMAGE "optee/tee.bin"
+
+static optee_invoke_fn *cpuhp_invoke_fn;
+
+static int optee_cpuhp_probe(unsigned int cpu)
+{
+ /*
+ * Invoking a call on a CPU will cause OP-TEE to perform the required
+ * setup for that CPU. Just invoke the call to get the UID since that
+ * has no side effects.
+ */
+ if (optee_msg_api_uid_is_optee_api(cpuhp_invoke_fn))
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static int optee_load_fw(struct platform_device *pdev,
+ optee_invoke_fn *invoke_fn)
+{
+ const struct firmware *fw = NULL;
+ struct arm_smccc_res res;
+ phys_addr_t data_pa;
+ u8 *data_buf = NULL;
+ u64 data_size;
+ u32 data_pa_high, data_pa_low;
+ u32 data_size_high, data_size_low;
+ int rc;
+ int hp_state;
+
+ if (!optee_msg_api_uid_is_optee_image_load(invoke_fn))
+ return 0;
+
+ rc = request_firmware(&fw, OPTEE_FW_IMAGE, &pdev->dev);
+ if (rc) {
+ /*
+ * The firmware in the rootfs will not be accessible until we
+ * are in the SYSTEM_RUNNING state, so return EPROBE_DEFER until
+ * that point.
+ */
+ if (system_state < SYSTEM_RUNNING)
+ return -EPROBE_DEFER;
+ goto fw_err;
+ }
+
+ data_size = fw->size;
+ /*
+ * This uses the GFP_DMA flag to ensure we are allocated memory in the
+ * 32-bit space since TF-A cannot map memory beyond the 32-bit boundary.
+ */
+ data_buf = kmemdup(fw->data, fw->size, GFP_KERNEL | GFP_DMA);
+ if (!data_buf) {
+ rc = -ENOMEM;
+ goto fw_err;
+ }
+ data_pa = virt_to_phys(data_buf);
+ reg_pair_from_64(&data_pa_high, &data_pa_low, data_pa);
+ reg_pair_from_64(&data_size_high, &data_size_low, data_size);
+ goto fw_load;
+
+fw_err:
+ pr_warn("image loading failed\n");
+ data_pa_high = 0;
+ data_pa_low = 0;
+ data_size_high = 0;
+ data_size_low = 0;
+
+fw_load:
+ /*
+ * Always invoke the SMC, even if loading the image fails, to indicate
+ * to EL3 that we have passed the point where it should allow invoking
+ * this SMC.
+ */
+	pr_warn("OP-TEE image loaded from kernel, this can be insecure\n");
+ invoke_fn(OPTEE_SMC_CALL_LOAD_IMAGE, data_size_high, data_size_low,
+ data_pa_high, data_pa_low, 0, 0, 0, &res);
+ if (!rc)
+ rc = res.a0;
+ release_firmware(fw);
+ kfree(data_buf);
+
+ if (!rc) {
+ /*
+ * We need to initialize OP-TEE on all other running cores as
+ * well. Any cores that aren't running yet will get initialized
+ * when they are brought up by the power management functions in
+ * TF-A which are registered by the OP-TEE SPD. Due to that we
+ * can un-register the callback right after registering it.
+ */
+ cpuhp_invoke_fn = invoke_fn;
+ hp_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "optee:probe",
+ optee_cpuhp_probe, NULL);
+ if (hp_state < 0) {
+			pr_warn("Failed to set up CPU hotplug for OP-TEE\n");
+ return -EINVAL;
+ }
+ cpuhp_remove_state(hp_state);
+ cpuhp_invoke_fn = NULL;
+ }
+
+ return rc;
+}
+#else
+static inline int optee_load_fw(struct platform_device *pdev,
+ optee_invoke_fn *invoke_fn)
+{
+ return 0;
+}
+#endif
+
+static struct tee_protmem_pool *static_protmem_pool_init(struct optee *optee)
+{
+#if IS_ENABLED(CONFIG_OPTEE_STATIC_PROTMEM_POOL)
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_get_protmem_config_result result;
+ } res;
+ struct tee_protmem_pool *pool;
+ void *p;
+ int rc;
+
+ optee->smc.invoke_fn(OPTEE_SMC_GET_PROTMEM_CONFIG, 0, 0, 0, 0,
+ 0, 0, 0, &res.smccc);
+ if (res.result.status != OPTEE_SMC_RETURN_OK)
+ return ERR_PTR(-EINVAL);
+
+ rc = optee_set_dma_mask(optee, res.result.pa_width);
+ if (rc)
+ return ERR_PTR(rc);
+
+ /*
+ * Map the memory as uncached to make sure the kernel can work with
+ * __pfn_to_page() and friends since that's needed when passing the
+ * protected DMA-buf to a device. The memory should otherwise not
+ * be touched by the kernel since it's likely to cause an external
+ * abort due to the protection status.
+ */
+ p = devm_memremap(&optee->teedev->dev, res.result.start,
+ res.result.size, MEMREMAP_WC);
+ if (IS_ERR(p))
+ return p;
+
+ pool = tee_protmem_static_pool_alloc(res.result.start, res.result.size);
+ if (IS_ERR(pool))
+ devm_memunmap(&optee->teedev->dev, p);
+
+ return pool;
+#else
+ return ERR_PTR(-EINVAL);
+#endif
+}
+
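+/*
+ * Prefer a static pool when OPTEE_SMC_SEC_CAP_PROTMEM is advertised and
+ * fall back to a dynamically lent pool when only
+ * OPTEE_SMC_SEC_CAP_DYNAMIC_PROTMEM is available or when setting up the
+ * static pool fails. The resulting pool backs a TEE DMA heap.
+ */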
+static int optee_protmem_pool_init(struct optee *optee)
+{
+ bool protm = optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_PROTMEM;
+ bool dyn_protm = optee->smc.sec_caps &
+ OPTEE_SMC_SEC_CAP_DYNAMIC_PROTMEM;
+ enum tee_dma_heap_id heap_id = TEE_DMA_HEAP_SECURE_VIDEO_PLAY;
+ struct tee_protmem_pool *pool = ERR_PTR(-EINVAL);
+ int rc = -EINVAL;
+
+ if (!protm && !dyn_protm)
+ return 0;
+
+ if (protm)
+ pool = static_protmem_pool_init(optee);
+ if (dyn_protm && IS_ERR(pool))
+ pool = optee_protmem_alloc_dyn_pool(optee, heap_id);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ rc = tee_device_register_dma_heap(optee->teedev, heap_id, pool);
+ if (rc)
+ pool->ops->destroy_pool(pool);
+
+ return rc;
+}
+
static int optee_probe(struct platform_device *pdev)
{
optee_invoke_fn *invoke_fn;
@@ -1361,6 +1724,7 @@ static int optee_probe(struct platform_device *pdev)
struct optee *optee = NULL;
void *memremaped_shm = NULL;
unsigned int rpc_param_count;
+ unsigned int thread_count;
struct tee_device *teedev;
struct tee_context *ctx;
u32 max_notif_value;
@@ -1372,6 +1736,10 @@ static int optee_probe(struct platform_device *pdev)
if (IS_ERR(invoke_fn))
return PTR_ERR(invoke_fn);
+ rc = optee_load_fw(pdev, invoke_fn);
+ if (rc)
+ return rc;
+
if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
pr_warn("api uid mismatch\n");
return -EINVAL;
@@ -1384,6 +1752,7 @@ static int optee_probe(struct platform_device *pdev)
return -EINVAL;
}
+ thread_count = optee_msg_get_thread_count(invoke_fn);
if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps,
&max_notif_value,
&rpc_param_count)) {
@@ -1443,7 +1812,7 @@ static int optee_probe(struct platform_device *pdev)
optee = kzalloc(sizeof(*optee), GFP_KERNEL);
if (!optee) {
rc = -ENOMEM;
- goto err_free_pool;
+ goto err_free_shm_pool;
}
optee->ops = &optee_ops;
@@ -1451,6 +1820,10 @@ static int optee_probe(struct platform_device *pdev)
optee->smc.sec_caps = sec_caps;
optee->rpc_param_count = rpc_param_count;
+ if (IS_REACHABLE(CONFIG_RPMB) &&
+ (sec_caps & OPTEE_SMC_SEC_CAP_RPMB_PROBE))
+ optee->in_kernel_rpmb_routing = true;
+
teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
if (IS_ERR(teedev)) {
rc = PTR_ERR(teedev);
@@ -1465,6 +1838,8 @@ static int optee_probe(struct platform_device *pdev)
}
optee->supp_teedev = teedev;
+ optee_set_dev_group(optee);
+
rc = tee_device_register(optee->teedev);
if (rc)
goto err_unreg_supp_teedev;
@@ -1473,12 +1848,12 @@ static int optee_probe(struct platform_device *pdev)
if (rc)
goto err_unreg_supp_teedev;
- mutex_init(&optee->call_queue.mutex);
- INIT_LIST_HEAD(&optee->call_queue.waiters);
+ optee_cq_init(&optee->call_queue, thread_count);
optee_supp_init(&optee->supp);
optee->smc.memremaped_shm = memremaped_shm;
optee->pool = pool;
optee_shm_arg_cache_init(optee, arg_cache_flags);
+ mutex_init(&optee->rpmb_dev_mutex);
platform_set_drvdata(pdev, optee);
ctx = teedev_open(optee->teedev);
@@ -1510,6 +1885,9 @@ static int optee_probe(struct platform_device *pdev)
pr_info("Asynchronous notifications enabled\n");
}
+ if (optee_protmem_pool_init(optee))
+ pr_info("Protected memory service not available\n");
+
/*
* Ensure that there are no pre-existing shm objects before enabling
* the shm cache so that there's no chance of receiving an invalid
@@ -1533,6 +1911,10 @@ static int optee_probe(struct platform_device *pdev)
if (rc)
goto err_disable_shm_cache;
+ INIT_WORK(&optee->rpmb_scan_bus_work, optee_bus_scan_rpmb);
+ optee->rpmb_intf.notifier_call = optee_rpmb_intf_rdev;
+ blocking_notifier_chain_register(&optee_rpmb_intf_added,
+ &optee->rpmb_intf);
pr_info("initialized driver\n");
return 0;
@@ -1546,6 +1928,8 @@ err_notif_uninit:
err_close_ctx:
teedev_close_context(ctx);
err_supp_uninit:
+ rpmb_dev_put(optee->rpmb_dev);
+ mutex_destroy(&optee->rpmb_dev_mutex);
optee_shm_arg_cache_uninit(optee);
optee_supp_uninit(&optee->supp);
mutex_destroy(&optee->call_queue.mutex);
@@ -1555,7 +1939,7 @@ err_unreg_teedev:
tee_device_unregister(optee->teedev);
err_free_optee:
kfree(optee);
-err_free_pool:
+err_free_shm_pool:
tee_shm_pool_free(pool);
if (memremaped_shm)
memunmap(memremaped_shm);
diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
index 322a543b8c27..d0f397c90242 100644
--- a/drivers/tee/optee/supp.c
+++ b/drivers/tee/optee/supp.c
@@ -80,7 +80,6 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_supp *supp = &optee->supp;
struct optee_supp_req *req;
- bool interruptable;
u32 ret;
/*
@@ -111,36 +110,18 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
/*
* Wait for supplicant to process and return result, once we've
* returned from wait_for_completion(&req->c) successfully we have
- * exclusive access again.
+ * exclusive access again. Allow the wait to be killable such that
+ * the wait doesn't turn into an indefinite state if the supplicant
+ * gets hung for some reason.
*/
- while (wait_for_completion_interruptible(&req->c)) {
+ if (wait_for_completion_killable(&req->c)) {
mutex_lock(&supp->mutex);
- interruptable = !supp->ctx;
- if (interruptable) {
- /*
- * There's no supplicant available and since the
- * supp->mutex currently is held none can
- * become available until the mutex released
- * again.
- *
- * Interrupting an RPC to supplicant is only
- * allowed as a way of slightly improving the user
- * experience in case the supplicant hasn't been
- * started yet. During normal operation the supplicant
- * will serve all requests in a timely manner and
- * interrupting then wouldn't make sense.
- */
- if (req->in_queue) {
- list_del(&req->link);
- req->in_queue = false;
- }
+ if (req->in_queue) {
+ list_del(&req->link);
+ req->in_queue = false;
}
mutex_unlock(&supp->mutex);
-
- if (interruptable) {
- req->ret = TEEC_ERROR_COMMUNICATION;
- break;
- }
+ req->ret = TEEC_ERROR_COMMUNICATION;
}
ret = req->ret;
diff --git a/drivers/tee/qcomtee/Kconfig b/drivers/tee/qcomtee/Kconfig
new file mode 100644
index 000000000000..9f19dee08db4
--- /dev/null
+++ b/drivers/tee/qcomtee/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Qualcomm Trusted Execution Environment Configuration
+config QCOMTEE
+ tristate "Qualcomm TEE Support"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on !CPU_BIG_ENDIAN
+ select QCOM_SCM
+ select QCOM_TZMEM_MODE_SHMBRIDGE
+ help
+ This option enables the Qualcomm Trusted Execution Environment (QTEE)
+ driver. It provides an API to access services offered by QTEE and
+ its loaded Trusted Applications (TAs). Additionally, it facilitates
+ the export of userspace services provided by supplicants to QTEE.
diff --git a/drivers/tee/qcomtee/Makefile b/drivers/tee/qcomtee/Makefile
new file mode 100644
index 000000000000..7c466c9f32af
--- /dev/null
+++ b/drivers/tee/qcomtee/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_QCOMTEE) += qcomtee.o
+qcomtee-objs += async.o
+qcomtee-objs += call.o
+qcomtee-objs += core.o
+qcomtee-objs += mem_obj.o
+qcomtee-objs += primordial_obj.o
+qcomtee-objs += shm.o
+qcomtee-objs += user_obj.o
diff --git a/drivers/tee/qcomtee/async.c b/drivers/tee/qcomtee/async.c
new file mode 100644
index 000000000000..31bff4309e67
--- /dev/null
+++ b/drivers/tee/qcomtee/async.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "qcomtee.h"
+
+#define QCOMTEE_ASYNC_VERSION_1_0 0x00010000U /* Maj: 0x0001, Min: 0x0000. */
+#define QCOMTEE_ASYNC_VERSION_1_1 0x00010001U /* Maj: 0x0001, Min: 0x0001. */
+#define QCOMTEE_ASYNC_VERSION_1_2 0x00010002U /* Maj: 0x0001, Min: 0x0002. */
+#define QCOMTEE_ASYNC_VERSION_CURRENT QCOMTEE_ASYNC_VERSION_1_2
+
+#define QCOMTEE_ASYNC_VERSION_MAJOR(n) upper_16_bits(n)
+#define QCOMTEE_ASYNC_VERSION_MINOR(n) lower_16_bits(n)
+
+#define QCOMTEE_ASYNC_VERSION_CURRENT_MAJOR \
+ QCOMTEE_ASYNC_VERSION_MAJOR(QCOMTEE_ASYNC_VERSION_CURRENT)
+#define QCOMTEE_ASYNC_VERSION_CURRENT_MINOR \
+ QCOMTEE_ASYNC_VERSION_MINOR(QCOMTEE_ASYNC_VERSION_CURRENT)
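+
+/*
+ * For example, QCOMTEE_ASYNC_VERSION_1_2 (0x00010002) decodes to major
+ * 0x0001 and minor 0x0002 with the helpers above.
+ */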
+
+/**
+ * struct qcomtee_async_msg_hdr - Asynchronous message header format.
+ * @version: current async protocol version of the remote endpoint.
+ * @op: async operation.
+ *
+ * @version specifies the endpoint's (QTEE or driver) supported async protocol.
+ * For example, if QTEE sets @version to %QCOMTEE_ASYNC_VERSION_1_1, QTEE
+ * handles operations supported in %QCOMTEE_ASYNC_VERSION_1_1 or
+ * %QCOMTEE_ASYNC_VERSION_1_0. @op determines the message format.
+ */
+struct qcomtee_async_msg_hdr {
+ u32 version;
+ u32 op;
+};
+
+/* Size of an empty async message. */
+#define QCOMTEE_ASYNC_MSG_ZERO sizeof(struct qcomtee_async_msg_hdr)
+
+/**
+ * struct qcomtee_async_release_msg - Release asynchronous message.
+ * @hdr: message header as &struct qcomtee_async_msg_hdr.
+ * @counts: number of objects in @object_ids.
+ * @object_ids: array of object IDs that should be released.
+ *
+ * Available in Maj = 0x0001, Min >= 0x0000.
+ */
+struct qcomtee_async_release_msg {
+ struct qcomtee_async_msg_hdr hdr;
+ u32 counts;
+ u32 object_ids[] __counted_by(counts);
+};
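+
+/*
+ * Example sizing: a release message carrying two object IDs occupies
+ * struct_size(msg, object_ids, 2) bytes: the 8-byte header, the 4-byte
+ * count, and two 4-byte object IDs, 20 bytes in total.
+ */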
+
+/**
+ * qcomtee_get_async_buffer() - Get the start of the asynchronous message.
+ * @oic: context used for the current invocation.
+ * @async_buffer: return buffer to extract from or fill in async messages.
+ *
+ * If @oic is used for direct object invocation, the whole outbound buffer
+ * is available for the async message. If @oic is used for a callback request,
+ * the tail of the outbound buffer (after the callback request message) is
+ * available for the async message.
+ *
+ * The start of the async buffer is aligned, see qcomtee_msg_offset_align().
+ */
+static void qcomtee_get_async_buffer(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_buffer *async_buffer)
+{
+ struct qcomtee_msg_callback *msg;
+ unsigned int offset;
+ int i;
+
+ if (!(oic->flags & QCOMTEE_OIC_FLAG_BUSY)) {
+		/* The outbound buffer is empty; use the whole buffer. */
+ offset = 0;
+ } else {
+ msg = (struct qcomtee_msg_callback *)oic->out_msg.addr;
+
+ /* Start offset in a message for buffer arguments. */
+ offset = qcomtee_msg_buffer_args(struct qcomtee_msg_callback,
+ qcomtee_msg_args(msg));
+
+ /* Add size of IB arguments. */
+ qcomtee_msg_for_each_input_buffer(i, msg)
+ offset += qcomtee_msg_offset_align(msg->args[i].b.size);
+
+ /* Add size of OB arguments. */
+ qcomtee_msg_for_each_output_buffer(i, msg)
+ offset += qcomtee_msg_offset_align(msg->args[i].b.size);
+ }
+
+ async_buffer->addr = oic->out_msg.addr + offset;
+ async_buffer->size = oic->out_msg.size - offset;
+}
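+
+/*
+ * Outbound buffer layout while a callback request is pending (sketch):
+ *
+ *	+------------------------------------+  <- oic->out_msg.addr
+ *	| struct qcomtee_msg_callback        |
+ *	| (header and argument array)        |
+ *	+------------------------------------+
+ *	| IB/OB buffer payloads, each        |
+ *	| qcomtee_msg_offset_align()ed       |
+ *	+------------------------------------+  <- async_buffer->addr
+ *	| async message area                 |
+ *	+------------------------------------+  <- oic->out_msg.addr + size
+ */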
+
+/**
+ * async_release() - Process QTEE async release requests.
+ * @oic: context used for the current invocation.
+ * @async_msg: async message for object release.
+ * @size: size of the async buffer available.
+ *
+ * Return: Size of the outbound buffer used when processing @msg.
+ */
+static size_t async_release(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_async_msg_hdr *async_msg,
+ size_t size)
+{
+ struct qcomtee_async_release_msg *msg;
+ struct qcomtee_object *object;
+ int i;
+
+ msg = (struct qcomtee_async_release_msg *)async_msg;
+
+ for (i = 0; i < msg->counts; i++) {
+ object = qcomtee_idx_erase(oic, msg->object_ids[i]);
+ qcomtee_object_put(object);
+ }
+
+ return struct_size(msg, object_ids, msg->counts);
+}
+
+/**
+ * qcomtee_fetch_async_reqs() - Fetch and process asynchronous messages.
+ * @oic: context used for the current invocation.
+ *
+ * Calls handlers to process the requested operations in the async message.
+ * Currently, it only supports async release requests.
+ */
+void qcomtee_fetch_async_reqs(struct qcomtee_object_invoke_ctx *oic)
+{
+ struct qcomtee_async_msg_hdr *async_msg;
+ struct qcomtee_buffer async_buffer;
+ size_t consumed, used = 0;
+ u16 major_ver;
+
+ qcomtee_get_async_buffer(oic, &async_buffer);
+
+ while (async_buffer.size - used > QCOMTEE_ASYNC_MSG_ZERO) {
+ async_msg = (struct qcomtee_async_msg_hdr *)(async_buffer.addr +
+ used);
+ /*
+ * QTEE assumes that the unused space of the async buffer is
+ * zeroed; so if version is zero, the buffer is unused.
+ */
+ if (async_msg->version == 0)
+ goto out;
+
+ major_ver = QCOMTEE_ASYNC_VERSION_MAJOR(async_msg->version);
+ /* Major version mismatch is a compatibility break. */
+ if (major_ver != QCOMTEE_ASYNC_VERSION_CURRENT_MAJOR) {
+ pr_err("Async message version mismatch (%u != %u)\n",
+ major_ver, QCOMTEE_ASYNC_VERSION_CURRENT_MAJOR);
+
+ goto out;
+ }
+
+ switch (async_msg->op) {
+ case QCOMTEE_MSG_OBJECT_OP_RELEASE:
+ consumed = async_release(oic, async_msg,
+ async_buffer.size - used);
+ break;
+ default:
+ pr_err("Unsupported async message %u\n", async_msg->op);
+ goto out;
+ }
+
+ /* Supported operation but unable to parse the message. */
+ if (!consumed) {
+ pr_err("Unable to parse async message for op %u\n",
+ async_msg->op);
+ goto out;
+ }
+
+ /* Next async message. */
+ used += qcomtee_msg_offset_align(consumed);
+ }
+
+out:
+	/* Reset the async buffer so async requests do not loop back to QTEE. */
+ memzero_explicit(async_buffer.addr, async_buffer.size);
+}
diff --git a/drivers/tee/qcomtee/call.c b/drivers/tee/qcomtee/call.c
new file mode 100644
index 000000000000..65f9140d4e1f
--- /dev/null
+++ b/drivers/tee/qcomtee/call.c
@@ -0,0 +1,820 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <linux/tee.h>
+#include <linux/platform_device.h>
+#include <linux/xarray.h>
+
+#include "qcomtee.h"
+
+static int find_qtee_object(struct qcomtee_object **object, unsigned long id,
+ struct qcomtee_context_data *ctxdata)
+{
+ int err = 0;
+
+ guard(rcu)();
+ /* Object release is RCU protected. */
+ *object = idr_find(&ctxdata->qtee_objects_idr, id);
+ if (!qcomtee_object_get(*object))
+ err = -EINVAL;
+
+ return err;
+}
+
+static void del_qtee_object(unsigned long id,
+ struct qcomtee_context_data *ctxdata)
+{
+ struct qcomtee_object *object;
+
+ scoped_guard(mutex, &ctxdata->qtee_lock)
+ object = idr_remove(&ctxdata->qtee_objects_idr, id);
+
+ qcomtee_object_put(object);
+}
+
+/**
+ * qcomtee_context_add_qtee_object() - Add a QTEE object to the context.
+ * @param: TEE parameter representing @object.
+ * @object: QTEE object.
+ * @ctx: context to add the object.
+ *
+ * It assumes @object is %QCOMTEE_OBJECT_TYPE_TEE and the caller has already
+ * issued qcomtee_object_get() for @object.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_context_add_qtee_object(struct tee_param *param,
+ struct qcomtee_object *object,
+ struct tee_context *ctx)
+{
+ int ret;
+ struct qcomtee_context_data *ctxdata = ctx->data;
+
+ scoped_guard(mutex, &ctxdata->qtee_lock)
+ ret = idr_alloc(&ctxdata->qtee_objects_idr, object, 0, 0,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ param->u.objref.id = ret;
+ /* QTEE Object: QCOMTEE_OBJREF_FLAG_TEE set. */
+ param->u.objref.flags = QCOMTEE_OBJREF_FLAG_TEE;
+
+ return 0;
+}
+
+/* Retrieve the QTEE object added with qcomtee_context_add_qtee_object(). */
+int qcomtee_context_find_qtee_object(struct qcomtee_object **object,
+ struct tee_param *param,
+ struct tee_context *ctx)
+{
+ struct qcomtee_context_data *ctxdata = ctx->data;
+
+ return find_qtee_object(object, param->u.objref.id, ctxdata);
+}
+
+/**
+ * qcomtee_context_del_qtee_object() - Delete a QTEE object from the context.
+ * @param: TEE parameter representing @object.
+ * @ctx: context for deleting the object.
+ *
+ * The @param has been initialized by qcomtee_context_add_qtee_object().
+ */
+void qcomtee_context_del_qtee_object(struct tee_param *param,
+ struct tee_context *ctx)
+{
+ struct qcomtee_context_data *ctxdata = ctx->data;
+ /* 'qtee_objects_idr' stores QTEE objects only. */
+ if (param->u.objref.flags & QCOMTEE_OBJREF_FLAG_TEE)
+ del_qtee_object(param->u.objref.id, ctxdata);
+}
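+
+/*
+ * Typical lifetime of a QTEE object ID in a context, assuming a
+ * 'param'/'object' pair from an invocation (sketch):
+ *
+ *	qcomtee_context_add_qtee_object(param, object, ctx);
+ *	...
+ *	qcomtee_context_find_qtee_object(&object, param, ctx);
+ *	...
+ *	qcomtee_context_del_qtee_object(param, ctx);
+ *
+ * Add allocates the ID and stores the object in qtee_objects_idr, find
+ * takes a reference under RCU, and del drops the IDR entry along with the
+ * reference held since add.
+ */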
+
+/**
+ * qcomtee_objref_to_arg() - Convert OBJREF parameter to QTEE argument.
+ * @arg: QTEE argument.
+ * @param: TEE parameter.
+ * @ctx: context in which the conversion should happen.
+ *
+ * It assumes @param is an OBJREF.
+ * It does not set @arg.type; the caller should initialize it to a correct
+ * &enum qcomtee_arg_type value. It takes a reference to the object in @arg;
+ * the caller is responsible for putting it afterward.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_objref_to_arg(struct qcomtee_arg *arg, struct tee_param *param,
+ struct tee_context *ctx)
+{
+ int err = -EINVAL;
+
+ arg->o = NULL_QCOMTEE_OBJECT;
+ /* param is a NULL object: */
+ if (param->u.objref.id == TEE_OBJREF_NULL)
+ return 0;
+
+ /* param is a callback object: */
+ if (param->u.objref.flags & QCOMTEE_OBJREF_FLAG_USER)
+ err = qcomtee_user_param_to_object(&arg->o, param, ctx);
+ /* param is a QTEE object: */
+ else if (param->u.objref.flags & QCOMTEE_OBJREF_FLAG_TEE)
+ err = qcomtee_context_find_qtee_object(&arg->o, param, ctx);
+ /* param is a memory object: */
+ else if (param->u.objref.flags & QCOMTEE_OBJREF_FLAG_MEM)
+ err = qcomtee_memobj_param_to_object(&arg->o, param, ctx);
+
+ /*
+ * For callback objects, call qcomtee_object_get() to keep a temporary
+ * copy for the driver, as these objects are released asynchronously
+ * and may disappear even before returning from QTEE.
+ *
+ * - For direct object invocations, the matching put is called in
+ * qcomtee_object_invoke() when parsing the QTEE response.
+ * - For callback responses, put is called in qcomtee_user_object_notify()
+ * after QTEE has received its copies.
+ */
+
+ if (!err && (typeof_qcomtee_object(arg->o) == QCOMTEE_OBJECT_TYPE_CB))
+ qcomtee_object_get(arg->o);
+
+ return err;
+}
+
+/**
+ * qcomtee_objref_from_arg() - Convert QTEE argument to OBJREF param.
+ * @param: TEE parameter.
+ * @arg: QTEE argument.
+ * @ctx: context in which the conversion should happen.
+ *
+ * It assumes @arg is of %QCOMTEE_ARG_TYPE_IO or %QCOMTEE_ARG_TYPE_OO.
+ * It does not set @param.attr; the caller should initialize it to a
+ * correct type.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_objref_from_arg(struct tee_param *param, struct qcomtee_arg *arg,
+ struct tee_context *ctx)
+{
+ struct qcomtee_object *object = arg->o;
+
+ switch (typeof_qcomtee_object(object)) {
+ case QCOMTEE_OBJECT_TYPE_NULL:
+ param->u.objref.id = TEE_OBJREF_NULL;
+
+ return 0;
+ case QCOMTEE_OBJECT_TYPE_CB:
+ /* object is a callback object: */
+ if (is_qcomtee_user_object(object))
+ return qcomtee_user_param_from_object(param, object,
+ ctx);
+ /* object is a memory object: */
+ else if (is_qcomtee_memobj_object(object))
+ return qcomtee_memobj_param_from_object(param, object,
+ ctx);
+
+ break;
+ case QCOMTEE_OBJECT_TYPE_TEE:
+ return qcomtee_context_add_qtee_object(param, object, ctx);
+
+ case QCOMTEE_OBJECT_TYPE_ROOT:
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
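+
+/*
+ * Summary of the OBJREF conversions done above and in qcomtee_objref_to_arg()
+ * (sketch):
+ *
+ *	param flags                     object
+ *	QCOMTEE_OBJREF_FLAG_USER   <->  callback object (user object)
+ *	QCOMTEE_OBJREF_FLAG_MEM    <->  callback object (memory object)
+ *	QCOMTEE_OBJREF_FLAG_TEE    <->  QCOMTEE_OBJECT_TYPE_TEE
+ *	id == TEE_OBJREF_NULL      <->  QCOMTEE_OBJECT_TYPE_NULL
+ */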
+
+/**
+ * qcomtee_params_to_args() - Convert TEE parameters to QTEE arguments.
+ * @u: QTEE arguments.
+ * @params: TEE parameters.
+ * @num_params: number of elements in the parameter array.
+ * @ctx: context in which the conversion should happen.
+ *
+ * It assumes @u has at least @num_params + 1 entries and has been initialized
+ * with %QCOMTEE_ARG_TYPE_INV as &struct qcomtee_arg.type.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+static int qcomtee_params_to_args(struct qcomtee_arg *u,
+ struct tee_param *params, int num_params,
+ struct tee_context *ctx)
+{
+ int i;
+
+ for (i = 0; i < num_params; i++) {
+ switch (params[i].attr) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT:
+ u[i].flags = QCOMTEE_ARG_FLAGS_UADDR;
+ u[i].b.uaddr = params[i].u.ubuf.uaddr;
+ u[i].b.size = params[i].u.ubuf.size;
+
+ if (params[i].attr ==
+ TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT)
+ u[i].type = QCOMTEE_ARG_TYPE_IB;
+ else /* TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT */
+ u[i].type = QCOMTEE_ARG_TYPE_OB;
+
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT:
+ u[i].type = QCOMTEE_ARG_TYPE_IO;
+ if (qcomtee_objref_to_arg(&u[i], &params[i], ctx))
+ goto out_failed;
+
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT:
+ u[i].type = QCOMTEE_ARG_TYPE_OO;
+ u[i].o = NULL_QCOMTEE_OBJECT;
+ break;
+ default:
+ goto out_failed;
+ }
+ }
+
+ return 0;
+
+out_failed:
+ /* Undo qcomtee_objref_to_arg(). */
+ for (i--; i >= 0; i--) {
+ if (u[i].type != QCOMTEE_ARG_TYPE_IO)
+ continue;
+
+ qcomtee_user_object_set_notify(u[i].o, false);
+ /* See docs for qcomtee_objref_to_arg() for double put. */
+ if (typeof_qcomtee_object(u[i].o) == QCOMTEE_OBJECT_TYPE_CB)
+ qcomtee_object_put(u[i].o);
+
+ qcomtee_object_put(u[i].o);
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * qcomtee_params_from_args() - Convert QTEE arguments to TEE parameters.
+ * @params: TEE parameters.
+ * @u: QTEE arguments.
+ * @num_params: number of elements in the parameter array.
+ * @ctx: context in which the conversion should happen.
+ *
+ * @u should have already been initialized by qcomtee_params_to_args().
+ * This also represents the end of a QTEE invocation that started with
+ * qcomtee_params_to_args() by releasing %QCOMTEE_ARG_TYPE_IO objects.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+static int qcomtee_params_from_args(struct tee_param *params,
+ struct qcomtee_arg *u, int num_params,
+ struct tee_context *ctx)
+{
+ int i, np;
+
+ qcomtee_arg_for_each(np, u) {
+ switch (u[np].type) {
+ case QCOMTEE_ARG_TYPE_OB:
+ /* TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT */
+ params[np].u.ubuf.size = u[np].b.size;
+
+ break;
+ case QCOMTEE_ARG_TYPE_IO:
+			/* TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT */
+ qcomtee_object_put(u[np].o);
+
+ break;
+ case QCOMTEE_ARG_TYPE_OO:
+ /* TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT */
+ if (qcomtee_objref_from_arg(&params[np], &u[np], ctx))
+ goto out_failed;
+
+ break;
+ case QCOMTEE_ARG_TYPE_IB:
+ default:
+ break;
+ }
+ }
+
+ return 0;
+
+out_failed:
+ /* Undo qcomtee_objref_from_arg(). */
+ for (i = 0; i < np; i++) {
+ if (params[i].attr == TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT)
+ qcomtee_context_del_qtee_object(&params[i], ctx);
+ }
+
+ /* Release any IO and OO objects not processed. */
+ for (; i < num_params && u[i].type; i++) {
+ if (u[i].type == QCOMTEE_ARG_TYPE_OO ||
+ u[i].type == QCOMTEE_ARG_TYPE_IO)
+ qcomtee_object_put(u[i].o);
+ }
+
+ return -EINVAL;
+}
+
+/* TEE Device Ops. */
+
+static int qcomtee_params_check(struct tee_param *params, int num_params)
+{
+ int io = 0, oo = 0, ib = 0, ob = 0;
+ int i;
+
+ /* QTEE can accept 64 arguments. */
+ if (num_params > QCOMTEE_ARGS_MAX)
+ return -EINVAL;
+
+ /* Supported parameter types. */
+ for (i = 0; i < num_params; i++) {
+ switch (params[i].attr) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT:
+ ib++;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT:
+ ob++;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT:
+ io++;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT:
+ oo++;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+	/* QTEE can accept 16 arguments of each supported type. */
+ if (io > QCOMTEE_ARGS_PER_TYPE || oo > QCOMTEE_ARGS_PER_TYPE ||
+ ib > QCOMTEE_ARGS_PER_TYPE || ob > QCOMTEE_ARGS_PER_TYPE)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Check if an operation on ROOT_QCOMTEE_OBJECT from userspace is permitted. */
+static int qcomtee_root_object_check(u32 op, struct tee_param *params,
+ int num_params)
+{
+ /* Some privileged operations recognized by QTEE. */
+ if (op == QCOMTEE_ROOT_OP_NOTIFY_DOMAIN_CHANGE ||
+ op == QCOMTEE_ROOT_OP_ADCI_ACCEPT ||
+ op == QCOMTEE_ROOT_OP_ADCI_SHUTDOWN)
+ return -EINVAL;
+
+ /*
+ * QCOMTEE_ROOT_OP_REG_WITH_CREDENTIALS is to register with QTEE
+ * by passing a credential object as input OBJREF. TEE_OBJREF_NULL as a
+ * credential object represents a privileged client for QTEE and
+ * is used by the kernel only.
+ */
+ if (op == QCOMTEE_ROOT_OP_REG_WITH_CREDENTIALS && num_params == 2) {
+ if (params[0].attr == TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT &&
+ params[1].attr == TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT) {
+ if (params[0].u.objref.id == TEE_OBJREF_NULL)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * qcomtee_object_invoke() - Invoke a QTEE object.
+ * @ctx: TEE context.
+ * @arg: ioctl arguments.
+ * @params: parameters for the object.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+static int qcomtee_object_invoke(struct tee_context *ctx,
+ struct tee_ioctl_object_invoke_arg *arg,
+ struct tee_param *params)
+{
+ struct qcomtee_object_invoke_ctx *oic __free(kfree) = NULL;
+ struct qcomtee_context_data *ctxdata = ctx->data;
+ struct qcomtee_arg *u __free(kfree) = NULL;
+ struct qcomtee_object *object;
+ int i, ret, result;
+
+ if (qcomtee_params_check(params, arg->num_params))
+ return -EINVAL;
+
+ /* First, handle reserved operations: */
+ if (arg->op == QCOMTEE_MSG_OBJECT_OP_RELEASE) {
+ del_qtee_object(arg->id, ctxdata);
+
+ return 0;
+ }
+
+ /* Otherwise, invoke a QTEE object: */
+ oic = qcomtee_object_invoke_ctx_alloc(ctx);
+ if (!oic)
+ return -ENOMEM;
+
+ /* +1 for ending QCOMTEE_ARG_TYPE_INV. */
+ u = kcalloc(arg->num_params + 1, sizeof(*u), GFP_KERNEL);
+ if (!u)
+ return -ENOMEM;
+
+ /* Get an object to invoke. */
+ if (arg->id == TEE_OBJREF_NULL) {
+ /* Use ROOT if TEE_OBJREF_NULL is invoked. */
+ if (qcomtee_root_object_check(arg->op, params, arg->num_params))
+ return -EINVAL;
+
+ object = ROOT_QCOMTEE_OBJECT;
+ } else if (find_qtee_object(&object, arg->id, ctxdata)) {
+ return -EINVAL;
+ }
+
+ ret = qcomtee_params_to_args(u, params, arg->num_params, ctx);
+ if (ret)
+ goto out;
+
+ ret = qcomtee_object_do_invoke(oic, object, arg->op, u, &result);
+ if (ret) {
+ qcomtee_arg_for_each_input_object(i, u) {
+ qcomtee_user_object_set_notify(u[i].o, false);
+ qcomtee_object_put(u[i].o);
+ }
+
+ goto out;
+ }
+
+	/* Parse the QTEE response and put the driver's object copies: */
+
+ if (!result) {
+ /* Assume service is UNAVAIL if unable to process the result. */
+ if (qcomtee_params_from_args(params, u, arg->num_params, ctx))
+ result = QCOMTEE_MSG_ERROR_UNAVAIL;
+ } else {
+ /*
+		 * qcomtee_params_to_args() takes a reference on each IO object
+		 * so it does not get released in the middle of the invocation.
+		 * On success (!result), qcomtee_params_from_args() puts them;
+		 * otherwise, put them here.
+ */
+ qcomtee_arg_for_each_input_object(i, u)
+ qcomtee_object_put(u[i].o);
+ }
+
+ arg->ret = result;
+out:
+ qcomtee_object_put(object);
+
+ return ret;
+}
+
+/**
+ * qcomtee_supp_recv() - Wait for a request for the supplicant.
+ * @ctx: TEE context.
+ * @op: requested operation on the object.
+ * @num_params: number of elements in the parameter array.
+ * @params: parameters for @op.
+ *
+ * The first parameter is a meta %TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT.
+ * On input, it provides a user buffer. This buffer is used for parameters of
+ * type %TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT in qcomtee_cb_params_from_args().
+ * On output, the object ID and request ID are stored in the meta parameter.
+ *
+ * @num_params is updated to the number of parameters that actually exist
+ * in @params on return.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+static int qcomtee_supp_recv(struct tee_context *ctx, u32 *op, u32 *num_params,
+ struct tee_param *params)
+{
+ struct qcomtee_user_object_request_data data;
+ void __user *uaddr;
+ size_t ubuf_size;
+ int i, ret;
+
+ if (!*num_params)
+ return -EINVAL;
+
+ /* First parameter should be an INOUT + meta parameter. */
+ if (params->attr !=
+ (TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT | TEE_IOCTL_PARAM_ATTR_META))
+ return -EINVAL;
+
+	/* All other parameters must be of type "none". */
+ for (i = 1; i < *num_params; i++)
+ if (params[i].attr)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(params->u.value.a, 8))
+ return -EINVAL;
+
+ /* User buffer and size from meta parameter. */
+ uaddr = u64_to_user_ptr(params->u.value.a);
+ ubuf_size = params->u.value.b;
+ /* Process TEE parameters. +/-1 to ignore the meta parameter. */
+ ret = qcomtee_user_object_select(ctx, params + 1, *num_params - 1,
+ uaddr, ubuf_size, &data);
+ if (ret)
+ return ret;
+
+ params->u.value.a = data.object_id;
+ params->u.value.b = data.id;
+ params->u.value.c = 0;
+ *op = data.op;
+ *num_params = data.np + 1;
+
+ return 0;
+}
+
+/**
+ * qcomtee_supp_send() - Submit a response for a request.
+ * @ctx: TEE context.
+ * @errno: return value for the request.
+ * @num_params: number of elements in the parameter array.
+ * @params: returned parameters.
+ *
+ * The first parameter is a meta %TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT.
+ * It specifies the request ID this response belongs to.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+static int qcomtee_supp_send(struct tee_context *ctx, u32 errno, u32 num_params,
+ struct tee_param *params)
+{
+ int req_id;
+
+ if (!num_params)
+ return -EINVAL;
+
+ /* First parameter should be an OUTPUT + meta parameter. */
+ if (params->attr != (TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT |
+ TEE_IOCTL_PARAM_ATTR_META))
+ return -EINVAL;
+
+ req_id = params->u.value.a;
+ /* Process TEE parameters. +/-1 to ignore the meta parameter. */
+ return qcomtee_user_object_submit(ctx, params + 1, num_params - 1,
+ req_id, errno);
+}
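+
+/*
+ * A supplicant round-trip as seen by the two handlers above (sketch; the
+ * params layout mirrors what userspace passes through the TEE ioctls):
+ *
+ *	params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
+ *			 TEE_IOCTL_PARAM_ATTR_META;
+ *	params[0].u.value.a = user_buffer_addr;	(8-byte aligned)
+ *	params[0].u.value.b = user_buffer_size;
+ *	qcomtee_supp_recv(ctx, &op, &num_params, params);
+ *	(on return: value.a = object ID, value.b = request ID)
+ *
+ *	... serve the request for 'op' ...
+ *
+ *	params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT |
+ *			 TEE_IOCTL_PARAM_ATTR_META;
+ *	params[0].u.value.a = request_id;
+ *	qcomtee_supp_send(ctx, errno, num_params, params);
+ */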
+
+static int qcomtee_open(struct tee_context *ctx)
+{
+ struct qcomtee_context_data *ctxdata __free(kfree) = NULL;
+
+ ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
+ if (!ctxdata)
+ return -ENOMEM;
+
+ /*
+ * In the QTEE driver, the same context is used to refcount resources
+ * shared by QTEE. For example, teedev_ctx_get() is called for any
+ * instance of callback objects (see qcomtee_user_param_to_object()).
+ *
+ * Maintain a copy of teedev for QTEE as it serves as a direct user of
+ * this context. The teedev will be released in the context's release().
+ *
+ * tee_device_unregister() will remain blocked until all contexts
+ * are released. This includes contexts owned by the user, which are
+ * closed by teedev_close_context(), as well as those owned by QTEE
+ * closed by teedev_ctx_put() in object's release().
+ */
+ if (!tee_device_get(ctx->teedev))
+ return -EINVAL;
+
+ idr_init(&ctxdata->qtee_objects_idr);
+ mutex_init(&ctxdata->qtee_lock);
+ idr_init(&ctxdata->reqs_idr);
+ INIT_LIST_HEAD(&ctxdata->reqs_list);
+ mutex_init(&ctxdata->reqs_lock);
+ init_completion(&ctxdata->req_c);
+
+ ctx->data = no_free_ptr(ctxdata);
+
+ return 0;
+}
+
+/* Gets called when the user closes the device. */
+static void qcomtee_close_context(struct tee_context *ctx)
+{
+ struct qcomtee_context_data *ctxdata = ctx->data;
+ struct qcomtee_object *object;
+ int id;
+
+ /* Process QUEUED or PROCESSING requests. */
+ qcomtee_requests_destroy(ctxdata);
+ /* Release QTEE objects. */
+ idr_for_each_entry(&ctxdata->qtee_objects_idr, object, id)
+ qcomtee_object_put(object);
+}
+
+/* Gets called when the final reference to the context goes away. */
+static void qcomtee_release(struct tee_context *ctx)
+{
+ struct qcomtee_context_data *ctxdata = ctx->data;
+
+ idr_destroy(&ctxdata->qtee_objects_idr);
+ idr_destroy(&ctxdata->reqs_idr);
+ kfree(ctxdata);
+
+ /* There is nothing shared in this context with QTEE. */
+ tee_device_put(ctx->teedev);
+}
+
+static void qcomtee_get_version(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers)
+{
+ struct tee_ioctl_version_data v = {
+ .impl_id = TEE_IMPL_ID_QTEE,
+ .gen_caps = TEE_GEN_CAP_OBJREF,
+ };
+
+ *vers = v;
+}
+
+/**
+ * qcomtee_get_qtee_feature_list() - Query QTEE features versions.
+ * @ctx: TEE context.
+ * @id: ID of the feature to query.
+ * @version: version of the feature.
+ *
+ * Used to query the version of features supported by QTEE.
+ */
+static void qcomtee_get_qtee_feature_list(struct tee_context *ctx, u32 id,
+ u32 *version)
+{
+ struct qcomtee_object_invoke_ctx *oic __free(kfree) = NULL;
+ struct qcomtee_object *client_env, *service;
+ struct qcomtee_arg u[3] = { 0 };
+ int result;
+
+ oic = qcomtee_object_invoke_ctx_alloc(ctx);
+ if (!oic)
+ return;
+
+ client_env = qcomtee_object_get_client_env(oic);
+ if (client_env == NULL_QCOMTEE_OBJECT)
+ return;
+
+	/* Get the "FeatureVersions" service object. */
+ service = qcomtee_object_get_service(oic, client_env,
+ QCOMTEE_FEATURE_VER_UID);
+ if (service == NULL_QCOMTEE_OBJECT)
+ goto out_failed;
+
+ /* IB: Feature to query. */
+ u[0].b.addr = &id;
+ u[0].b.size = sizeof(id);
+ u[0].type = QCOMTEE_ARG_TYPE_IB;
+
+ /* OB: Version returned. */
+ u[1].b.addr = version;
+ u[1].b.size = sizeof(*version);
+ u[1].type = QCOMTEE_ARG_TYPE_OB;
+
+ qcomtee_object_do_invoke(oic, service, QCOMTEE_FEATURE_VER_OP_GET, u,
+ &result);
+
+out_failed:
+ qcomtee_object_put(service);
+ qcomtee_object_put(client_env);
+}
+
+static const struct tee_driver_ops qcomtee_ops = {
+ .get_version = qcomtee_get_version,
+ .open = qcomtee_open,
+ .close_context = qcomtee_close_context,
+ .release = qcomtee_release,
+ .object_invoke_func = qcomtee_object_invoke,
+ .supp_recv = qcomtee_supp_recv,
+ .supp_send = qcomtee_supp_send,
+};
+
+static const struct tee_desc qcomtee_desc = {
+ .name = "qcomtee",
+ .ops = &qcomtee_ops,
+ .owner = THIS_MODULE,
+};
+
+static int qcomtee_probe(struct platform_device *pdev)
+{
+ struct workqueue_struct *async_wq;
+ struct tee_device *teedev;
+ struct tee_shm_pool *pool;
+ struct tee_context *ctx;
+ struct qcomtee *qcomtee;
+ int err;
+
+ qcomtee = kzalloc(sizeof(*qcomtee), GFP_KERNEL);
+ if (!qcomtee)
+ return -ENOMEM;
+
+ pool = qcomtee_shm_pool_alloc();
+ if (IS_ERR(pool)) {
+ err = PTR_ERR(pool);
+
+ goto err_free_qcomtee;
+ }
+
+ teedev = tee_device_alloc(&qcomtee_desc, NULL, pool, qcomtee);
+ if (IS_ERR(teedev)) {
+ err = PTR_ERR(teedev);
+
+ goto err_pool_destroy;
+ }
+
+ qcomtee->teedev = teedev;
+ qcomtee->pool = pool;
+ err = tee_device_register(qcomtee->teedev);
+ if (err)
+ goto err_unreg_teedev;
+
+ platform_set_drvdata(pdev, qcomtee);
+ /* Start async wq. */
+ async_wq = alloc_ordered_workqueue("qcomtee_wq", 0);
+ if (!async_wq) {
+ err = -ENOMEM;
+
+ goto err_unreg_teedev;
+ }
+
+ qcomtee->wq = async_wq;
+ /* Driver context used for async operations of teedev. */
+ ctx = teedev_open(qcomtee->teedev);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+
+ goto err_dest_wq;
+ }
+
+ qcomtee->ctx = ctx;
+	/* Init the object table. */
+ qcomtee->xa_last_id = 0;
+ xa_init_flags(&qcomtee->xa_local_objects, XA_FLAGS_ALLOC);
+	/* Get the QTEE version. */
+ qcomtee_get_qtee_feature_list(qcomtee->ctx,
+ QCOMTEE_FEATURE_VER_OP_GET_QTEE_ID,
+ &qcomtee->qtee_version);
+
+ pr_info("QTEE version %u.%u.%u\n",
+ QTEE_VERSION_GET_MAJOR(qcomtee->qtee_version),
+ QTEE_VERSION_GET_MINOR(qcomtee->qtee_version),
+ QTEE_VERSION_GET_PATCH(qcomtee->qtee_version));
+
+ return 0;
+
+err_dest_wq:
+ destroy_workqueue(qcomtee->wq);
+err_unreg_teedev:
+ tee_device_unregister(qcomtee->teedev);
+err_pool_destroy:
+ tee_shm_pool_free(pool);
+err_free_qcomtee:
+ kfree(qcomtee);
+
+ return err;
+}
+
+/**
+ * qcomtee_remove() - Device Removal Routine.
+ * @pdev: platform device information struct.
+ *
+ * It is called by the platform subsystem to alert the driver that it should
+ * release the device.
+ *
+ * QTEE does not provide an API to inform it about a callback object going away.
+ * However, when releasing QTEE objects, any callback object sent to QTEE
+ * previously would be released by QTEE as part of the object release.
+ */
+static void qcomtee_remove(struct platform_device *pdev)
+{
+ struct qcomtee *qcomtee = platform_get_drvdata(pdev);
+
+ teedev_close_context(qcomtee->ctx);
+ /* Wait for RELEASE operations to be processed for QTEE objects. */
+ tee_device_unregister(qcomtee->teedev);
+ destroy_workqueue(qcomtee->wq);
+ tee_shm_pool_free(qcomtee->pool);
+ kfree(qcomtee);
+}
+
+static const struct platform_device_id qcomtee_ids[] = { { "qcomtee", 0 }, {} };
+MODULE_DEVICE_TABLE(platform, qcomtee_ids);
+
+static struct platform_driver qcomtee_platform_driver = {
+ .probe = qcomtee_probe,
+ .remove = qcomtee_remove,
+ .driver = {
+ .name = "qcomtee",
+ },
+ .id_table = qcomtee_ids,
+};
+
+module_platform_driver(qcomtee_platform_driver);
+
+MODULE_AUTHOR("Qualcomm");
+MODULE_DESCRIPTION("QTEE driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tee/qcomtee/core.c b/drivers/tee/qcomtee/core.c
new file mode 100644
index 000000000000..ecd04403591c
--- /dev/null
+++ b/drivers/tee/qcomtee/core.c
@@ -0,0 +1,915 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/xarray.h>
+
+#include "qcomtee.h"
+
+/* QTEE root object. */
+struct qcomtee_object qcomtee_object_root = {
+ .name = "root",
+ .object_type = QCOMTEE_OBJECT_TYPE_ROOT,
+ .info.qtee_id = QCOMTEE_MSG_OBJECT_ROOT,
+};
+
+/* Next argument of type @type after index @i. */
+int qcomtee_next_arg_type(struct qcomtee_arg *u, int i,
+ enum qcomtee_arg_type type)
+{
+ while (u[i].type != QCOMTEE_ARG_TYPE_INV && u[i].type != type)
+ i++;
+ return i;
+}
+
+/*
+ * QTEE expects IDs with QCOMTEE_MSG_OBJECT_NS_BIT set for objects of
+ * QCOMTEE_OBJECT_TYPE_CB type. The first ID with QCOMTEE_MSG_OBJECT_NS_BIT
+ * set is reserved for the primordial object.
+ */
+#define QCOMTEE_OBJECT_PRIMORDIAL (QCOMTEE_MSG_OBJECT_NS_BIT)
+#define QCOMTEE_OBJECT_ID_START (QCOMTEE_OBJECT_PRIMORDIAL + 1)
+#define QCOMTEE_OBJECT_ID_END (U32_MAX)
+
+#define QCOMTEE_OBJECT_SET(p, type, ...) \
+ __QCOMTEE_OBJECT_SET(p, type, ##__VA_ARGS__, 0UL)
+#define __QCOMTEE_OBJECT_SET(p, type, optr, ...) \
+ do { \
+ (p)->object_type = (type); \
+ (p)->info.qtee_id = (unsigned long)(optr); \
+ } while (0)
+
+static struct qcomtee_object *
+qcomtee_qtee_object_alloc(struct qcomtee_object_invoke_ctx *oic,
+ unsigned int object_id)
+{
+ struct qcomtee *qcomtee = tee_get_drvdata(oic->ctx->teedev);
+ struct qcomtee_object *object;
+
+ object = kzalloc(sizeof(*object), GFP_KERNEL);
+ if (!object)
+ return NULL_QCOMTEE_OBJECT;
+
+	/* If kasprintf() fails, the name defaults to "no-name". */
+ object->name = kasprintf(GFP_KERNEL, "qcomtee-%u", object_id);
+ QCOMTEE_OBJECT_SET(object, QCOMTEE_OBJECT_TYPE_TEE, object_id);
+ kref_init(&object->refcount);
+ /* A QTEE object requires a context for async operations. */
+ object->info.qcomtee_async_ctx = qcomtee->ctx;
+ teedev_ctx_get(object->info.qcomtee_async_ctx);
+
+ return object;
+}
+
+static void qcomtee_qtee_object_free(struct qcomtee_object *object)
+{
+ /* See qcomtee_qtee_object_alloc(). */
+ teedev_ctx_put(object->info.qcomtee_async_ctx);
+
+ kfree(object->name);
+ kfree(object);
+}
+
+static void qcomtee_do_release_qtee_object(struct work_struct *work)
+{
+ struct qcomtee_object *object;
+ struct qcomtee *qcomtee;
+ int ret, result = 0;
+
+ /* RELEASE does not require any argument. */
+ struct qcomtee_arg args[] = { { .type = QCOMTEE_ARG_TYPE_INV } };
+
+ object = container_of(work, struct qcomtee_object, work);
+ qcomtee = tee_get_drvdata(object->info.qcomtee_async_ctx->teedev);
+ /* Get the TEE context used for asynchronous operations. */
+ qcomtee->oic.ctx = object->info.qcomtee_async_ctx;
+
+ ret = qcomtee_object_do_invoke_internal(&qcomtee->oic, object,
+ QCOMTEE_MSG_OBJECT_OP_RELEASE,
+ args, &result);
+
+ /* Is it safe to retry the release? */
+ if (ret && ret != -ENODEV) {
+ queue_work(qcomtee->wq, &object->work);
+ } else {
+ if (ret || result)
+ pr_err("%s release failed, ret = %d (%x)\n",
+ qcomtee_object_name(object), ret, result);
+ qcomtee_qtee_object_free(object);
+ }
+}
+
+static void qcomtee_release_qtee_object(struct qcomtee_object *object)
+{
+ struct qcomtee *qcomtee =
+ tee_get_drvdata(object->info.qcomtee_async_ctx->teedev);
+
+ INIT_WORK(&object->work, qcomtee_do_release_qtee_object);
+ queue_work(qcomtee->wq, &object->work);
+}
+
+static void qcomtee_object_release(struct kref *refcount)
+{
+ struct qcomtee_object *object;
+ const char *name;
+
+ object = container_of(refcount, struct qcomtee_object, refcount);
+
+ /*
+	 * qcomtee_object_get() is called under an RCU read lock. Call
+	 * synchronize_rcu() to avoid releasing the object while it is being
+	 * accessed in qcomtee_object_get().
+ */
+ synchronize_rcu();
+
+ switch (typeof_qcomtee_object(object)) {
+ case QCOMTEE_OBJECT_TYPE_TEE:
+ qcomtee_release_qtee_object(object);
+
+ break;
+ case QCOMTEE_OBJECT_TYPE_CB:
+ name = object->name;
+
+ if (object->ops->release)
+ object->ops->release(object);
+
+ kfree_const(name);
+
+ break;
+ case QCOMTEE_OBJECT_TYPE_ROOT:
+ case QCOMTEE_OBJECT_TYPE_NULL:
+ default:
+ break;
+ }
+}
+
+/**
+ * qcomtee_object_get() - Increase the object's reference count.
+ * @object: object to increase the reference count.
+ *
+ * Context: The caller should hold RCU read lock.
+ */
+int qcomtee_object_get(struct qcomtee_object *object)
+{
+ if (object != &qcomtee_primordial_object &&
+ object != NULL_QCOMTEE_OBJECT &&
+ object != ROOT_QCOMTEE_OBJECT)
+ return kref_get_unless_zero(&object->refcount);
+
+ return 0;
+}
+
+/**
+ * qcomtee_object_put() - Decrease the object's reference count.
+ * @object: object to decrease the reference count.
+ */
+void qcomtee_object_put(struct qcomtee_object *object)
+{
+ if (object != &qcomtee_primordial_object &&
+ object != NULL_QCOMTEE_OBJECT &&
+ object != ROOT_QCOMTEE_OBJECT)
+ kref_put(&object->refcount, qcomtee_object_release);
+}
+
+static int qcomtee_idx_alloc(struct qcomtee_object_invoke_ctx *oic, u32 *idx,
+ struct qcomtee_object *object)
+{
+ struct qcomtee *qcomtee = tee_get_drvdata(oic->ctx->teedev);
+
+ /* Every ID allocated here has QCOMTEE_MSG_OBJECT_NS_BIT set. */
+ return xa_alloc_cyclic(&qcomtee->xa_local_objects, idx, object,
+ XA_LIMIT(QCOMTEE_OBJECT_ID_START,
+ QCOMTEE_OBJECT_ID_END),
+ &qcomtee->xa_last_id, GFP_KERNEL);
+}
+
+struct qcomtee_object *qcomtee_idx_erase(struct qcomtee_object_invoke_ctx *oic,
+ u32 idx)
+{
+ struct qcomtee *qcomtee = tee_get_drvdata(oic->ctx->teedev);
+
+ if (idx < QCOMTEE_OBJECT_ID_START || idx > QCOMTEE_OBJECT_ID_END)
+ return NULL_QCOMTEE_OBJECT;
+
+ return xa_erase(&qcomtee->xa_local_objects, idx);
+}
+
+/**
+ * qcomtee_object_id_get() - Get an ID for an object to send to QTEE.
+ * @oic: context to use for the invocation.
+ * @object: object to assign an ID.
+ * @object_id: object ID.
+ *
+ * Called on the path to QTEE to construct the message; see
+ * qcomtee_prepare_msg() and qcomtee_update_msg().
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+static int qcomtee_object_id_get(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *object,
+ unsigned int *object_id)
+{
+ u32 idx;
+
+ switch (typeof_qcomtee_object(object)) {
+ case QCOMTEE_OBJECT_TYPE_CB:
+ if (qcomtee_idx_alloc(oic, &idx, object) < 0)
+ return -ENOSPC;
+
+ *object_id = idx;
+
+ break;
+ case QCOMTEE_OBJECT_TYPE_ROOT:
+ case QCOMTEE_OBJECT_TYPE_TEE:
+ *object_id = object->info.qtee_id;
+
+ break;
+ case QCOMTEE_OBJECT_TYPE_NULL:
+ *object_id = QCOMTEE_MSG_OBJECT_NULL;
+
+ break;
+ }
+
+ return 0;
+}
+
+/* Release the object ID assigned in qcomtee_object_id_get(). */
+static void qcomtee_object_id_put(struct qcomtee_object_invoke_ctx *oic,
+ unsigned int object_id)
+{
+ qcomtee_idx_erase(oic, object_id);
+}
+
+/**
+ * qcomtee_local_object_get() - Get the object referenced by the ID.
+ * @oic: context to use for the invocation.
+ * @object_id: object ID.
+ *
+ * It is called on the path from QTEE, on behalf of QTEE, to obtain an
+ * instance of an object for a given ID. It increases the object's
+ * reference count on success.
+ *
+ * Return: On error, returns %NULL_QCOMTEE_OBJECT.
+ * On success, returns the object.
+ */
+static struct qcomtee_object *
+qcomtee_local_object_get(struct qcomtee_object_invoke_ctx *oic,
+ unsigned int object_id)
+{
+ struct qcomtee *qcomtee = tee_get_drvdata(oic->ctx->teedev);
+ struct qcomtee_object *object;
+
+ if (object_id == QCOMTEE_OBJECT_PRIMORDIAL)
+ return &qcomtee_primordial_object;
+
+ guard(rcu)();
+ object = xa_load(&qcomtee->xa_local_objects, object_id);
+	/* qcomtee_object_get() already checks for %NULL_QCOMTEE_OBJECT. */
+ qcomtee_object_get(object);
+
+ return object;
+}
+
+/**
+ * qcomtee_object_user_init() - Initialize an object for the user.
+ * @object: object to initialize.
+ * @ot: type of object as &enum qcomtee_object_type.
+ * @ops: instance of callbacks.
+ * @fmt: name assigned to the object.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_object_user_init(struct qcomtee_object *object,
+ enum qcomtee_object_type ot,
+ struct qcomtee_object_operations *ops,
+ const char *fmt, ...)
+{
+ va_list ap;
+ int ret;
+
+ kref_init(&object->refcount);
+ QCOMTEE_OBJECT_SET(object, QCOMTEE_OBJECT_TYPE_NULL);
+
+ va_start(ap, fmt);
+ switch (ot) {
+ case QCOMTEE_OBJECT_TYPE_NULL:
+ ret = 0;
+
+ break;
+ case QCOMTEE_OBJECT_TYPE_CB:
+ object->ops = ops;
+ if (!object->ops->dispatch)
+ return -EINVAL;
+
+		/* If kvasprintf_const() fails, the name defaults to "no-name". */
+ object->name = kvasprintf_const(GFP_KERNEL, fmt, ap);
+ QCOMTEE_OBJECT_SET(object, QCOMTEE_OBJECT_TYPE_CB);
+
+ ret = 0;
+ break;
+ case QCOMTEE_OBJECT_TYPE_ROOT:
+ case QCOMTEE_OBJECT_TYPE_TEE:
+ default:
+ ret = -EINVAL;
+ }
+ va_end(ap);
+
+ return ret;
+}
+
+/**
+ * qcomtee_object_type() - Returns the type of object represented by an ID.
+ * @object_id: object ID for the object.
+ *
+ * Similar to typeof_qcomtee_object(), but instead of receiving an object as
+ * an argument, it receives an object ID. It is used internally on the return
+ * path from QTEE.
+ *
+ * Return: Returns the type of object referenced by @object_id.
+ */
+static enum qcomtee_object_type qcomtee_object_type(unsigned int object_id)
+{
+ if (object_id == QCOMTEE_MSG_OBJECT_NULL)
+ return QCOMTEE_OBJECT_TYPE_NULL;
+
+ if (object_id & QCOMTEE_MSG_OBJECT_NS_BIT)
+ return QCOMTEE_OBJECT_TYPE_CB;
+
+ return QCOMTEE_OBJECT_TYPE_TEE;
+}
+
+/**
+ * qcomtee_object_qtee_init() - Initialize an object for QTEE.
+ * @oic: context to use for the invocation.
+ * @object: object returned.
+ * @object_id: object ID received from QTEE.
+ *
+ * Return: On failure, returns < 0 and sets @object to %NULL_QCOMTEE_OBJECT.
+ *         On success, returns 0.
+ */
+static int qcomtee_object_qtee_init(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object **object,
+ unsigned int object_id)
+{
+ int ret = 0;
+
+ switch (qcomtee_object_type(object_id)) {
+ case QCOMTEE_OBJECT_TYPE_NULL:
+ *object = NULL_QCOMTEE_OBJECT;
+
+ break;
+ case QCOMTEE_OBJECT_TYPE_CB:
+ *object = qcomtee_local_object_get(oic, object_id);
+ if (*object == NULL_QCOMTEE_OBJECT)
+ ret = -EINVAL;
+
+ break;
+
+ default: /* QCOMTEE_OBJECT_TYPE_TEE */
+ *object = qcomtee_qtee_object_alloc(oic, object_id);
+ if (*object == NULL_QCOMTEE_OBJECT)
+ ret = -ENOMEM;
+
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Marshaling API:
+ * qcomtee_prepare_msg - Prepare the inbound buffer for sending to QTEE
+ * qcomtee_update_args - Parse the QTEE response in the inbound buffer
+ * qcomtee_prepare_args - Parse the QTEE request from the outbound buffer
+ * qcomtee_update_msg - Update the outbound buffer with the response for QTEE
+ */
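+
+/*
+ * Inbound buffer layout produced by qcomtee_prepare_msg() (sketch):
+ *
+ *	+------------------------------------+  <- oic->in_msg.addr
+ *	| struct qcomtee_msg_object_invoke   |
+ *	| (object ID, op, argument counts)   |
+ *	| args[]: IB, OB, IO, OO, in order   |
+ *	+------------------------------------+
+ *	| IB payloads (copied in), then OB   |
+ *	| payloads (filled in by QTEE), each |
+ *	| qcomtee_msg_offset_align()ed       |
+ *	+------------------------------------+
+ *
+ * IO and OO entries carry object IDs in args[] and have no payload area.
+ */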
+
+static int qcomtee_prepare_msg(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *object, u32 op,
+ struct qcomtee_arg *u)
+{
+ struct qcomtee_msg_object_invoke *msg;
+ unsigned int object_id;
+ int i, ib, ob, io, oo;
+ size_t offset;
+
+ /* Use the input message buffer in 'oic'. */
+ msg = oic->in_msg.addr;
+
+ /* Start offset in a message for buffer arguments. */
+ offset = qcomtee_msg_buffer_args(struct qcomtee_msg_object_invoke,
+ qcomtee_args_len(u));
+
+ /* Get the ID of the object being invoked. */
+ if (qcomtee_object_id_get(oic, object, &object_id))
+ return -ENOSPC;
+
+ ib = 0;
+ qcomtee_arg_for_each_input_buffer(i, u) {
+ void *msgptr; /* Address of buffer payload: */
+ /* Overflow already checked in qcomtee_msg_buffers_alloc(). */
+ msg->args[ib].b.offset = offset;
+ msg->args[ib].b.size = u[i].b.size;
+
+ msgptr = qcomtee_msg_offset_to_ptr(msg, offset);
+		/* Userspace or kernel client? */
+ if (!(u[i].flags & QCOMTEE_ARG_FLAGS_UADDR))
+ memcpy(msgptr, u[i].b.addr, u[i].b.size);
+ else if (copy_from_user(msgptr, u[i].b.uaddr, u[i].b.size))
+ return -EFAULT;
+
+ offset += qcomtee_msg_offset_align(u[i].b.size);
+ ib++;
+ }
+
+ ob = ib;
+ qcomtee_arg_for_each_output_buffer(i, u) {
+ /* Overflow already checked in qcomtee_msg_buffers_alloc(). */
+ msg->args[ob].b.offset = offset;
+ msg->args[ob].b.size = u[i].b.size;
+
+ offset += qcomtee_msg_offset_align(u[i].b.size);
+ ob++;
+ }
+
+ io = ob;
+ qcomtee_arg_for_each_input_object(i, u) {
+ if (qcomtee_object_id_get(oic, u[i].o, &msg->args[io].o)) {
+ qcomtee_object_id_put(oic, object_id);
+ for (io--; io >= ob; io--)
+ qcomtee_object_id_put(oic, msg->args[io].o);
+
+ return -ENOSPC;
+ }
+
+ io++;
+ }
+
+ oo = io;
+ qcomtee_arg_for_each_output_object(i, u)
+ oo++;
+
+ /* Set object, operation, and argument counts. */
+ qcomtee_msg_init(msg, object_id, op, ib, ob, io, oo);
+
+ return 0;
+}
+
+/**
+ * qcomtee_update_args() - Parse the QTEE response in the inbound buffer.
+ * @u: array of arguments for the invocation.
+ * @oic: context to use for the invocation.
+ *
+ * @u must be the same as the one used in qcomtee_prepare_msg() when
+ * initializing the inbound buffer.
+ *
+ * On failure, it continues processing the QTEE message. The caller should
+ * do the necessary cleanup, including calling qcomtee_object_put()
+ * on the output objects.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+static int qcomtee_update_args(struct qcomtee_arg *u,
+ struct qcomtee_object_invoke_ctx *oic)
+{
+ struct qcomtee_msg_object_invoke *msg;
+ int i, ib, ob, io, oo;
+ int ret = 0;
+
+ /* Use the input message buffer in 'oic'. */
+ msg = oic->in_msg.addr;
+
+ ib = 0;
+ qcomtee_arg_for_each_input_buffer(i, u)
+ ib++;
+
+ ob = ib;
+ qcomtee_arg_for_each_output_buffer(i, u) {
+ void *msgptr; /* Address of buffer payload: */
+ /* QTEE can override the size to a smaller value. */
+ u[i].b.size = msg->args[ob].b.size;
+
+ msgptr = qcomtee_msg_offset_to_ptr(msg, msg->args[ob].b.offset);
+		/* Userspace or kernel client? */
+ if (!(u[i].flags & QCOMTEE_ARG_FLAGS_UADDR))
+ memcpy(u[i].b.addr, msgptr, u[i].b.size);
+ else if (copy_to_user(u[i].b.uaddr, msgptr, u[i].b.size))
+ ret = -EINVAL;
+
+ ob++;
+ }
+
+ io = ob;
+ qcomtee_arg_for_each_input_object(i, u)
+ io++;
+
+ oo = io;
+ qcomtee_arg_for_each_output_object(i, u) {
+ if (qcomtee_object_qtee_init(oic, &u[i].o, msg->args[oo].o))
+ ret = -EINVAL;
+
+ oo++;
+ }
+
+ return ret;
+}
+
+/**
+ * qcomtee_prepare_args() - Parse the QTEE request from the outbound buffer.
+ * @oic: context to use for the invocation.
+ *
+ * It initializes &qcomtee_object_invoke_ctx->u based on the QTEE request in
+ * the outbound buffer. It sets %QCOMTEE_ARG_TYPE_INV at the end of the array.
+ *
+ * On failure, it continues processing the QTEE message. The caller should
+ * do the necessary cleanup, including calling qcomtee_object_put()
+ * on the input objects.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+static int qcomtee_prepare_args(struct qcomtee_object_invoke_ctx *oic)
+{
+ struct qcomtee_msg_callback *msg;
+ int i, ret = 0;
+
+ /* Use the output message buffer in 'oic'. */
+ msg = oic->out_msg.addr;
+
+ qcomtee_msg_for_each_input_buffer(i, msg) {
+ oic->u[i].b.addr =
+ qcomtee_msg_offset_to_ptr(msg, msg->args[i].b.offset);
+ oic->u[i].b.size = msg->args[i].b.size;
+ oic->u[i].type = QCOMTEE_ARG_TYPE_IB;
+ }
+
+ qcomtee_msg_for_each_output_buffer(i, msg) {
+ oic->u[i].b.addr =
+ qcomtee_msg_offset_to_ptr(msg, msg->args[i].b.offset);
+ oic->u[i].b.size = msg->args[i].b.size;
+ oic->u[i].type = QCOMTEE_ARG_TYPE_OB;
+ }
+
+ qcomtee_msg_for_each_input_object(i, msg) {
+ if (qcomtee_object_qtee_init(oic, &oic->u[i].o, msg->args[i].o))
+ ret = -EINVAL;
+
+ oic->u[i].type = QCOMTEE_ARG_TYPE_IO;
+ }
+
+ qcomtee_msg_for_each_output_object(i, msg)
+ oic->u[i].type = QCOMTEE_ARG_TYPE_OO;
+
+ /* End of Arguments. */
+ oic->u[i].type = QCOMTEE_ARG_TYPE_INV;
+
+ return ret;
+}
+
+static int qcomtee_update_msg(struct qcomtee_object_invoke_ctx *oic)
+{
+ struct qcomtee_msg_callback *msg;
+ int i, ib, ob, io, oo;
+
+ /* Use the output message buffer in 'oic'. */
+ msg = oic->out_msg.addr;
+
+ ib = 0;
+ qcomtee_arg_for_each_input_buffer(i, oic->u)
+ ib++;
+
+ ob = ib;
+ qcomtee_arg_for_each_output_buffer(i, oic->u) {
+ /* Only reduce size; never increase it. */
+ if (msg->args[ob].b.size < oic->u[i].b.size)
+ return -EINVAL;
+
+ msg->args[ob].b.size = oic->u[i].b.size;
+ ob++;
+ }
+
+ io = ob;
+ qcomtee_arg_for_each_input_object(i, oic->u)
+ io++;
+
+ oo = io;
+ qcomtee_arg_for_each_output_object(i, oic->u) {
+ if (qcomtee_object_id_get(oic, oic->u[i].o, &msg->args[oo].o)) {
+ for (oo--; oo >= io; oo--)
+ qcomtee_object_id_put(oic, msg->args[oo].o);
+
+ return -ENOSPC;
+ }
+
+ oo++;
+ }
+
+ return 0;
+}
+
+/* Invoke a callback object. */
+static void qcomtee_cb_object_invoke(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_msg_callback *msg)
+{
+ int i, errno;
+ u32 op;
+
+ /* Get the object being invoked. */
+ unsigned int object_id = msg->cxt;
+ struct qcomtee_object *object;
+
+ /* QTEE cannot invoke a NULL object or objects it hosts. */
+ if (qcomtee_object_type(object_id) == QCOMTEE_OBJECT_TYPE_NULL ||
+ qcomtee_object_type(object_id) == QCOMTEE_OBJECT_TYPE_TEE) {
+ errno = -EINVAL;
+ goto out;
+ }
+
+ object = qcomtee_local_object_get(oic, object_id);
+ if (object == NULL_QCOMTEE_OBJECT) {
+ errno = -EINVAL;
+ goto out;
+ }
+
+ oic->object = object;
+
+ /* Filter bits used by transport. */
+ op = msg->op & QCOMTEE_MSG_OBJECT_OP_MASK;
+
+ switch (op) {
+ case QCOMTEE_MSG_OBJECT_OP_RELEASE:
+ qcomtee_object_id_put(oic, object_id);
+ qcomtee_object_put(object);
+ errno = 0;
+
+ break;
+ case QCOMTEE_MSG_OBJECT_OP_RETAIN:
+ qcomtee_object_get(object);
+ errno = 0;
+
+ break;
+ default:
+ errno = qcomtee_prepare_args(oic);
+ if (errno) {
+ /* Release any object that arrived as input. */
+ qcomtee_arg_for_each_input_buffer(i, oic->u)
+ qcomtee_object_put(oic->u[i].o);
+
+ break;
+ }
+
+ errno = object->ops->dispatch(oic, object, op, oic->u);
+ if (!errno) {
+ /* On success, notify at the appropriate time. */
+ oic->flags |= QCOMTEE_OIC_FLAG_NOTIFY;
+ }
+ }
+
+out:
+
+ oic->errno = errno;
+}
+
+static int
+qcomtee_object_invoke_ctx_invoke(struct qcomtee_object_invoke_ctx *oic,
+ int *result, u64 *res_type)
+{
+ phys_addr_t out_msg_paddr;
+ phys_addr_t in_msg_paddr;
+ int ret;
+ u64 res;
+
+ tee_shm_get_pa(oic->out_shm, 0, &out_msg_paddr);
+ tee_shm_get_pa(oic->in_shm, 0, &in_msg_paddr);
+ if (!(oic->flags & QCOMTEE_OIC_FLAG_BUSY))
+ ret = qcom_scm_qtee_invoke_smc(in_msg_paddr, oic->in_msg.size,
+ out_msg_paddr, oic->out_msg.size,
+ &res, res_type);
+ else
+ ret = qcom_scm_qtee_callback_response(out_msg_paddr,
+ oic->out_msg.size,
+ &res, res_type);
+
+ if (ret)
+ pr_err("QTEE returned with %d.\n", ret);
+ else
+ *result = (int)res;
+
+ return ret;
+}
+
+/**
+ * qcomtee_qtee_objects_put() - Put the callback objects in the argument array.
+ * @u: array of arguments.
+ *
+ * When qcomtee_object_do_invoke_internal() is successfully invoked,
+ * QTEE takes ownership of the callback objects. If the invocation fails,
+ * qcomtee_object_do_invoke_internal() calls qcomtee_qtee_objects_put()
+ * to mimic the release of callback objects by QTEE.
+ */
+static void qcomtee_qtee_objects_put(struct qcomtee_arg *u)
+{
+ int i;
+
+ qcomtee_arg_for_each_input_object(i, u) {
+ if (typeof_qcomtee_object(u[i].o) == QCOMTEE_OBJECT_TYPE_CB)
+ qcomtee_object_put(u[i].o);
+ }
+}
+
+/**
+ * qcomtee_object_do_invoke_internal() - Submit an invocation for an object.
+ * @oic: context to use for the current invocation.
+ * @object: object being invoked.
+ * @op: requested operation on the object.
+ * @u: array of arguments for the current invocation.
+ * @result: result returned from QTEE.
+ *
+ * The caller is responsible for keeping track of the refcount for each
+ * object, including @object. On return, the caller loses ownership of all
+ * input objects of type %QCOMTEE_OBJECT_TYPE_CB.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_object_do_invoke_internal(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *object, u32 op,
+ struct qcomtee_arg *u, int *result)
+{
+ struct qcomtee_msg_callback *cb_msg;
+ struct qcomtee_object *qto;
+ int i, ret, errno;
+ u64 res_type;
+
+ /* Allocate inbound and outbound buffers. */
+ ret = qcomtee_msg_buffers_alloc(oic, u);
+ if (ret) {
+ qcomtee_qtee_objects_put(u);
+
+ return ret;
+ }
+
+ ret = qcomtee_prepare_msg(oic, object, op, u);
+ if (ret) {
+ qcomtee_qtee_objects_put(u);
+
+ goto out;
+ }
+
+	/* Use the output message buffer in 'oic'. */
+ cb_msg = oic->out_msg.addr;
+
+ while (1) {
+ if (oic->flags & QCOMTEE_OIC_FLAG_BUSY) {
+ errno = oic->errno;
+ if (!errno)
+ errno = qcomtee_update_msg(oic);
+ qcomtee_msg_set_result(cb_msg, errno);
+ }
+
+ /* Invoke the remote object. */
+ ret = qcomtee_object_invoke_ctx_invoke(oic, result, &res_type);
+		/* Returning from a callback object's result submission: */
+ if (oic->flags & QCOMTEE_OIC_FLAG_BUSY) {
+ qto = oic->object;
+ if (qto) {
+ if (oic->flags & QCOMTEE_OIC_FLAG_NOTIFY) {
+ if (qto->ops->notify)
+ qto->ops->notify(oic, qto,
+ errno || ret);
+ }
+
+ /* Get is in qcomtee_cb_object_invoke(). */
+ qcomtee_object_put(qto);
+ }
+
+ oic->object = NULL_QCOMTEE_OBJECT;
+ oic->flags &= ~(QCOMTEE_OIC_FLAG_BUSY |
+ QCOMTEE_OIC_FLAG_NOTIFY);
+ }
+
+ if (ret) {
+ /*
+			 * Unable to finish the invocation.
+ * If QCOMTEE_OIC_FLAG_SHARED is not set, put
+ * QCOMTEE_OBJECT_TYPE_CB input objects.
+ */
+ if (!(oic->flags & QCOMTEE_OIC_FLAG_SHARED))
+ qcomtee_qtee_objects_put(u);
+ else
+ ret = -ENODEV;
+
+ goto out;
+
+ } else {
+ /*
+ * QTEE obtained ownership of QCOMTEE_OBJECT_TYPE_CB
+ * input objects in 'u'. On further failure, QTEE is
+ * responsible for releasing them.
+ */
+ oic->flags |= QCOMTEE_OIC_FLAG_SHARED;
+ }
+
+ /* Is it a callback request? */
+ if (res_type != QCOMTEE_RESULT_INBOUND_REQ_NEEDED) {
+ /*
+ * Parse results. If failed, assume the service
+ * was unavailable (i.e. QCOMTEE_MSG_ERROR_UNAVAIL)
+ * and put output objects to initiate cleanup.
+ */
+ if (!*result && qcomtee_update_args(u, oic)) {
+ *result = QCOMTEE_MSG_ERROR_UNAVAIL;
+ qcomtee_arg_for_each_output_object(i, u)
+ qcomtee_object_put(u[i].o);
+ }
+
+ break;
+
+ } else {
+ oic->flags |= QCOMTEE_OIC_FLAG_BUSY;
+ qcomtee_fetch_async_reqs(oic);
+ qcomtee_cb_object_invoke(oic, cb_msg);
+ }
+ }
+
+ qcomtee_fetch_async_reqs(oic);
+out:
+ qcomtee_msg_buffers_free(oic);
+
+ return ret;
+}
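+
+/*
+ * The invocation loop above, in rough order (sketch):
+ *
+ *	prepare inbound message -> invoke SMC
+ *	while QTEE returns QCOMTEE_RESULT_INBOUND_REQ_NEEDED:
+ *		fetch async requests from the outbound buffer,
+ *		dispatch the callback request,
+ *		write the response -> submit callback response SMC
+ *	parse the results from the inbound buffer
+ */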
+
+int qcomtee_object_do_invoke(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *object, u32 op,
+ struct qcomtee_arg *u, int *result)
+{
+	/* Users cannot set bits used by the transport. */
+ if (op & ~QCOMTEE_MSG_OBJECT_OP_MASK)
+ return -EINVAL;
+
+	/* Users can only invoke QTEE-hosted objects. */
+ if (typeof_qcomtee_object(object) != QCOMTEE_OBJECT_TYPE_TEE &&
+ typeof_qcomtee_object(object) != QCOMTEE_OBJECT_TYPE_ROOT)
+ return -EINVAL;
+
+	/* Users cannot directly issue these operations to QTEE. */
+ if (op == QCOMTEE_MSG_OBJECT_OP_RELEASE ||
+ op == QCOMTEE_MSG_OBJECT_OP_RETAIN)
+ return -EINVAL;
+
+ return qcomtee_object_do_invoke_internal(oic, object, op, u, result);
+}
+
+/**
+ * qcomtee_object_get_client_env() - Get a privileged client env. object.
+ * @oic: context to use for the current invocation.
+ *
+ * The caller should call qcomtee_object_put() on the returned object
+ * to release it.
+ *
+ * Return: On error, returns %NULL_QCOMTEE_OBJECT.
+ * On success, returns the object.
+ */
+struct qcomtee_object *
+qcomtee_object_get_client_env(struct qcomtee_object_invoke_ctx *oic)
+{
+ struct qcomtee_arg u[3] = { 0 };
+ int ret, result;
+
+ u[0].o = NULL_QCOMTEE_OBJECT;
+ u[0].type = QCOMTEE_ARG_TYPE_IO;
+ u[1].type = QCOMTEE_ARG_TYPE_OO;
+ ret = qcomtee_object_do_invoke(oic, ROOT_QCOMTEE_OBJECT,
+ QCOMTEE_ROOT_OP_REG_WITH_CREDENTIALS, u,
+ &result);
+ if (ret || result)
+ return NULL_QCOMTEE_OBJECT;
+
+ return u[1].o;
+}
+
+struct qcomtee_object *
+qcomtee_object_get_service(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *client_env, u32 uid)
+{
+ struct qcomtee_arg u[3] = { 0 };
+ int ret, result;
+
+ u[0].b.addr = &uid;
+ u[0].b.size = sizeof(uid);
+ u[0].type = QCOMTEE_ARG_TYPE_IB;
+ u[1].type = QCOMTEE_ARG_TYPE_OO;
+ ret = qcomtee_object_do_invoke(oic, client_env, QCOMTEE_CLIENT_ENV_OPEN,
+ u, &result);
+
+ if (ret || result)
+ return NULL_QCOMTEE_OBJECT;
+
+ return u[1].o;
+}
diff --git a/drivers/tee/qcomtee/mem_obj.c b/drivers/tee/qcomtee/mem_obj.c
new file mode 100644
index 000000000000..228a3e30a31b
--- /dev/null
+++ b/drivers/tee/qcomtee/mem_obj.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/mm.h>
+
+#include "qcomtee.h"
+
+/**
+ * DOC: Memory and Mapping Objects
+ *
+ * QTEE uses memory objects for memory sharing with Linux.
+ * A memory object can be a standard dma_buf or a contiguous memory range,
+ * e.g., tee_shm. A memory object should support one operation: map. When
+ * invoked by QTEE, a mapping object is generated. A mapping object supports
+ * one operation: unmap.
+ *
+ * (1) To map a memory object, QTEE invokes the primordial object with
+ * %QCOMTEE_OBJECT_OP_MAP_REGION operation; see
+ * qcomtee_primordial_obj_dispatch().
+ * (2) To unmap a memory object, QTEE releases the mapping object which
+ * calls qcomtee_mem_object_release().
+ *
+ * The map operation is implemented in the primordial object as a privileged
+ * operation instead of qcomtee_mem_object_dispatch(). Otherwise, on
+ * platforms without shm_bridge, a user could trick QTEE into writing to
+ * kernel memory by passing a user object as a memory object and returning a
+ * random physical address as the result of the mapping request.
+ */
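+
+/*
+ * The resulting flow, in rough order (sketch):
+ *
+ *	userspace passes an OBJREF with QCOMTEE_OBJREF_FLAG_MEM
+ *	  -> qcomtee_memobj_param_to_object() wraps the tee_shm
+ *	QTEE invokes the primordial object, QCOMTEE_OBJECT_OP_MAP_REGION
+ *	  -> qcomtee_mem_object_map() returns {paddr, size, perms}
+ *	QTEE releases the mapping object
+ *	  -> qcomtee_mem_object_release() puts the tee_shm
+ */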
+
+struct qcomtee_mem_object {
+ struct qcomtee_object object;
+ struct tee_shm *shm;
+	/* QTEE requires these fields to be page aligned. */
+ phys_addr_t paddr; /* Physical address of range. */
+ size_t size; /* Size of the range. */
+};
+
+#define to_qcomtee_mem_object(o) \
+ container_of((o), struct qcomtee_mem_object, object)
+
+static struct qcomtee_object_operations qcomtee_mem_object_ops;
+
+/* Is it a memory object using tee_shm? */
+int is_qcomtee_memobj_object(struct qcomtee_object *object)
+{
+ return object != NULL_QCOMTEE_OBJECT &&
+ typeof_qcomtee_object(object) == QCOMTEE_OBJECT_TYPE_CB &&
+ object->ops == &qcomtee_mem_object_ops;
+}
+
+static int qcomtee_mem_object_dispatch(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *object, u32 op,
+ struct qcomtee_arg *args)
+{
+ return -EINVAL;
+}
+
+static void qcomtee_mem_object_release(struct qcomtee_object *object)
+{
+ struct qcomtee_mem_object *mem_object = to_qcomtee_mem_object(object);
+
+ /* Matching get is in qcomtee_memobj_param_to_object(). */
+ tee_shm_put(mem_object->shm);
+ kfree(mem_object);
+}
+
+static struct qcomtee_object_operations qcomtee_mem_object_ops = {
+ .release = qcomtee_mem_object_release,
+ .dispatch = qcomtee_mem_object_dispatch,
+};
+
+/**
+ * qcomtee_memobj_param_to_object() - OBJREF parameter to &struct qcomtee_object.
+ * @object: object returned.
+ * @param: TEE parameter.
+ * @ctx: context in which the conversion should happen.
+ *
+ * @param is an OBJREF with %QCOMTEE_OBJREF_FLAG_MEM flags.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_memobj_param_to_object(struct qcomtee_object **object,
+ struct tee_param *param,
+ struct tee_context *ctx)
+{
+ struct qcomtee_mem_object *mem_object __free(kfree) = NULL;
+ struct tee_shm *shm;
+ int err;
+
+ mem_object = kzalloc(sizeof(*mem_object), GFP_KERNEL);
+ if (!mem_object)
+ return -ENOMEM;
+
+ shm = tee_shm_get_from_id(ctx, param->u.objref.id);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ /* mem-object wrapping the memref. */
+ err = qcomtee_object_user_init(&mem_object->object,
+ QCOMTEE_OBJECT_TYPE_CB,
+ &qcomtee_mem_object_ops, "tee-shm-%d",
+ shm->id);
+ if (err) {
+ tee_shm_put(shm);
+
+ return err;
+ }
+
+ mem_object->paddr = shm->paddr;
+ mem_object->size = shm->size;
+ mem_object->shm = shm;
+
+ *object = &no_free_ptr(mem_object)->object;
+
+ return 0;
+}
+
+/* Reverse what qcomtee_memobj_param_to_object() does. */
+int qcomtee_memobj_param_from_object(struct tee_param *param,
+ struct qcomtee_object *object,
+ struct tee_context *ctx)
+{
+ struct qcomtee_mem_object *mem_object;
+
+ mem_object = to_qcomtee_mem_object(object);
+	/* Ensure the memory object is returned to the context it originated from. */
+ if (mem_object->shm->ctx != ctx)
+ return -EINVAL;
+
+ param->u.objref.id = mem_object->shm->id;
+ param->u.objref.flags = QCOMTEE_OBJREF_FLAG_MEM;
+
+ /* Passing shm->id to userspace; drop the reference. */
+ qcomtee_object_put(object);
+
+ return 0;
+}
+
+/**
+ * qcomtee_mem_object_map() - Map a memory object.
+ * @object: memory object.
+ * @map_object: created mapping object.
+ * @mem_paddr: physical address of the memory.
+ * @mem_size: size of the memory.
+ * @perms: QTEE access permissions.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_mem_object_map(struct qcomtee_object *object,
+ struct qcomtee_object **map_object, u64 *mem_paddr,
+ u64 *mem_size, u32 *perms)
+{
+ struct qcomtee_mem_object *mem_object = to_qcomtee_mem_object(object);
+
+ /* Reuses the memory object as a mapping object by re-sharing it. */
+ qcomtee_object_get(&mem_object->object);
+
+ *map_object = &mem_object->object;
+ *mem_paddr = mem_object->paddr;
+ *mem_size = mem_object->size;
+ *perms = QCOM_SCM_PERM_RW;
+
+ return 0;
+}
diff --git a/drivers/tee/qcomtee/primordial_obj.c b/drivers/tee/qcomtee/primordial_obj.c
new file mode 100644
index 000000000000..b6f811e83b11
--- /dev/null
+++ b/drivers/tee/qcomtee/primordial_obj.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/delay.h>
+#include "qcomtee.h"
+
+/**
+ * DOC: Primordial Object
+ *
+ * After boot, the kernel provides a static object of type
+ * %QCOMTEE_OBJECT_TYPE_CB called the primordial object. This object is used
+ * for native kernel services or privileged operations.
+ *
+ * We support:
+ * - %QCOMTEE_OBJECT_OP_MAP_REGION to map a memory object and return a
+ *   mapping object and mapping information (see qcomtee_mem_object_map()).
+ * - %QCOMTEE_OBJECT_OP_YIELD to yield the CPU on behalf of the thread
+ *   running in QTEE.
+ * - %QCOMTEE_OBJECT_OP_SLEEP to wait for a period of time.
+ */
+
+#define QCOMTEE_OBJECT_OP_MAP_REGION 0
+#define QCOMTEE_OBJECT_OP_YIELD 1
+#define QCOMTEE_OBJECT_OP_SLEEP 2
+
+/* Mapping information format as expected by QTEE. */
+struct qcomtee_mapping_info {
+ u64 paddr;
+ u64 len;
+ u32 perms;
+} __packed;
+
+static int
+qcomtee_primordial_obj_dispatch(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *primordial_object_unused,
+ u32 op, struct qcomtee_arg *args)
+{
+ struct qcomtee_mapping_info *map_info;
+ struct qcomtee_object *mem_object;
+ struct qcomtee_object *map_object;
+ int err = 0;
+
+ switch (op) {
+ case QCOMTEE_OBJECT_OP_YIELD:
+ cond_resched();
+ /* No output object. */
+ oic->data = NULL;
+
+ break;
+ case QCOMTEE_OBJECT_OP_SLEEP:
+		/* Check the message format matches QCOMTEE_OBJECT_OP_SLEEP. */
+ if (qcomtee_args_len(args) != 1 ||
+ args[0].type != QCOMTEE_ARG_TYPE_IB ||
+ args[0].b.size < sizeof(u32))
+ return -EINVAL;
+
+ msleep(*(u32 *)(args[0].b.addr));
+ /* No output object. */
+ oic->data = NULL;
+
+ break;
+ case QCOMTEE_OBJECT_OP_MAP_REGION:
+ if (qcomtee_args_len(args) != 3 ||
+ args[0].type != QCOMTEE_ARG_TYPE_OB ||
+ args[1].type != QCOMTEE_ARG_TYPE_IO ||
+ args[2].type != QCOMTEE_ARG_TYPE_OO ||
+ args[0].b.size < sizeof(struct qcomtee_mapping_info))
+ return -EINVAL;
+
+ map_info = args[0].b.addr;
+ mem_object = args[1].o;
+
+ qcomtee_mem_object_map(mem_object, &map_object,
+ &map_info->paddr, &map_info->len,
+ &map_info->perms);
+
+ args[2].o = map_object;
+		/* One output object; pass it to notify() for cleanup. */
+ oic->data = map_object;
+
+ qcomtee_object_put(mem_object);
+
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
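+
+/*
+ * Illustrative, not part of this patch: the argument array the dispatcher
+ * above accepts for %QCOMTEE_OBJECT_OP_SLEEP -- one IB argument carrying
+ * the delay in milliseconds, terminated by an INV entry.
+ */
+static void __maybe_unused example_sleep_args(void)
+{
+	static u32 delay_ms = 100;
+	struct qcomtee_arg args[] = {
+		{ .type = QCOMTEE_ARG_TYPE_IB,
+		  .b = { .addr = &delay_ms, .size = sizeof(delay_ms) } },
+		{ .type = QCOMTEE_ARG_TYPE_INV }, /* end-of-array marker */
+	};
+
+	/* qcomtee_args_len() counts entries up to the INV terminator. */
+	WARN_ON(qcomtee_args_len(args) != 1);
+}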
+
+/* Called after submitting the callback response. */
+static void qcomtee_primordial_obj_notify(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *unused,
+ int err)
+{
+ struct qcomtee_object *object = oic->data;
+
+ /* If err, QTEE did not obtain mapping object. Drop it. */
+ if (object && err)
+ qcomtee_object_put(object);
+}
+
+static struct qcomtee_object_operations qcomtee_primordial_obj_ops = {
+ .dispatch = qcomtee_primordial_obj_dispatch,
+ .notify = qcomtee_primordial_obj_notify,
+};
+
+struct qcomtee_object qcomtee_primordial_object = {
+ .name = "primordial",
+ .object_type = QCOMTEE_OBJECT_TYPE_CB,
+ .ops = &qcomtee_primordial_obj_ops
+};
diff --git a/drivers/tee/qcomtee/qcomtee.h b/drivers/tee/qcomtee/qcomtee.h
new file mode 100644
index 000000000000..f39bf63fd1c2
--- /dev/null
+++ b/drivers/tee/qcomtee/qcomtee.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef QCOMTEE_H
+#define QCOMTEE_H
+
+#include <linux/kobject.h>
+#include <linux/tee_core.h>
+
+#include "qcomtee_msg.h"
+#include "qcomtee_object.h"
+
+/* Flags relating to object reference. */
+#define QCOMTEE_OBJREF_FLAG_TEE BIT(0)
+#define QCOMTEE_OBJREF_FLAG_USER BIT(1)
+#define QCOMTEE_OBJREF_FLAG_MEM BIT(2)
+
+/**
+ * struct qcomtee - Main service struct.
+ * @teedev: client device.
+ * @pool: shared memory pool.
+ * @ctx: driver private context.
+ * @oic: context to use for the current driver invocation.
+ * @wq: workqueue for QTEE async operations.
+ * @xa_local_objects: array of objects exported to QTEE.
+ * @xa_last_id: next ID to allocate.
+ * @qtee_version: QTEE version.
+ */
+struct qcomtee {
+ struct tee_device *teedev;
+ struct tee_shm_pool *pool;
+ struct tee_context *ctx;
+ struct qcomtee_object_invoke_ctx oic;
+ struct workqueue_struct *wq;
+ struct xarray xa_local_objects;
+ u32 xa_last_id;
+ u32 qtee_version;
+};
+
+void qcomtee_fetch_async_reqs(struct qcomtee_object_invoke_ctx *oic);
+struct qcomtee_object *qcomtee_idx_erase(struct qcomtee_object_invoke_ctx *oic,
+ u32 idx);
+
+struct tee_shm_pool *qcomtee_shm_pool_alloc(void);
+void qcomtee_msg_buffers_free(struct qcomtee_object_invoke_ctx *oic);
+int qcomtee_msg_buffers_alloc(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_arg *u);
+
+/**
+ * qcomtee_object_do_invoke_internal() - Submit an invocation for an object.
+ * @oic: context to use for the current invocation.
+ * @object: object being invoked.
+ * @op: requested operation on the object.
+ * @u: array of arguments for the current invocation.
+ * @result: result returned from QTEE.
+ *
+ * The caller is responsible for keeping track of the refcount for each
+ * object, including @object. On return, the caller loses ownership of all
+ * input objects of type %QCOMTEE_OBJECT_TYPE_CB.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_object_do_invoke_internal(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *object, u32 op,
+ struct qcomtee_arg *u, int *result);
+
+/**
+ * struct qcomtee_context_data - Clients' or supplicants' context.
+ * @qtee_objects_idr: QTEE objects in this context.
+ * @qtee_lock: mutex for @qtee_objects_idr.
+ * @reqs_idr: requests in this context that hold ID.
+ * @reqs_list: FIFO for requests in PROCESSING or QUEUED state.
+ * @reqs_lock: mutex for @reqs_idr, @reqs_list and request states.
+ * @req_c: completion used when the supplicant is waiting for requests.
+ * @released: state of this context.
+ */
+struct qcomtee_context_data {
+ struct idr qtee_objects_idr;
+ /* Synchronize access to @qtee_objects_idr. */
+ struct mutex qtee_lock;
+
+ struct idr reqs_idr;
+ struct list_head reqs_list;
+	/* Synchronize access to @reqs_idr, @reqs_list, and request state updates. */
+ struct mutex reqs_lock;
+
+ struct completion req_c;
+
+ bool released;
+};
+
+int qcomtee_context_add_qtee_object(struct tee_param *param,
+ struct qcomtee_object *object,
+ struct tee_context *ctx);
+int qcomtee_context_find_qtee_object(struct qcomtee_object **object,
+ struct tee_param *param,
+ struct tee_context *ctx);
+void qcomtee_context_del_qtee_object(struct tee_param *param,
+ struct tee_context *ctx);
+
+int qcomtee_objref_to_arg(struct qcomtee_arg *arg, struct tee_param *param,
+ struct tee_context *ctx);
+int qcomtee_objref_from_arg(struct tee_param *param, struct qcomtee_arg *arg,
+ struct tee_context *ctx);
+
+/* OBJECTS: */
+
+/* (1) User Object API. */
+
+int is_qcomtee_user_object(struct qcomtee_object *object);
+void qcomtee_user_object_set_notify(struct qcomtee_object *object, bool notify);
+void qcomtee_requests_destroy(struct qcomtee_context_data *ctxdata);
+int qcomtee_user_param_to_object(struct qcomtee_object **object,
+ struct tee_param *param,
+ struct tee_context *ctx);
+int qcomtee_user_param_from_object(struct tee_param *param,
+ struct qcomtee_object *object,
+ struct tee_context *ctx);
+
+/**
+ * struct qcomtee_user_object_request_data - Data for user object request.
+ * @id: ID assigned to the request.
+ * @object_id: Object ID being invoked by QTEE.
+ * @op: Requested operation on object.
+ * @np: Number of parameters in the request.
+ */
+struct qcomtee_user_object_request_data {
+ int id;
+ u64 object_id;
+ u32 op;
+ int np;
+};
+
+int qcomtee_user_object_select(struct tee_context *ctx,
+ struct tee_param *params, int num_params,
+ void __user *uaddr, size_t size,
+ struct qcomtee_user_object_request_data *data);
+int qcomtee_user_object_submit(struct tee_context *ctx,
+ struct tee_param *params, int num_params,
+ int req_id, int errno);
+
+/* (2) Primordial Object. */
+extern struct qcomtee_object qcomtee_primordial_object;
+
+/* (3) Memory Object API. */
+
+/* Is it a memory object using tee_shm? */
+int is_qcomtee_memobj_object(struct qcomtee_object *object);
+
+/**
+ * qcomtee_memobj_param_to_object() - OBJREF parameter to &struct qcomtee_object.
+ * @object: object returned.
+ * @param: TEE parameter.
+ * @ctx: context in which the conversion should happen.
+ *
+ * @param is an OBJREF with the %QCOMTEE_OBJREF_FLAG_MEM flag set.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_memobj_param_to_object(struct qcomtee_object **object,
+ struct tee_param *param,
+ struct tee_context *ctx);
+
+/* Reverse what qcomtee_memobj_param_to_object() does. */
+int qcomtee_memobj_param_from_object(struct tee_param *param,
+ struct qcomtee_object *object,
+ struct tee_context *ctx);
+
+/**
+ * qcomtee_mem_object_map() - Map a memory object.
+ * @object: memory object.
+ * @map_object: created mapping object.
+ * @mem_paddr: physical address of the memory.
+ * @mem_size: size of the memory.
+ * @perms: QTEE access permissions.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_mem_object_map(struct qcomtee_object *object,
+ struct qcomtee_object **map_object, u64 *mem_paddr,
+ u64 *mem_size, u32 *perms);
+
+#endif /* QCOMTEE_H */
diff --git a/drivers/tee/qcomtee/qcomtee_msg.h b/drivers/tee/qcomtee/qcomtee_msg.h
new file mode 100644
index 000000000000..878f70178a5b
--- /dev/null
+++ b/drivers/tee/qcomtee/qcomtee_msg.h
@@ -0,0 +1,304 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef QCOMTEE_MSG_H
+#define QCOMTEE_MSG_H
+
+#include <linux/bitfield.h>
+
+/**
+ * DOC: Qualcomm TEE (QTEE) Transport Message
+ *
+ * There are two buffers shared with QTEE: inbound and outbound buffers.
+ * The inbound buffer is used for direct object invocation, and the outbound
+ * buffer is used to make a request from QTEE to the kernel; i.e., a callback
+ * request.
+ *
+ * The unused tail of the outbound buffer is also used for sending and
+ * receiving asynchronous messages. An asynchronous message is independent of
+ * the current object invocation (i.e., contents of the inbound buffer) or
+ * callback request (i.e., the head of the outbound buffer); see
+ * qcomtee_get_async_buffer(). It is used by endpoints (QTEE or kernel) as an
+ * optimization to reduce the number of context switches between the secure and
+ * non-secure worlds.
+ *
+ * For instance, QTEE never sends an explicit callback request to release an
+ * object in the kernel. Instead, it sends asynchronous release messages in the
+ * outbound buffer when QTEE returns from the previous direct object invocation,
+ * or appends asynchronous release messages after the current callback request.
+ *
+ * QTEE supports two types of arguments in a message: buffer and object
+ * arguments. Depending on the direction of data flow, they could be input
+ * buffer (IB) to QTEE, output buffer (OB) from QTEE, input object (IO) to QTEE,
+ * or output object (OO) from QTEE. Object arguments hold object IDs. Buffer
+ * arguments hold (offset, size) pairs into the inbound or outbound buffers.
+ *
+ * QTEE holds an object table for objects it hosts and exposes to the kernel.
+ * An object ID is an index to the object table in QTEE.
+ *
+ * For the direct object invocation message format in the inbound buffer, see
+ * &struct qcomtee_msg_object_invoke. For the callback request message format
+ * in the outbound buffer, see &struct qcomtee_msg_callback. For the message
+ * format for asynchronous messages in the outbound buffer, see
+ * &struct qcomtee_async_msg_hdr.
+ */
+
+/**
+ * define QCOMTEE_MSG_OBJECT_NS_BIT - Non-secure bit
+ *
+ * Object ID is a globally unique 32-bit number. IDs referencing objects
+ * in the kernel should have %QCOMTEE_MSG_OBJECT_NS_BIT set.
+ */
+#define QCOMTEE_MSG_OBJECT_NS_BIT BIT(31)
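+
+/*
+ * For example (illustrative): a kernel-hosted object entered at index 5 of
+ * the kernel's object table would be referenced in messages as
+ * (QCOMTEE_MSG_OBJECT_NS_BIT | 5).
+ */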
+
+/* Static object IDs recognized by QTEE. */
+#define QCOMTEE_MSG_OBJECT_NULL (0U)
+#define QCOMTEE_MSG_OBJECT_ROOT (1U)
+
+/* Definitions from QTEE as part of the transport protocol. */
+
+/* qcomtee_msg_arg is an argument as recognized by QTEE. */
+union qcomtee_msg_arg {
+ struct {
+ u32 offset;
+ u32 size;
+ } b;
+ u32 o;
+};
+
+/* IB and OB payloads in QTEE messages should be at 64-bit boundaries. */
+#define qcomtee_msg_offset_align(o) ALIGN((o), sizeof(u64))
+
+/* Operations for objects are 32-bit. Transport uses the upper 16 bits. */
+#define QCOMTEE_MSG_OBJECT_OP_MASK GENMASK(15, 0)
+
+/* Reserved Operation IDs sent to QTEE: */
+/* QCOMTEE_MSG_OBJECT_OP_RELEASE - Reduces the refcount and releases the object.
+ * QCOMTEE_MSG_OBJECT_OP_RETAIN - Increases the refcount.
+ *
+ * These operation IDs are valid for all objects.
+ */
+
+#define QCOMTEE_MSG_OBJECT_OP_RELEASE (QCOMTEE_MSG_OBJECT_OP_MASK - 0)
+#define QCOMTEE_MSG_OBJECT_OP_RETAIN (QCOMTEE_MSG_OBJECT_OP_MASK - 1)
+
+/* Subset of operations supported by QTEE root object. */
+
+#define QCOMTEE_ROOT_OP_REG_WITH_CREDENTIALS 5
+#define QCOMTEE_ROOT_OP_NOTIFY_DOMAIN_CHANGE 4
+#define QCOMTEE_ROOT_OP_ADCI_ACCEPT 8
+#define QCOMTEE_ROOT_OP_ADCI_SHUTDOWN 9
+
+/* Subset of operations supported by client_env object. */
+
+#define QCOMTEE_CLIENT_ENV_OPEN 0
+
+/* List of available QTEE service UIDs and subset of operations. */
+
+#define QCOMTEE_FEATURE_VER_UID 2033
+#define QCOMTEE_FEATURE_VER_OP_GET 0
+/* Get QTEE version number. */
+#define QCOMTEE_FEATURE_VER_OP_GET_QTEE_ID 10
+#define QTEE_VERSION_GET_MAJOR(x) (((x) >> 22) & 0xffU)
+#define QTEE_VERSION_GET_MINOR(x) (((x) >> 12) & 0xffU)
+#define QTEE_VERSION_GET_PATCH(x) (((x) >> 0) & 0xfffU)
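+
+/*
+ * Example (illustrative): QTEE version 3.2.1 is encoded as
+ * (3 << 22) | (2 << 12) | 1 == 0x00c02001, so the macros above yield
+ * major 3, minor 2, and patch 1.
+ */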
+
+/* Response types as returned from qcomtee_object_invoke_ctx_invoke(). */
+
+/* The message contains a callback request. */
+#define QCOMTEE_RESULT_INBOUND_REQ_NEEDED 3
+
+/**
+ * struct qcomtee_msg_object_invoke - Direct object invocation message.
+ * @cxt: object ID hosted in QTEE.
+ * @op: operation for the object.
+ * @counts: number of different types of arguments in @args.
+ * @args: array of arguments.
+ *
+ * @counts consists of four 4-bit fields. Bits 0 - 3 represent the number of
+ * input buffers, bits 4 - 7 represent the number of output buffers,
+ * bits 8 - 11 represent the number of input objects, and bits 12 - 15
+ * represent the number of output objects. The remaining bits should be zero.
+ *
+ * 15 12 11 8 7 4 3 0
+ * +----------------+----------------+----------------+----------------+
+ * | #OO objects | #IO objects | #OB buffers | #IB buffers |
+ * +----------------+----------------+----------------+----------------+
+ *
+ * The maximum number of arguments of each type is defined by
+ * %QCOMTEE_ARGS_PER_TYPE.
+ */
+struct qcomtee_msg_object_invoke {
+ u32 cxt;
+ u32 op;
+ u32 counts;
+ union qcomtee_msg_arg args[];
+};
+
+/* Bit masks for the four 4-bit nibbles holding the counts. */
+#define QCOMTEE_MASK_IB GENMASK(3, 0)
+#define QCOMTEE_MASK_OB GENMASK(7, 4)
+#define QCOMTEE_MASK_IO GENMASK(11, 8)
+#define QCOMTEE_MASK_OO GENMASK(15, 12)
+
+/**
+ * struct qcomtee_msg_callback - Callback request message.
+ * @result: result of operation @op on the object referenced by @cxt.
+ * @cxt: object ID hosted in the kernel.
+ * @op: operation for the object.
+ * @counts: number of different types of arguments in @args.
+ * @args: array of arguments.
+ *
+ * For details of @counts, see &qcomtee_msg_object_invoke.counts.
+ */
+struct qcomtee_msg_callback {
+ u32 result;
+ u32 cxt;
+ u32 op;
+ u32 counts;
+ union qcomtee_msg_arg args[];
+};
+
+/* Offset in the message for the beginning of the buffer argument's contents. */
+#define qcomtee_msg_buffer_args(t, n) \
+ qcomtee_msg_offset_align(struct_size_t(t, args, n))
+/* Pointer to the beginning of a buffer argument's content at an offset. */
+#define qcomtee_msg_offset_to_ptr(m, off) ((void *)&((char *)(m))[(off)])
+
+/* Some helpers to manage msg.counts. */
+
+static inline unsigned int qcomtee_msg_num_ib(u32 counts)
+{
+ return FIELD_GET(QCOMTEE_MASK_IB, counts);
+}
+
+static inline unsigned int qcomtee_msg_num_ob(u32 counts)
+{
+ return FIELD_GET(QCOMTEE_MASK_OB, counts);
+}
+
+static inline unsigned int qcomtee_msg_num_io(u32 counts)
+{
+ return FIELD_GET(QCOMTEE_MASK_IO, counts);
+}
+
+static inline unsigned int qcomtee_msg_num_oo(u32 counts)
+{
+ return FIELD_GET(QCOMTEE_MASK_OO, counts);
+}
+
+static inline unsigned int qcomtee_msg_idx_ib(u32 counts)
+{
+ return 0;
+}
+
+static inline unsigned int qcomtee_msg_idx_ob(u32 counts)
+{
+ return qcomtee_msg_num_ib(counts);
+}
+
+static inline unsigned int qcomtee_msg_idx_io(u32 counts)
+{
+ return qcomtee_msg_idx_ob(counts) + qcomtee_msg_num_ob(counts);
+}
+
+static inline unsigned int qcomtee_msg_idx_oo(u32 counts)
+{
+ return qcomtee_msg_idx_io(counts) + qcomtee_msg_num_io(counts);
+}
+
+#define qcomtee_msg_for_each(i, first, num) \
+ for ((i) = (first); (i) < (first) + (num); (i)++)
+
+#define qcomtee_msg_for_each_input_buffer(i, m) \
+ qcomtee_msg_for_each(i, qcomtee_msg_idx_ib((m)->counts), \
+ qcomtee_msg_num_ib((m)->counts))
+
+#define qcomtee_msg_for_each_output_buffer(i, m) \
+ qcomtee_msg_for_each(i, qcomtee_msg_idx_ob((m)->counts), \
+ qcomtee_msg_num_ob((m)->counts))
+
+#define qcomtee_msg_for_each_input_object(i, m) \
+ qcomtee_msg_for_each(i, qcomtee_msg_idx_io((m)->counts), \
+ qcomtee_msg_num_io((m)->counts))
+
+#define qcomtee_msg_for_each_output_object(i, m) \
+ qcomtee_msg_for_each(i, qcomtee_msg_idx_oo((m)->counts), \
+ qcomtee_msg_num_oo((m)->counts))
+
+/* Sum of arguments in a message. */
+#define qcomtee_msg_args(m) \
+ (qcomtee_msg_idx_oo((m)->counts) + qcomtee_msg_num_oo((m)->counts))
+
+static inline void qcomtee_msg_init(struct qcomtee_msg_object_invoke *msg,
+ u32 cxt, u32 op, int in_buffer,
+ int out_buffer, int in_object,
+ int out_object)
+{
+ u32 counts = 0;
+
+ counts |= (in_buffer & 0xfU);
+ counts |= ((out_buffer - in_buffer) & 0xfU) << 4;
+ counts |= ((in_object - out_buffer) & 0xfU) << 8;
+ counts |= ((out_object - in_object) & 0xfU) << 12;
+
+ msg->cxt = cxt;
+ msg->op = op;
+ msg->counts = counts;
+}
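+
+/*
+ * Note (illustrative): the last four arguments of qcomtee_msg_init() are
+ * cumulative end indices into the IB, OB, IO, OO ordered argument array,
+ * not per-type counts. For a message with one IB and one OO argument:
+ *
+ *	qcomtee_msg_init(msg, cxt, op, 1, 1, 1, 2);
+ *
+ * packs counts == 0x1001 (one IB in bits 0-3, one OO in bits 12-15).
+ */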
+
+/* Generic error codes. */
+#define QCOMTEE_MSG_OK 0 /* non-specific success code. */
+#define QCOMTEE_MSG_ERROR 1 /* non-specific error. */
+#define QCOMTEE_MSG_ERROR_INVALID 2 /* unsupported/unrecognized request. */
+#define QCOMTEE_MSG_ERROR_SIZE_IN 3 /* supplied buffer/string too large. */
+#define QCOMTEE_MSG_ERROR_SIZE_OUT 4 /* supplied output buffer too small. */
+#define QCOMTEE_MSG_ERROR_USERBASE 10 /* start of user-defined error range. */
+
+/* Transport layer error codes. */
+#define QCOMTEE_MSG_ERROR_DEFUNCT -90 /* object no longer exists. */
+#define QCOMTEE_MSG_ERROR_ABORT -91 /* calling thread must exit. */
+#define QCOMTEE_MSG_ERROR_BADOBJ -92 /* invalid object context. */
+#define QCOMTEE_MSG_ERROR_NOSLOTS -93 /* caller's object table full. */
+#define QCOMTEE_MSG_ERROR_MAXARGS -94 /* too many args. */
+#define QCOMTEE_MSG_ERROR_MAXDATA -95 /* buffers too large. */
+#define QCOMTEE_MSG_ERROR_UNAVAIL -96 /* the request could not be processed. */
+#define QCOMTEE_MSG_ERROR_KMEM -97 /* kernel out of memory. */
+#define QCOMTEE_MSG_ERROR_REMOTE -98 /* local method sent to remote object. */
+#define QCOMTEE_MSG_ERROR_BUSY -99 /* Object is busy. */
+#define QCOMTEE_MSG_ERROR_TIMEOUT -103 /* Call Back Object invocation timed out. */
+
+static inline void qcomtee_msg_set_result(struct qcomtee_msg_callback *cb_msg,
+ int err)
+{
+ if (!err) {
+ cb_msg->result = QCOMTEE_MSG_OK;
+ } else if (err < 0) {
+ /* If err < 0, then it is a transport error. */
+ switch (err) {
+ case -ENOMEM:
+ cb_msg->result = QCOMTEE_MSG_ERROR_KMEM;
+ break;
+ case -ENODEV:
+ cb_msg->result = QCOMTEE_MSG_ERROR_DEFUNCT;
+ break;
+ case -ENOSPC:
+ case -EBUSY:
+ cb_msg->result = QCOMTEE_MSG_ERROR_BUSY;
+ break;
+ case -EBADF:
+ case -EINVAL:
+ cb_msg->result = QCOMTEE_MSG_ERROR_UNAVAIL;
+ break;
+ default:
+ cb_msg->result = QCOMTEE_MSG_ERROR;
+ }
+ } else {
+		/* If err > 0, then it is a user-defined error; pass it as is. */
+ cb_msg->result = err;
+ }
+}
+
+#endif /* QCOMTEE_MSG_H */
diff --git a/drivers/tee/qcomtee/qcomtee_object.h b/drivers/tee/qcomtee/qcomtee_object.h
new file mode 100644
index 000000000000..5221449be7db
--- /dev/null
+++ b/drivers/tee/qcomtee/qcomtee_object.h
@@ -0,0 +1,316 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef QCOMTEE_OBJECT_H
+#define QCOMTEE_OBJECT_H
+
+#include <linux/completion.h>
+#include <linux/kref.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+struct qcomtee_object;
+
+/**
+ * DOC: Overview
+ *
+ * qcomtee_object provides object refcounting, ID allocation for objects hosted
+ * in the kernel, and necessary message marshaling for Qualcomm TEE (QTEE).
+ *
+ * To invoke an object in QTEE, the user calls qcomtee_object_do_invoke()
+ * while passing an instance of &struct qcomtee_object and the requested
+ * operation + arguments.
+ *
+ * After boot, QTEE provides a static object %ROOT_QCOMTEE_OBJECT (of type
+ * %QCOMTEE_OBJECT_TYPE_ROOT). The root object is invoked to pass the user's
+ * credentials and obtain other instances of &struct qcomtee_object (of type
+ * %QCOMTEE_OBJECT_TYPE_TEE) that represent services and TAs in QTEE;
+ * see &enum qcomtee_object_type.
+ *
+ * The objects received from QTEE are refcounted. So the owner of these objects
+ * can issue qcomtee_object_get() to increase the refcount and pass objects
+ * to other clients, or issue qcomtee_object_put() to decrease the refcount
+ * and release the resources in QTEE.
+ *
+ * The kernel can host services accessible to QTEE. A driver should embed
+ * an instance of &struct qcomtee_object in the struct it wants to export to
+ * QTEE (this is called a callback object). It issues qcomtee_object_user_init()
+ * to set the dispatch() operation for the callback object and set its type
+ * to %QCOMTEE_OBJECT_TYPE_CB.
+ *
+ * core.c holds an object table for callback objects. An object ID is assigned
+ * to each callback object, which is an index to the object table. QTEE uses
+ * these IDs to reference or invoke callback objects.
+ *
+ * If QTEE invokes a callback object in the kernel, the dispatch() operation is
+ * called in the context of the thread that originally called
+ * qcomtee_object_do_invoke().
+ */
+
+/**
+ * enum qcomtee_object_type - Object types.
+ * @QCOMTEE_OBJECT_TYPE_TEE: object hosted on QTEE.
+ * @QCOMTEE_OBJECT_TYPE_CB: object hosted on kernel.
+ * @QCOMTEE_OBJECT_TYPE_ROOT: 'primordial' object.
+ * @QCOMTEE_OBJECT_TYPE_NULL: NULL object.
+ *
+ * The primordial object is used for bootstrapping the IPC connection between
+ * the kernel and QTEE. It is invoked by the kernel when it wants to get a
+ * 'client env'.
+ */
+enum qcomtee_object_type {
+ QCOMTEE_OBJECT_TYPE_TEE,
+ QCOMTEE_OBJECT_TYPE_CB,
+ QCOMTEE_OBJECT_TYPE_ROOT,
+ QCOMTEE_OBJECT_TYPE_NULL,
+};
+
+/**
+ * enum qcomtee_arg_type - Type of QTEE argument.
+ * @QCOMTEE_ARG_TYPE_INV: invalid type.
+ * @QCOMTEE_ARG_TYPE_OB: output buffer (OB).
+ * @QCOMTEE_ARG_TYPE_OO: output object (OO).
+ * @QCOMTEE_ARG_TYPE_IB: input buffer (IB).
+ * @QCOMTEE_ARG_TYPE_IO: input object (IO).
+ *
+ * Use the invalid type to specify the end of the argument array.
+ */
+enum qcomtee_arg_type {
+ QCOMTEE_ARG_TYPE_INV = 0,
+ QCOMTEE_ARG_TYPE_OB,
+ QCOMTEE_ARG_TYPE_OO,
+ QCOMTEE_ARG_TYPE_IB,
+ QCOMTEE_ARG_TYPE_IO,
+ QCOMTEE_ARG_TYPE_NR,
+};
+
+/**
+ * define QCOMTEE_ARGS_PER_TYPE - Maximum arguments of a specific type.
+ *
+ * The QTEE transport protocol limits the maximum number of arguments of
+ * a specific type (i.e., IB, OB, IO, and OO).
+ */
+#define QCOMTEE_ARGS_PER_TYPE 16
+
+/* Maximum arguments that can fit in a QTEE message, ignoring the type. */
+#define QCOMTEE_ARGS_MAX (QCOMTEE_ARGS_PER_TYPE * (QCOMTEE_ARG_TYPE_NR - 1))
+
+struct qcomtee_buffer {
+ union {
+ void *addr;
+ void __user *uaddr;
+ };
+ size_t size;
+};
+
+/**
+ * struct qcomtee_arg - Argument for QTEE object invocation.
+ * @type: type of argument as &enum qcomtee_arg_type.
+ * @flags: extra flags.
+ * @b: address and size if the type of argument is a buffer.
+ * @o: object instance if the type of argument is an object.
+ *
+ * &qcomtee_arg.flags only accepts %QCOMTEE_ARG_FLAGS_UADDR for now, which
+ * states that &qcomtee_arg.b contains a userspace address in uaddr.
+ */
+struct qcomtee_arg {
+ enum qcomtee_arg_type type;
+/* 'b.uaddr' holds a __user address. */
+#define QCOMTEE_ARG_FLAGS_UADDR BIT(0)
+ unsigned int flags;
+ union {
+ struct qcomtee_buffer b;
+ struct qcomtee_object *o;
+ };
+};
+
+static inline int qcomtee_args_len(struct qcomtee_arg *args)
+{
+ int i = 0;
+
+ while (args[i].type != QCOMTEE_ARG_TYPE_INV)
+ i++;
+ return i;
+}
+
+/* Context is busy (callback is in progress). */
+#define QCOMTEE_OIC_FLAG_BUSY BIT(1)
+/* Context needs to notify the current object. */
+#define QCOMTEE_OIC_FLAG_NOTIFY BIT(2)
+/* Context has shared state with QTEE. */
+#define QCOMTEE_OIC_FLAG_SHARED BIT(3)
+
+/**
+ * struct qcomtee_object_invoke_ctx - QTEE context for object invocation.
+ * @ctx: TEE context for this invocation.
+ * @flags: flags for the invocation context.
+ * @errno: error code for the invocation.
+ * @object: current object invoked in this callback context.
+ * @u: array of arguments for the current invocation (+1 for ending arg).
+ * @in_msg: inbound buffer shared with QTEE.
+ * @out_msg: outbound buffer shared with QTEE.
+ * @in_shm: TEE shm allocated for inbound buffer.
+ * @out_shm: TEE shm allocated for outbound buffer.
+ * @data: extra data attached to this context.
+ */
+struct qcomtee_object_invoke_ctx {
+ struct tee_context *ctx;
+ unsigned long flags;
+ int errno;
+
+ struct qcomtee_object *object;
+ struct qcomtee_arg u[QCOMTEE_ARGS_MAX + 1];
+
+ struct qcomtee_buffer in_msg;
+ struct qcomtee_buffer out_msg;
+ struct tee_shm *in_shm;
+ struct tee_shm *out_shm;
+
+ void *data;
+};
+
+static inline struct qcomtee_object_invoke_ctx *
+qcomtee_object_invoke_ctx_alloc(struct tee_context *ctx)
+{
+ struct qcomtee_object_invoke_ctx *oic;
+
+ oic = kzalloc(sizeof(*oic), GFP_KERNEL);
+ if (oic)
+ oic->ctx = ctx;
+ return oic;
+}
+
+/**
+ * qcomtee_object_do_invoke() - Submit an invocation for an object.
+ * @oic: context to use for the current invocation.
+ * @object: object being invoked.
+ * @op: requested operation on the object.
+ * @u: array of arguments for the current invocation.
+ * @result: result returned from QTEE.
+ *
+ * The caller is responsible for keeping track of the refcount for each object,
+ * including @object. On return, the caller loses ownership of all input
+ * objects of type %QCOMTEE_OBJECT_TYPE_CB.
+ *
+ * @object can be of %QCOMTEE_OBJECT_TYPE_ROOT or %QCOMTEE_OBJECT_TYPE_TEE.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_object_do_invoke(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *object, u32 op,
+ struct qcomtee_arg *u, int *result);
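+
+/*
+ * Illustrative sketch, not part of this patch: a minimal argument-less
+ * invocation. example_invoke() and the op value 0 are hypothetical; real
+ * operation IDs are defined by the invoked QTEE service.
+ */
+static inline int example_invoke(struct tee_context *ctx,
+				 struct qcomtee_object *object)
+{
+	/* Zero-initialized, so u[0].type == QCOMTEE_ARG_TYPE_INV ends it. */
+	struct qcomtee_arg u[1] = {};
+	struct qcomtee_object_invoke_ctx *oic;
+	int ret, result;
+
+	oic = qcomtee_object_invoke_ctx_alloc(ctx);
+	if (!oic)
+		return -ENOMEM;
+
+	ret = qcomtee_object_do_invoke(oic, object, 0, u, &result);
+	kfree(oic);
+
+	return ret ? ret : result;
+}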
+
+/**
+ * struct qcomtee_object_operations - Callback object operations.
+ * @release: release the object if QTEE is not using it.
+ * @dispatch: dispatch the operation requested by QTEE.
+ * @notify: report the status of any pending response submitted by @dispatch.
+ */
+struct qcomtee_object_operations {
+ void (*release)(struct qcomtee_object *object);
+ int (*dispatch)(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *object, u32 op,
+ struct qcomtee_arg *args);
+ void (*notify)(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *object, int err);
+};
+
+/**
+ * struct qcomtee_object - QTEE or kernel object.
+ * @name: object name.
+ * @refcount: reference counter.
+ * @object_type: object type as &enum qcomtee_object_type.
+ * @info: extra information for the object.
+ * @ops: callback operations for objects of type %QCOMTEE_OBJECT_TYPE_CB.
+ * @work: work for async operations on the object.
+ *
+ * @work is used for releasing objects of %QCOMTEE_OBJECT_TYPE_TEE type.
+ */
+struct qcomtee_object {
+ const char *name;
+ struct kref refcount;
+
+ enum qcomtee_object_type object_type;
+ struct object_info {
+ unsigned long qtee_id;
+ /* TEE context for QTEE object async requests. */
+ struct tee_context *qcomtee_async_ctx;
+ } info;
+
+ struct qcomtee_object_operations *ops;
+ struct work_struct work;
+};
+
+/* Static instances of qcomtee_object objects. */
+#define NULL_QCOMTEE_OBJECT ((struct qcomtee_object *)(0))
+extern struct qcomtee_object qcomtee_object_root;
+#define ROOT_QCOMTEE_OBJECT (&qcomtee_object_root)
+
+static inline enum qcomtee_object_type
+typeof_qcomtee_object(struct qcomtee_object *object)
+{
+ if (object == NULL_QCOMTEE_OBJECT)
+ return QCOMTEE_OBJECT_TYPE_NULL;
+ return object->object_type;
+}
+
+static inline const char *qcomtee_object_name(struct qcomtee_object *object)
+{
+ if (object == NULL_QCOMTEE_OBJECT)
+ return "null";
+
+ if (!object->name)
+ return "no-name";
+ return object->name;
+}
+
+/**
+ * qcomtee_object_user_init() - Initialize an object for the user.
+ * @object: object to initialize.
+ * @ot: type of object as &enum qcomtee_object_type.
+ * @ops: instance of callbacks.
+ * @fmt: name assigned to the object.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_object_user_init(struct qcomtee_object *object,
+ enum qcomtee_object_type ot,
+ struct qcomtee_object_operations *ops,
+ const char *fmt, ...) __printf(4, 5);
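+
+/*
+ * Illustrative sketch, not part of this patch: a driver exporting a
+ * kernel-hosted callback object, as described in the Overview above
+ * (my_service and my_ops are hypothetical):
+ *
+ *	struct my_service {
+ *		struct qcomtee_object object;
+ *		// driver state
+ *	};
+ *
+ *	svc = kzalloc(sizeof(*svc), GFP_KERNEL);
+ *	err = qcomtee_object_user_init(&svc->object, QCOMTEE_OBJECT_TYPE_CB,
+ *				       &my_ops, "my-service");
+ *
+ * my_ops.dispatch() recovers the driver state with
+ * container_of(object, struct my_service, object), and my_ops.release()
+ * frees it once QTEE no longer references the object.
+ */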
+
+/* Object release is RCU protected. */
+int qcomtee_object_get(struct qcomtee_object *object);
+void qcomtee_object_put(struct qcomtee_object *object);
+
+#define qcomtee_arg_for_each(i, args) \
+ for (i = 0; args[i].type != QCOMTEE_ARG_TYPE_INV; i++)
+
+/* Next argument of type @type after index @i. */
+int qcomtee_next_arg_type(struct qcomtee_arg *u, int i,
+ enum qcomtee_arg_type type);
+
+/* Iterate over argument of given type. */
+#define qcomtee_arg_for_each_type(i, args, at) \
+ for (i = qcomtee_next_arg_type(args, 0, at); \
+ args[i].type != QCOMTEE_ARG_TYPE_INV; \
+ i = qcomtee_next_arg_type(args, i + 1, at))
+
+#define qcomtee_arg_for_each_input_buffer(i, args) \
+ qcomtee_arg_for_each_type(i, args, QCOMTEE_ARG_TYPE_IB)
+#define qcomtee_arg_for_each_output_buffer(i, args) \
+ qcomtee_arg_for_each_type(i, args, QCOMTEE_ARG_TYPE_OB)
+#define qcomtee_arg_for_each_input_object(i, args) \
+ qcomtee_arg_for_each_type(i, args, QCOMTEE_ARG_TYPE_IO)
+#define qcomtee_arg_for_each_output_object(i, args) \
+ qcomtee_arg_for_each_type(i, args, QCOMTEE_ARG_TYPE_OO)
+
+struct qcomtee_object *
+qcomtee_object_get_client_env(struct qcomtee_object_invoke_ctx *oic);
+
+struct qcomtee_object *
+qcomtee_object_get_service(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *client_env, u32 uid);
+
+#endif /* QCOMTEE_OBJECT_H */
diff --git a/drivers/tee/qcomtee/shm.c b/drivers/tee/qcomtee/shm.c
new file mode 100644
index 000000000000..580bd25f98ed
--- /dev/null
+++ b/drivers/tee/qcomtee/shm.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/firmware/qcom/qcom_tzmem.h>
+#include <linux/mm.h>
+
+#include "qcomtee.h"
+
+/**
+ * define MAX_OUTBOUND_BUFFER_SIZE - Maximum size of outbound buffers.
+ *
+ * The size of outbound buffer depends on QTEE callback requests.
+ */
+#define MAX_OUTBOUND_BUFFER_SIZE SZ_4K
+
+/**
+ * define MAX_INBOUND_BUFFER_SIZE - Maximum size of the inbound buffer.
+ *
+ * The size of the inbound buffer depends on the user's requests,
+ * specifically the number of IB and OB arguments. If an invocation
+ * requires a size larger than %MAX_INBOUND_BUFFER_SIZE, the user should
+ * consider using another form of shared memory with QTEE.
+ */
+#define MAX_INBOUND_BUFFER_SIZE SZ_4M
+
+/**
+ * qcomtee_msg_buffers_alloc() - Allocate inbound and outbound buffers.
+ * @oic: context to use for the current invocation.
+ * @u: array of arguments for the current invocation.
+ *
+ * It calculates the size of inbound and outbound buffers based on the
+ * arguments in @u. It allocates the buffers from the teedev pool.
+ *
+ * Return: On success, returns 0. On error, returns < 0.
+ */
+int qcomtee_msg_buffers_alloc(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_arg *u)
+{
+ struct tee_context *ctx = oic->ctx;
+ struct tee_shm *shm;
+ size_t size;
+ int i;
+
+ /* Start offset in a message for buffer arguments. */
+ size = qcomtee_msg_buffer_args(struct qcomtee_msg_object_invoke,
+ qcomtee_args_len(u));
+ if (size > MAX_INBOUND_BUFFER_SIZE)
+ return -EINVAL;
+
+ /* Add size of IB arguments. */
+ qcomtee_arg_for_each_input_buffer(i, u) {
+ size = size_add(size, qcomtee_msg_offset_align(u[i].b.size));
+ if (size > MAX_INBOUND_BUFFER_SIZE)
+ return -EINVAL;
+ }
+
+ /* Add size of OB arguments. */
+ qcomtee_arg_for_each_output_buffer(i, u) {
+ size = size_add(size, qcomtee_msg_offset_align(u[i].b.size));
+ if (size > MAX_INBOUND_BUFFER_SIZE)
+ return -EINVAL;
+ }
+
+ shm = tee_shm_alloc_priv_buf(ctx, size);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ /* Allocate inbound buffer. */
+ oic->in_shm = shm;
+ shm = tee_shm_alloc_priv_buf(ctx, MAX_OUTBOUND_BUFFER_SIZE);
+ if (IS_ERR(shm)) {
+ tee_shm_free(oic->in_shm);
+
+ return PTR_ERR(shm);
+ }
+ /* Allocate outbound buffer. */
+ oic->out_shm = shm;
+
+ oic->in_msg.addr = tee_shm_get_va(oic->in_shm, 0);
+ oic->in_msg.size = tee_shm_get_size(oic->in_shm);
+ oic->out_msg.addr = tee_shm_get_va(oic->out_shm, 0);
+ oic->out_msg.size = tee_shm_get_size(oic->out_shm);
+	/* QTEE assumes unused buffers are zeroed. */
+ memzero_explicit(oic->in_msg.addr, oic->in_msg.size);
+ memzero_explicit(oic->out_msg.addr, oic->out_msg.size);
+
+ return 0;
+}
+
+void qcomtee_msg_buffers_free(struct qcomtee_object_invoke_ctx *oic)
+{
+ tee_shm_free(oic->in_shm);
+ tee_shm_free(oic->out_shm);
+}
+
+/* Dynamic shared memory pool based on tee_dyn_shm_alloc_helper(). */
+
+static int qcomtee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start)
+{
+ return qcom_tzmem_shm_bridge_create(shm->paddr, shm->size,
+ &shm->sec_world_id);
+}
+
+static int qcomtee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
+{
+ qcom_tzmem_shm_bridge_delete(shm->sec_world_id);
+
+ return 0;
+}
+
+static int pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
+ size_t size, size_t align)
+{
+ return tee_dyn_shm_alloc_helper(shm, size, align, qcomtee_shm_register);
+}
+
+static void pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm)
+{
+ tee_dyn_shm_free_helper(shm, qcomtee_shm_unregister);
+}
+
+static void pool_op_destroy_pool(struct tee_shm_pool *pool)
+{
+ kfree(pool);
+}
+
+static const struct tee_shm_pool_ops pool_ops = {
+ .alloc = pool_op_alloc,
+ .free = pool_op_free,
+ .destroy_pool = pool_op_destroy_pool,
+};
+
+struct tee_shm_pool *qcomtee_shm_pool_alloc(void)
+{
+ struct tee_shm_pool *pool;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ pool->ops = &pool_ops;
+
+ return pool;
+}
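+
+/*
+ * Illustrative, not part of this patch: the probe path is expected to hand
+ * this pool to the TEE subsystem, roughly as below (qcomtee_desc is a
+ * hypothetical &struct tee_desc):
+ *
+ *	pool = qcomtee_shm_pool_alloc();
+ *	if (IS_ERR(pool))
+ *		return PTR_ERR(pool);
+ *	teedev = tee_device_alloc(&qcomtee_desc, NULL, pool, qcomtee);
+ */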
diff --git a/drivers/tee/qcomtee/user_obj.c b/drivers/tee/qcomtee/user_obj.c
new file mode 100644
index 000000000000..0139905f2684
--- /dev/null
+++ b/drivers/tee/qcomtee/user_obj.c
@@ -0,0 +1,692 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "qcomtee.h"
+
+/**
+ * DOC: User Objects aka Supplicants
+ *
+ * Any userspace process with access to the TEE device file can behave as a
+ * supplicant by creating a user object. Any TEE parameter of type OBJREF with
+ * the %QCOMTEE_OBJREF_FLAG_USER flag set is considered a user object.
+ *
+ * A supplicant uses qcomtee_user_object_select() (i.e. TEE_IOC_SUPPL_RECV) to
+ * receive a QTEE user object request and qcomtee_user_object_submit()
+ * (i.e. TEE_IOC_SUPPL_SEND) to submit a response. QTEE expects to receive the
+ * response, including OB and OO in a specific order in the message; parameters
+ * submitted with qcomtee_user_object_submit() should maintain this order.
+ */
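+
+/*
+ * Illustrative sketch, not part of this patch: the userspace half of this
+ * exchange, built on the generic TEE_IOC_SUPPL_RECV/SEND ioctls from
+ * <linux/tee.h>. How the request ID, object ID, op, and parameters are
+ * laid out for QTEE is driver-specific and not shown; error handling is
+ * elided.
+ *
+ *	char buf[4096] __attribute__((aligned(8)));
+ *	struct tee_ioctl_buf_data data = {
+ *		.buf_ptr = (uintptr_t)buf,
+ *		.buf_len = sizeof(buf),
+ *	};
+ *	struct tee_iocl_supp_recv_arg *recv_arg = (void *)buf;
+ *
+ *	for (;;) {
+ *		recv_arg->num_params = 4;
+ *		if (ioctl(fd, TEE_IOC_SUPPL_RECV, &data))
+ *			break;
+ *		// serve the request; fill OB and OO params in order
+ *		if (ioctl(fd, TEE_IOC_SUPPL_SEND, &data))
+ *			break;
+ *	}
+ */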
+
+/**
+ * struct qcomtee_user_object - User object.
+ * @object: &struct qcomtee_object representing the user object.
+ * @ctx: context for which the user object is defined.
+ * @object_id: object ID in @ctx.
+ * @notify: notify on release.
+ *
+ * Any object managed in userspace is represented by this struct.
+ * If @notify is set, a notification message is sent back to userspace
+ * upon release.
+ */
+struct qcomtee_user_object {
+ struct qcomtee_object object;
+ struct tee_context *ctx;
+ u64 object_id;
+ bool notify;
+};
+
+#define to_qcomtee_user_object(o) \
+ container_of((o), struct qcomtee_user_object, object)
+
+static struct qcomtee_object_operations qcomtee_user_object_ops;
+
+/* Is it a user object? */
+int is_qcomtee_user_object(struct qcomtee_object *object)
+{
+ return object != NULL_QCOMTEE_OBJECT &&
+ typeof_qcomtee_object(object) == QCOMTEE_OBJECT_TYPE_CB &&
+ object->ops == &qcomtee_user_object_ops;
+}
+
+/* Set the user object's 'notify on release' flag. */
+void qcomtee_user_object_set_notify(struct qcomtee_object *object, bool notify)
+{
+ if (is_qcomtee_user_object(object))
+ to_qcomtee_user_object(object)->notify = notify;
+}
+
+/* Supplicant Requests: */
+
+/**
+ * enum qcomtee_req_state - Current state of request.
+ * @QCOMTEE_REQ_QUEUED: Request is waiting for supplicant.
+ * @QCOMTEE_REQ_PROCESSING: Request has been picked by the supplicant.
+ * @QCOMTEE_REQ_PROCESSED: Response has been submitted for the request.
+ */
+enum qcomtee_req_state {
+ QCOMTEE_REQ_QUEUED = 1,
+ QCOMTEE_REQ_PROCESSING,
+ QCOMTEE_REQ_PROCESSED,
+};
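+
+/*
+ * Request life cycle (summary of the code below):
+ *
+ *  ureq_enqueue()        ureq_select()        qcomtee_user_object_submit()
+ * ---------------> QUEUED ------------> PROCESSING ------------> PROCESSED
+ */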
+
+/* User requests sent to supplicants. */
+struct qcomtee_ureq {
+ enum qcomtee_req_state state;
+
+ /* User Request: */
+ int req_id;
+ u64 object_id;
+ u32 op;
+ struct qcomtee_arg *args;
+ int errno;
+
+ struct list_head node;
+	struct completion c; /* Completion for whoever waits. */
+};
+
+/*
+ * Placeholder for a PROCESSING request in qcomtee_context_data.reqs_idr.
+ *
+ * If the thread that calls qcomtee_object_do_invoke() dies while the
+ * supplicant is processing the request, replace the entry in reqs_idr
+ * with empty_ureq. This ensures that (1) the req_id remains busy and is not
+ * reused, and (2) the supplicant fails to submit the response and performs
+ * the necessary rollback.
+ */
+static struct qcomtee_ureq empty_ureq = { .state = QCOMTEE_REQ_PROCESSING };
+
+/* Enqueue a user request for a context and assign a request ID. */
+static int ureq_enqueue(struct qcomtee_context_data *ctxdata,
+ struct qcomtee_ureq *ureq)
+{
+ int ret;
+
+ guard(mutex)(&ctxdata->reqs_lock);
+ /* Supplicant is dying. */
+ if (ctxdata->released)
+ return -ENODEV;
+
+ /* Allocate an ID and queue the request. */
+ ret = idr_alloc(&ctxdata->reqs_idr, ureq, 0, 0, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ ureq->req_id = ret;
+ ureq->state = QCOMTEE_REQ_QUEUED;
+ list_add_tail(&ureq->node, &ctxdata->reqs_list);
+
+ return 0;
+}
+
+/**
+ * ureq_dequeue() - Dequeue a user request from a context.
+ * @ctxdata: context data for a context to dequeue the request.
+ * @req_id: ID of the request to be dequeued.
+ *
+ * It dequeues a user request and releases its request ID.
+ *
+ * Context: The caller should hold &qcomtee_context_data->reqs_lock.
+ * Return: Returns the user request associated with this ID; otherwise, NULL.
+ */
+static struct qcomtee_ureq *ureq_dequeue(struct qcomtee_context_data *ctxdata,
+ int req_id)
+{
+ struct qcomtee_ureq *ureq;
+
+ ureq = idr_remove(&ctxdata->reqs_idr, req_id);
+ if (ureq == &empty_ureq || !ureq)
+ return NULL;
+
+ list_del(&ureq->node);
+
+ return ureq;
+}
+
+/**
+ * ureq_select() - Select the next request in a context.
+ * @ctxdata: context data for a context to pop a request.
+ * @ubuf_size: size of the available buffer for UBUF parameters.
+ * @num_params: number of entries for the TEE parameter array.
+ *
+ * It checks if @num_params is large enough to fit the next request arguments.
+ * It checks if @ubuf_size is large enough to fit IB buffer arguments.
+ *
+ * Context: The caller should hold &qcomtee_context_data->reqs_lock.
+ * Return: On success, returns a request; returns NULL if no request is
+ *	   queued, or an ERR_PTR() on failure.
+ */
+static struct qcomtee_ureq *ureq_select(struct qcomtee_context_data *ctxdata,
+ size_t ubuf_size, int num_params)
+{
+ struct qcomtee_ureq *req, *ureq = NULL;
+ struct qcomtee_arg *u;
+ int i;
+
+	/* Find the first queued request. */
+ list_for_each_entry(req, &ctxdata->reqs_list, node) {
+ if (req->state == QCOMTEE_REQ_QUEUED) {
+ ureq = req;
+ break;
+ }
+ }
+
+ if (!ureq)
+ return NULL;
+
+ u = ureq->args;
+	/* (1) Are there enough TEE parameters? */
+ if (num_params < qcomtee_args_len(u))
+ return ERR_PTR(-EINVAL);
+ /* (2) Is there enough space to pass input buffers? */
+ qcomtee_arg_for_each_input_buffer(i, u) {
+ ubuf_size = size_sub(ubuf_size, u[i].b.size);
+ if (ubuf_size == SIZE_MAX)
+ return ERR_PTR(-EINVAL);
+
+ ubuf_size = round_down(ubuf_size, 8);
+ }
+
+ return ureq;
+}
+
+/* Gets called when the user closes the device. */
+void qcomtee_requests_destroy(struct qcomtee_context_data *ctxdata)
+{
+ struct qcomtee_ureq *req, *ureq;
+
+ guard(mutex)(&ctxdata->reqs_lock);
+ /* So ureq_enqueue() refuses new requests from QTEE. */
+ ctxdata->released = true;
+ /* ureqs in reqs_list are in QUEUED or PROCESSING (!= empty_ureq) state. */
+ list_for_each_entry_safe(ureq, req, &ctxdata->reqs_list, node) {
+ ureq_dequeue(ctxdata, ureq->req_id);
+
+ if (ureq->op != QCOMTEE_MSG_OBJECT_OP_RELEASE) {
+ ureq->state = QCOMTEE_REQ_PROCESSED;
+ ureq->errno = -ENODEV;
+
+ complete(&ureq->c);
+ } else {
+ kfree(ureq);
+ }
+ }
+}
+
+/* User Object API. */
+
+/* User object dispatcher. */
+static int qcomtee_user_object_dispatch(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *object, u32 op,
+ struct qcomtee_arg *args)
+{
+ struct qcomtee_user_object *uo = to_qcomtee_user_object(object);
+ struct qcomtee_context_data *ctxdata = uo->ctx->data;
+ struct qcomtee_ureq *ureq __free(kfree) = NULL;
+ int errno;
+
+ ureq = kzalloc(sizeof(*ureq), GFP_KERNEL);
+ if (!ureq)
+ return -ENOMEM;
+
+ init_completion(&ureq->c);
+ ureq->object_id = uo->object_id;
+ ureq->op = op;
+ ureq->args = args;
+
+ /* Queue the request. */
+ if (ureq_enqueue(ctxdata, ureq))
+ return -ENODEV;
+ /* Wakeup supplicant to process it. */
+ complete(&ctxdata->req_c);
+
+ /*
+ * Wait for the supplicant to process the request. Wait as KILLABLE
+	 * in case the supplicant and the invoke thread both run in the same
+	 * process, the supplicant crashes, or the shutdown sequence kills
+	 * the supplicant first; otherwise, the wait could get stuck forever.
+ *
+ * If the supplicant processes long-running requests, also use
+ * TASK_FREEZABLE to allow the device to safely suspend if needed.
+ */
+ if (!wait_for_completion_state(&ureq->c,
+ TASK_KILLABLE | TASK_FREEZABLE)) {
+ errno = ureq->errno;
+ if (!errno)
+ oic->data = no_free_ptr(ureq);
+ } else {
+ enum qcomtee_req_state prev_state;
+
+ errno = -ENODEV;
+
+ scoped_guard(mutex, &ctxdata->reqs_lock) {
+ prev_state = ureq->state;
+ /* Replace with empty_ureq to keep req_id reserved. */
+ if (prev_state == QCOMTEE_REQ_PROCESSING) {
+ list_del(&ureq->node);
+ idr_replace(&ctxdata->reqs_idr,
+ &empty_ureq, ureq->req_id);
+
+ /* Remove as supplicant has never seen this request. */
+ } else if (prev_state == QCOMTEE_REQ_QUEUED) {
+ ureq_dequeue(ctxdata, ureq->req_id);
+ }
+ }
+
+ /* Supplicant did some work, do not discard it. */
+ if (prev_state == QCOMTEE_REQ_PROCESSED) {
+ errno = ureq->errno;
+ if (!errno)
+ oic->data = no_free_ptr(ureq);
+ }
+ }
+
+ return errno;
+}
+
+/* Gets called after submitting the dispatcher response. */
+static void qcomtee_user_object_notify(struct qcomtee_object_invoke_ctx *oic,
+ struct qcomtee_object *unused_object,
+ int err)
+{
+ struct qcomtee_ureq *ureq = oic->data;
+ struct qcomtee_arg *u = ureq->args;
+ int i;
+
+ /*
+ * If err, there was a transport issue, and QTEE did not receive the
+ * response for the dispatcher. Release the callback object created for
+ * QTEE, in addition to the copies of objects kept for the drivers.
+ */
+ qcomtee_arg_for_each_output_object(i, u) {
+ if (err &&
+ (typeof_qcomtee_object(u[i].o) == QCOMTEE_OBJECT_TYPE_CB))
+ qcomtee_object_put(u[i].o);
+ qcomtee_object_put(u[i].o);
+ }
+
+ kfree(ureq);
+}
+
+static void qcomtee_user_object_release(struct qcomtee_object *object)
+{
+ struct qcomtee_user_object *uo = to_qcomtee_user_object(object);
+ struct qcomtee_context_data *ctxdata = uo->ctx->data;
+ struct qcomtee_ureq *ureq;
+
+ /* RELEASE does not require any argument. */
+ static struct qcomtee_arg args[] = { { .type = QCOMTEE_ARG_TYPE_INV } };
+
+ if (!uo->notify)
+ goto out_no_notify;
+
+ ureq = kzalloc(sizeof(*ureq), GFP_KERNEL);
+ if (!ureq)
+ goto out_no_notify;
+
+ /* QUEUE a release request: */
+ ureq->object_id = uo->object_id;
+ ureq->op = QCOMTEE_MSG_OBJECT_OP_RELEASE;
+ ureq->args = args;
+ if (ureq_enqueue(ctxdata, ureq)) {
+ kfree(ureq);
+ /* Ignore the notification if it cannot be queued. */
+ goto out_no_notify;
+ }
+
+ complete(&ctxdata->req_c);
+
+out_no_notify:
+ teedev_ctx_put(uo->ctx);
+ kfree(uo);
+}
+
+static struct qcomtee_object_operations qcomtee_user_object_ops = {
+ .release = qcomtee_user_object_release,
+ .notify = qcomtee_user_object_notify,
+ .dispatch = qcomtee_user_object_dispatch,
+};
+
+/**
+ * qcomtee_user_param_to_object() - OBJREF parameter to &struct qcomtee_object.
+ * @object: object returned.
+ * @param: TEE parameter.
+ * @ctx: context in which the conversion should happen.
+ *
+ * @param is an OBJREF with the %QCOMTEE_OBJREF_FLAG_USER flag set.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_user_param_to_object(struct qcomtee_object **object,
+ struct tee_param *param,
+ struct tee_context *ctx)
+{
+ struct qcomtee_user_object *user_object __free(kfree) = NULL;
+ int err;
+
+ user_object = kzalloc(sizeof(*user_object), GFP_KERNEL);
+ if (!user_object)
+ return -ENOMEM;
+
+ user_object->ctx = ctx;
+ user_object->object_id = param->u.objref.id;
+ /* By default, always notify userspace upon release. */
+ user_object->notify = true;
+ err = qcomtee_object_user_init(&user_object->object,
+ QCOMTEE_OBJECT_TYPE_CB,
+ &qcomtee_user_object_ops, "uo-%llu",
+ param->u.objref.id);
+ if (err)
+ return err;
+ /* Matching teedev_ctx_put() is in qcomtee_user_object_release(). */
+ teedev_ctx_get(ctx);
+
+ *object = &no_free_ptr(user_object)->object;
+
+ return 0;
+}
+
+/* Reverse what qcomtee_user_param_to_object() does. */
+int qcomtee_user_param_from_object(struct tee_param *param,
+ struct qcomtee_object *object,
+ struct tee_context *ctx)
+{
+ struct qcomtee_user_object *uo;
+
+ uo = to_qcomtee_user_object(object);
+ /* Ensure the object is in the same context as the caller. */
+ if (uo->ctx != ctx)
+ return -EINVAL;
+
+ param->u.objref.id = uo->object_id;
+ param->u.objref.flags = QCOMTEE_OBJREF_FLAG_USER;
+
+ /* User objects are valid in userspace; do not keep a copy. */
+ qcomtee_object_put(object);
+
+ return 0;
+}
+
+/**
+ * qcomtee_cb_params_from_args() - Convert QTEE arguments to TEE parameters.
+ * @params: TEE parameters.
+ * @u: QTEE arguments.
+ * @num_params: number of elements in the parameter array.
+ * @ubuf_addr: user buffer for arguments of type %QCOMTEE_ARG_TYPE_IB.
+ * @ubuf_size: size of the user buffer.
+ * @ctx: context in which the conversion should happen.
+ *
+ * It expects @params to have enough entries for @u. Entries in @params are of
+ * %TEE_IOCTL_PARAM_ATTR_TYPE_NONE.
+ *
+ * Return: On success, returns the number of input parameters;
+ * on failure, returns < 0.
+ */
+static int qcomtee_cb_params_from_args(struct tee_param *params,
+ struct qcomtee_arg *u, int num_params,
+ void __user *ubuf_addr, size_t ubuf_size,
+ struct tee_context *ctx)
+{
+ int i, np;
+ void __user *uaddr;
+
+ qcomtee_arg_for_each(i, u) {
+ switch (u[i].type) {
+ case QCOMTEE_ARG_TYPE_IB:
+ params[i].attr = TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT;
+
+ /* Underflow already checked in ureq_select(). */
+ ubuf_size = round_down(ubuf_size - u[i].b.size, 8);
+ uaddr = (void __user *)(ubuf_addr + ubuf_size);
+
+ params[i].u.ubuf.uaddr = uaddr;
+ params[i].u.ubuf.size = u[i].b.size;
+ if (copy_to_user(params[i].u.ubuf.uaddr, u[i].b.addr,
+ u[i].b.size))
+ goto out_failed;
+
+ break;
+ case QCOMTEE_ARG_TYPE_OB:
+ params[i].attr = TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT;
+			/* Let the user know the maximum size QTEE expects. */
+ params[i].u.ubuf.size = u[i].b.size;
+
+ break;
+ case QCOMTEE_ARG_TYPE_IO:
+ params[i].attr = TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT;
+ if (qcomtee_objref_from_arg(&params[i], &u[i], ctx))
+ goto out_failed;
+
+ break;
+ case QCOMTEE_ARG_TYPE_OO:
+ params[i].attr =
+ TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT;
+
+ break;
+ default: /* Never get here! */
+ goto out_failed;
+ }
+ }
+
+ return i;
+
+out_failed:
+ /* Undo qcomtee_objref_from_arg(). */
+ for (np = i; np >= 0; np--) {
+ if (params[np].attr == TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT)
+ qcomtee_context_del_qtee_object(&params[np], ctx);
+ }
+
+ /* Release any IO objects not processed. */
+ for (; u[i].type; i++) {
+ if (u[i].type == QCOMTEE_ARG_TYPE_IO)
+ qcomtee_object_put(u[i].o);
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * qcomtee_cb_params_to_args() - Convert TEE parameters to QTEE arguments.
+ * @u: QTEE arguments.
+ * @params: TEE parameters.
+ * @num_params: number of elements in the parameter array.
+ * @ctx: context in which the conversion should happen.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+static int qcomtee_cb_params_to_args(struct qcomtee_arg *u,
+ struct tee_param *params, int num_params,
+ struct tee_context *ctx)
+{
+ int i;
+
+ qcomtee_arg_for_each(i, u) {
+ switch (u[i].type) {
+ case QCOMTEE_ARG_TYPE_IB:
+ if (params[i].attr !=
+ TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT)
+ goto out_failed;
+
+ break;
+ case QCOMTEE_ARG_TYPE_OB:
+ if (params[i].attr !=
+ TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT)
+ goto out_failed;
+
+			/* Client cannot send more data than requested. */
+ if (params[i].u.ubuf.size > u[i].b.size)
+ goto out_failed;
+
+ if (copy_from_user(u[i].b.addr, params[i].u.ubuf.uaddr,
+ params[i].u.ubuf.size))
+ goto out_failed;
+
+ u[i].b.size = params[i].u.ubuf.size;
+
+ break;
+ case QCOMTEE_ARG_TYPE_IO:
+ if (params[i].attr !=
+ TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT)
+ goto out_failed;
+
+ break;
+ case QCOMTEE_ARG_TYPE_OO:
+ if (params[i].attr !=
+ TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT)
+ goto out_failed;
+
+ if (qcomtee_objref_to_arg(&u[i], &params[i], ctx))
+ goto out_failed;
+
+ break;
+ default: /* Never get here! */
+ goto out_failed;
+ }
+ }
+
+ return 0;
+
+out_failed:
+ /* Undo qcomtee_objref_to_arg(). */
+ for (i--; i >= 0; i--) {
+ if (u[i].type != QCOMTEE_ARG_TYPE_OO)
+ continue;
+
+ qcomtee_user_object_set_notify(u[i].o, false);
+ if (typeof_qcomtee_object(u[i].o) == QCOMTEE_OBJECT_TYPE_CB)
+ qcomtee_object_put(u[i].o);
+
+ qcomtee_object_put(u[i].o);
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * qcomtee_user_object_select() - Select a request for a user object.
+ * @ctx: context to look for a user object.
+ * @params: parameters for @op.
+ * @num_params: number of elements in the parameter array.
+ * @uaddr: user buffer for output UBUF parameters.
+ * @size: size of user buffer @uaddr.
+ * @data: information for the selected request.
+ *
+ * @params is filled along with @data for the selected request.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_user_object_select(struct tee_context *ctx,
+ struct tee_param *params, int num_params,
+ void __user *uaddr, size_t size,
+ struct qcomtee_user_object_request_data *data)
+{
+ struct qcomtee_context_data *ctxdata = ctx->data;
+ struct qcomtee_ureq *ureq;
+ int ret;
+
+ /*
+ * Hold the reqs_lock not only for ureq_select() and updating the ureq
+ * state to PROCESSING but for the entire duration of ureq access.
+ * This prevents qcomtee_user_object_dispatch() from freeing
+	 * ureq while it is still in use if the client dies.
+ */
+
+ while (1) {
+ scoped_guard(mutex, &ctxdata->reqs_lock) {
+ ureq = ureq_select(ctxdata, size, num_params);
+ if (!ureq)
+ goto wait_for_request;
+
+ if (IS_ERR(ureq))
+ return PTR_ERR(ureq);
+
+ /* Processing the request 'QUEUED -> PROCESSING'. */
+ ureq->state = QCOMTEE_REQ_PROCESSING;
+			/* Prepare the user request: */
+ data->id = ureq->req_id;
+ data->object_id = ureq->object_id;
+ data->op = ureq->op;
+ ret = qcomtee_cb_params_from_args(params, ureq->args,
+ num_params, uaddr,
+ size, ctx);
+ if (ret >= 0)
+ goto done_request;
+
+ /* Something is wrong with the request: */
+ ureq_dequeue(ctxdata, data->id);
+ /* Send error to QTEE. */
+ ureq->state = QCOMTEE_REQ_PROCESSED;
+ ureq->errno = ret;
+
+ complete(&ureq->c);
+ }
+
+ continue;
+wait_for_request:
+ /* Wait for a new QUEUED request. */
+ if (wait_for_completion_interruptible(&ctxdata->req_c))
+ return -ERESTARTSYS;
+ }
+
+done_request:
+ /* No one is waiting for the response. */
+ if (data->op == QCOMTEE_MSG_OBJECT_OP_RELEASE) {
+ scoped_guard(mutex, &ctxdata->reqs_lock)
+ ureq_dequeue(ctxdata, data->id);
+ kfree(ureq);
+ }
+
+ data->np = ret;
+
+ return 0;
+}
+
+/**
+ * qcomtee_user_object_submit() - Submit a response for a user object.
+ * @ctx: context to look for a user object.
+ * @params: returned parameters.
+ * @num_params: number of elements in the parameter array.
+ * @req_id: request ID for the response.
+ * @errno: result of user object invocation.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_user_object_submit(struct tee_context *ctx,
+ struct tee_param *params, int num_params,
+ int req_id, int errno)
+{
+ struct qcomtee_context_data *ctxdata = ctx->data;
+ struct qcomtee_ureq *ureq;
+
+ /* See comments for reqs_lock in qcomtee_user_object_select(). */
+ guard(mutex)(&ctxdata->reqs_lock);
+
+ ureq = ureq_dequeue(ctxdata, req_id);
+ if (!ureq)
+ return -EINVAL;
+
+ ureq->state = QCOMTEE_REQ_PROCESSED;
+
+ if (!errno)
+ ureq->errno = qcomtee_cb_params_to_args(ureq->args, params,
+ num_params, ctx);
+ else
+ ureq->errno = errno;
+ /* Return errno if qcomtee_cb_params_to_args() failed; otherwise 0. */
+ if (!errno && ureq->errno)
+ errno = ureq->errno;
+ else
+ errno = 0;
+
+ /* Send result to QTEE. */
+ complete(&ureq->c);
+
+ return errno;
+}
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 98da206cd761..d65d47cc154e 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -10,16 +10,16 @@
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/module.h>
+#include <linux/overflow.h>
#include <linux/slab.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include <linux/uaccess.h>
-#include <crypto/hash.h>
#include <crypto/sha1.h>
#include "tee_private.h"
#define TEE_NUM_DEVICES 32
-#define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x))
+#define TEE_IOCTL_PARAM_SIZE(x) (size_mul(sizeof(struct tee_param), (x)))
#define TEE_UUID_NS_NAME_SIZE 128
@@ -40,7 +40,7 @@ static const uuid_t tee_client_uuid_ns = UUID_INIT(0x58ac9ca0, 0x2086, 0x4683,
static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES);
static DEFINE_SPINLOCK(driver_lock);
-static struct class *tee_class;
+static const struct class tee_class;
static dev_t tee_devt;
struct tee_context *teedev_open(struct tee_device *teedev)
@@ -79,6 +79,7 @@ void teedev_ctx_get(struct tee_context *ctx)
kref_get(&ctx->refcount);
}
+EXPORT_SYMBOL_GPL(teedev_ctx_get);
static void teedev_ctx_release(struct kref *ref)
{
@@ -96,11 +97,15 @@ void teedev_ctx_put(struct tee_context *ctx)
kref_put(&ctx->refcount, teedev_ctx_release);
}
+EXPORT_SYMBOL_GPL(teedev_ctx_put);
void teedev_close_context(struct tee_context *ctx)
{
struct tee_device *teedev = ctx->teedev;
+ if (teedev->desc->ops->close_context)
+ teedev->desc->ops->close_context(ctx);
+
teedev_ctx_put(ctx);
tee_device_put(teedev);
}
@@ -141,58 +146,22 @@ static int tee_release(struct inode *inode, struct file *filp)
* This implements section (for SHA-1):
* 4.3. Algorithm for Creating a Name-Based UUID
*/
-static int uuid_v5(uuid_t *uuid, const uuid_t *ns, const void *name,
- size_t size)
+static void uuid_v5(uuid_t *uuid, const uuid_t *ns, const void *name,
+ size_t size)
{
unsigned char hash[SHA1_DIGEST_SIZE];
- struct crypto_shash *shash = NULL;
- struct shash_desc *desc = NULL;
- int rc;
-
- shash = crypto_alloc_shash("sha1", 0, 0);
- if (IS_ERR(shash)) {
- rc = PTR_ERR(shash);
- pr_err("shash(sha1) allocation failed\n");
- return rc;
- }
-
- desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(shash),
- GFP_KERNEL);
- if (!desc) {
- rc = -ENOMEM;
- goto out_free_shash;
- }
-
- desc->tfm = shash;
-
- rc = crypto_shash_init(desc);
- if (rc < 0)
- goto out_free_desc;
-
- rc = crypto_shash_update(desc, (const u8 *)ns, sizeof(*ns));
- if (rc < 0)
- goto out_free_desc;
+ struct sha1_ctx ctx;
- rc = crypto_shash_update(desc, (const u8 *)name, size);
- if (rc < 0)
- goto out_free_desc;
-
- rc = crypto_shash_final(desc, hash);
- if (rc < 0)
- goto out_free_desc;
+ sha1_init(&ctx);
+ sha1_update(&ctx, (const u8 *)ns, sizeof(*ns));
+ sha1_update(&ctx, (const u8 *)name, size);
+ sha1_final(&ctx, hash);
memcpy(uuid->b, hash, UUID_SIZE);
/* Tag for version 5 */
uuid->b[6] = (hash[6] & 0x0F) | 0x50;
uuid->b[8] = (hash[8] & 0x3F) | 0x80;
-
-out_free_desc:
- kfree(desc);
-
-out_free_shash:
- crypto_free_shash(shash);
- return rc;
}
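+
+/*
+ * Illustrative example of the helper above; the "uid=%x" name format
+ * is an assumption made for this sketch only.
+ */
+static void __maybe_unused uuid_v5_example(uuid_t *uuid)
+{
+	static const char name[] = "uid=3e8";
+
+	uuid_v5(uuid, &tee_client_uuid_ns, name, sizeof(name) - 1);
+	/* uuid->b[6] now carries version 5, uuid->b[8] the RFC 4122 variant */
+}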
int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
@@ -202,7 +171,7 @@ int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
kgid_t grp = INVALID_GID;
char *name = NULL;
int name_len;
- int rc;
+ int rc = 0;
if (connection_method == TEE_IOCTL_LOGIN_PUBLIC ||
connection_method == TEE_IOCTL_LOGIN_REE_KERNEL) {
@@ -259,7 +228,7 @@ int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
goto out_free_name;
}
- rc = uuid_v5(uuid, &tee_client_uuid_ns, name, name_len);
+ uuid_v5(uuid, &tee_client_uuid_ns, name, name_len);
out_free_name:
kfree(name);
@@ -353,6 +322,113 @@ tee_ioctl_shm_register(struct tee_context *ctx,
return ret;
}
+static int
+tee_ioctl_shm_register_fd(struct tee_context *ctx,
+ struct tee_ioctl_shm_register_fd_data __user *udata)
+{
+ struct tee_ioctl_shm_register_fd_data data;
+ struct tee_shm *shm;
+ long ret;
+
+ if (copy_from_user(&data, udata, sizeof(data)))
+ return -EFAULT;
+
+ /* Currently no input flags are supported */
+ if (data.flags)
+ return -EINVAL;
+
+ shm = tee_shm_register_fd(ctx, data.fd);
+ if (IS_ERR(shm))
+ return -EINVAL;
+
+ data.id = shm->id;
+ data.flags = shm->flags;
+ data.size = shm->size;
+
+ if (copy_to_user(udata, &data, sizeof(data)))
+ ret = -EFAULT;
+ else
+ ret = tee_shm_get_fd(shm);
+
+	/*
+	 * When user space closes the file descriptor, the shared memory
+	 * is freed. If tee_shm_get_fd() failed, it is freed immediately
+	 * by the tee_shm_put() below.
+	 */
+ tee_shm_put(shm);
+ return ret;
+}
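+
+/*
+ * User space usage sketch (illustrative; relies on the uapi additions
+ * in this patch set):
+ *
+ *	struct tee_ioctl_shm_register_fd_data data = {
+ *		.fd = dmabuf_fd,
+ *		.flags = 0,
+ *	};
+ *	int shm_fd = ioctl(tee_fd, TEE_IOC_SHM_REGISTER_FD, &data);
+ *
+ * On success a new file descriptor for the tee_shm is returned and
+ * data.id identifies the registration in later memref parameters.
+ */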
+
+static int param_from_user_memref(struct tee_context *ctx,
+ struct tee_param_memref *memref,
+ struct tee_ioctl_param *ip)
+{
+ struct tee_shm *shm;
+ size_t offs = 0;
+
+	/*
+	 * If a NULL pointer is passed to a TA in the TEE,
+	 * the ip->c IOCTL parameter is set to TEE_MEMREF_NULL,
+	 * indicating a NULL memory reference.
+	 */
+ if (ip->c != TEE_MEMREF_NULL) {
+		/*
+		 * If we fail to get a pointer to a shared
+		 * memory object (and increase the ref count)
+		 * from an identifier we return an error. All
+		 * pointers that have been added to params have
+		 * an increased ref count. It's the caller's
+		 * responsibility to do tee_shm_put() on all
+		 * resolved pointers.
+		 */
+ shm = tee_shm_get_from_id(ctx, ip->c);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ /*
+ * Ensure offset + size does not overflow
+ * offset and does not overflow the size of
+ * the referred shared memory object.
+ */
+ if ((ip->a + ip->b) < ip->a ||
+ (ip->a + ip->b) > shm->size) {
+ tee_shm_put(shm);
+ return -EINVAL;
+ }
+
+ if (shm->flags & TEE_SHM_DMA_BUF) {
+ struct tee_shm_dmabuf_ref *ref;
+
+ ref = container_of(shm, struct tee_shm_dmabuf_ref, shm);
+ if (ref->parent_shm) {
+				/*
+				 * The shm already holds one reference to
+				 * ref->parent_shm, so its refcount cannot
+				 * be zero here. Take another reference
+				 * since the parent shm is what goes into
+				 * the parameter list instead of the shm
+				 * returned by tee_shm_get_from_id() above.
+				 */
+ refcount_inc(&ref->parent_shm->refcount);
+ tee_shm_put(shm);
+ shm = ref->parent_shm;
+ offs = ref->offset;
+ }
+ }
+ } else if (ctx->cap_memref_null) {
+ /* Pass NULL pointer to OP-TEE */
+ shm = NULL;
+ } else {
+ return -EINVAL;
+ }
+
+ memref->shm_offs = ip->a + offs;
+ memref->size = ip->b;
+ memref->shm = shm;
+
+ return 0;
+}
+
static int params_from_user(struct tee_context *ctx, struct tee_param *params,
size_t num_params,
struct tee_ioctl_param __user *uparams)
@@ -360,8 +436,8 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params,
size_t n;
for (n = 0; n < num_params; n++) {
- struct tee_shm *shm;
struct tee_ioctl_param ip;
+ int rc;
if (copy_from_user(&ip, uparams + n, sizeof(ip)))
return -EFAULT;
@@ -374,6 +450,7 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params,
switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT:
break;
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
@@ -381,48 +458,29 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params,
params[n].u.value.b = ip.b;
params[n].u.value.c = ip.c;
break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT:
+ params[n].u.ubuf.uaddr = u64_to_user_ptr(ip.a);
+ params[n].u.ubuf.size = ip.b;
+
+ if (!access_ok(params[n].u.ubuf.uaddr,
+ params[n].u.ubuf.size))
+ return -EFAULT;
+
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT:
+ params[n].u.objref.id = ip.a;
+ params[n].u.objref.flags = ip.b;
+ break;
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
- /*
- * If a NULL pointer is passed to a TA in the TEE,
- * the ip.c IOCTL parameters is set to TEE_MEMREF_NULL
- * indicating a NULL memory reference.
- */
- if (ip.c != TEE_MEMREF_NULL) {
- /*
- * If we fail to get a pointer to a shared
- * memory object (and increase the ref count)
- * from an identifier we return an error. All
- * pointers that has been added in params have
- * an increased ref count. It's the callers
- * responibility to do tee_shm_put() on all
- * resolved pointers.
- */
- shm = tee_shm_get_from_id(ctx, ip.c);
- if (IS_ERR(shm))
- return PTR_ERR(shm);
-
- /*
- * Ensure offset + size does not overflow
- * offset and does not overflow the size of
- * the referred shared memory object.
- */
- if ((ip.a + ip.b) < ip.a ||
- (ip.a + ip.b) > shm->size) {
- tee_shm_put(shm);
- return -EINVAL;
- }
- } else if (ctx->cap_memref_null) {
- /* Pass NULL pointer to OP-TEE */
- shm = NULL;
- } else {
- return -EINVAL;
- }
-
- params[n].u.memref.shm_offs = ip.a;
- params[n].u.memref.size = ip.b;
- params[n].u.memref.shm = shm;
+ rc = param_from_user_memref(ctx, &params[n].u.memref,
+ &ip);
+ if (rc)
+ return rc;
break;
default:
/* Unknown attribute */
@@ -449,6 +507,17 @@ static int params_to_user(struct tee_ioctl_param __user *uparams,
put_user(p->u.value.c, &up->c))
return -EFAULT;
break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT:
+ if (put_user((u64)p->u.ubuf.size, &up->b))
+ return -EFAULT;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT:
+ if (put_user(p->u.objref.id, &up->a) ||
+ put_user(p->u.objref.flags, &up->b))
+ return -EFAULT;
+ break;
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
if (put_user((u64)p->u.memref.size, &up->b))
@@ -487,7 +556,7 @@ static int tee_ioctl_open_session(struct tee_context *ctx,
if (copy_from_user(&arg, uarg, sizeof(arg)))
return -EFAULT;
- if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
+ if (size_add(sizeof(arg), TEE_IOCTL_PARAM_SIZE(arg.num_params)) != buf.buf_len)
return -EINVAL;
if (arg.num_params) {
@@ -565,7 +634,7 @@ static int tee_ioctl_invoke(struct tee_context *ctx,
if (copy_from_user(&arg, uarg, sizeof(arg)))
return -EFAULT;
- if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
+ if (size_add(sizeof(arg), TEE_IOCTL_PARAM_SIZE(arg.num_params)) != buf.buf_len)
return -EINVAL;
if (arg.num_params) {
@@ -601,6 +670,66 @@ out:
return rc;
}
+static int tee_ioctl_object_invoke(struct tee_context *ctx,
+ struct tee_ioctl_buf_data __user *ubuf)
+{
+ int rc;
+ size_t n;
+ struct tee_ioctl_buf_data buf;
+ struct tee_ioctl_object_invoke_arg __user *uarg;
+ struct tee_ioctl_object_invoke_arg arg;
+ struct tee_ioctl_param __user *uparams = NULL;
+ struct tee_param *params = NULL;
+
+ if (!ctx->teedev->desc->ops->object_invoke_func)
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, sizeof(buf)))
+ return -EFAULT;
+
+ if (buf.buf_len > TEE_MAX_ARG_SIZE ||
+ buf.buf_len < sizeof(struct tee_ioctl_object_invoke_arg))
+ return -EINVAL;
+
+ uarg = u64_to_user_ptr(buf.buf_ptr);
+ if (copy_from_user(&arg, uarg, sizeof(arg)))
+ return -EFAULT;
+
+	if (size_add(sizeof(arg), TEE_IOCTL_PARAM_SIZE(arg.num_params)) != buf.buf_len)
+ return -EINVAL;
+
+ if (arg.num_params) {
+ params = kcalloc(arg.num_params, sizeof(struct tee_param),
+ GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+ uparams = uarg->params;
+ rc = params_from_user(ctx, params, arg.num_params, uparams);
+ if (rc)
+ goto out;
+ }
+
+ rc = ctx->teedev->desc->ops->object_invoke_func(ctx, &arg, params);
+ if (rc)
+ goto out;
+
+ if (put_user(arg.ret, &uarg->ret)) {
+ rc = -EFAULT;
+ goto out;
+ }
+ rc = params_to_user(uparams, arg.num_params, params);
+out:
+ if (params) {
+ /* Decrease ref count for all valid shared memory pointers */
+ for (n = 0; n < arg.num_params; n++)
+ if (tee_param_is_memref(params + n) &&
+ params[n].u.memref.shm)
+ tee_shm_put(params[n].u.memref.shm);
+ kfree(params);
+ }
+ return rc;
+}
+
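+/*
+ * User space usage sketch (illustrative): the buffer handed to
+ * TEE_IOC_OBJECT_INVOKE packs a struct tee_ioctl_object_invoke_arg
+ * immediately followed by num_params struct tee_ioctl_param entries,
+ * and buf_len must equal that combined size, the same layout contract
+ * as TEE_IOC_INVOKE.
+ */
+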
static int tee_ioctl_cancel(struct tee_context *ctx,
struct tee_ioctl_cancel_arg __user *uarg)
{
@@ -649,6 +778,19 @@ static int params_to_supp(struct tee_context *ctx,
ip.b = p->u.value.b;
ip.c = p->u.value.c;
break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT:
+ ip.a = (__force unsigned long)p->u.ubuf.uaddr;
+ ip.b = p->u.ubuf.size;
+ ip.c = 0;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT:
+ ip.a = p->u.objref.id;
+ ip.b = p->u.objref.flags;
+ ip.c = 0;
+ break;
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
@@ -699,7 +841,7 @@ static int tee_ioctl_supp_recv(struct tee_context *ctx,
if (get_user(num_params, &uarg->num_params))
return -EFAULT;
- if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len)
+ if (size_add(sizeof(*uarg), TEE_IOCTL_PARAM_SIZE(num_params)) != buf.buf_len)
return -EINVAL;
params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
@@ -751,6 +893,21 @@ static int params_from_supp(struct tee_param *params, size_t num_params,
p->u.value.b = ip.b;
p->u.value.c = ip.c;
break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT:
+ p->u.ubuf.uaddr = u64_to_user_ptr(ip.a);
+ p->u.ubuf.size = ip.b;
+
+			if (!access_ok(p->u.ubuf.uaddr, p->u.ubuf.size))
+				return -EFAULT;
+
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT:
+ p->u.objref.id = ip.a;
+ p->u.objref.flags = ip.b;
+ break;
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
/*
@@ -798,7 +955,7 @@ static int tee_ioctl_supp_send(struct tee_context *ctx,
get_user(num_params, &uarg->num_params))
return -EFAULT;
- if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) > buf.buf_len)
+ if (size_add(sizeof(*uarg), TEE_IOCTL_PARAM_SIZE(num_params)) > buf.buf_len)
return -EINVAL;
params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
@@ -827,10 +984,14 @@ static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return tee_ioctl_shm_alloc(ctx, uarg);
case TEE_IOC_SHM_REGISTER:
return tee_ioctl_shm_register(ctx, uarg);
+ case TEE_IOC_SHM_REGISTER_FD:
+ return tee_ioctl_shm_register_fd(ctx, uarg);
case TEE_IOC_OPEN_SESSION:
return tee_ioctl_open_session(ctx, uarg);
case TEE_IOC_INVOKE:
return tee_ioctl_invoke(ctx, uarg);
+ case TEE_IOC_OBJECT_INVOKE:
+ return tee_ioctl_object_invoke(ctx, uarg);
case TEE_IOC_CANCEL:
return tee_ioctl_cancel(ctx, uarg);
case TEE_IOC_CLOSE_SESSION:
@@ -888,7 +1049,7 @@ struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
if (!teedesc || !teedesc->name || !teedesc->ops ||
!teedesc->ops->get_version || !teedesc->ops->open ||
- !teedesc->ops->release || !pool)
+ !teedesc->ops->release)
return ERR_PTR(-EINVAL);
teedev = kzalloc(sizeof(*teedev), GFP_KERNEL);
@@ -919,7 +1080,7 @@ struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
teedesc->flags & TEE_DESC_PRIVILEGED ? "priv" : "",
teedev->id - offs);
- teedev->dev.class = tee_class;
+ teedev->dev.class = &tee_class;
teedev->dev.release = tee_release_device;
teedev->dev.parent = dev;
@@ -962,6 +1123,13 @@ err:
}
EXPORT_SYMBOL_GPL(tee_device_alloc);
+void tee_device_set_dev_groups(struct tee_device *teedev,
+ const struct attribute_group **dev_groups)
+{
+ teedev->dev.groups = dev_groups;
+}
+EXPORT_SYMBOL_GPL(tee_device_set_dev_groups);
+
static ssize_t implementation_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -969,7 +1137,7 @@ static ssize_t implementation_id_show(struct device *dev,
struct tee_ioctl_version_data vers;
teedev->desc->ops->get_version(teedev, &vers);
- return scnprintf(buf, PAGE_SIZE, "%d\n", vers.impl_id);
+ return sysfs_emit(buf, "%d\n", vers.impl_id);
}
static DEVICE_ATTR_RO(implementation_id);
@@ -980,6 +1148,11 @@ static struct attribute *tee_dev_attrs[] = {
ATTRIBUTE_GROUPS(tee_dev);
+static const struct class tee_class = {
+ .name = "tee",
+ .dev_groups = tee_dev_groups,
+};
+
/**
* tee_device_register() - Registers a TEE device
* @teedev: Device to register
@@ -998,8 +1171,6 @@ int tee_device_register(struct tee_device *teedev)
return -EINVAL;
}
- teedev->dev.groups = tee_dev_groups;
-
rc = cdev_device_add(&teedev->cdev, &teedev->dev);
if (rc) {
dev_err(&teedev->dev,
@@ -1027,6 +1198,7 @@ void tee_device_put(struct tee_device *teedev)
}
mutex_unlock(&teedev->mutex);
}
+EXPORT_SYMBOL_GPL(tee_device_put);
bool tee_device_get(struct tee_device *teedev)
{
@@ -1039,6 +1211,7 @@ bool tee_device_get(struct tee_device *teedev)
mutex_unlock(&teedev->mutex);
return true;
}
+EXPORT_SYMBOL_GPL(tee_device_get);
/**
* tee_device_unregister() - Removes a TEE device
@@ -1053,6 +1226,8 @@ void tee_device_unregister(struct tee_device *teedev)
if (!teedev)
return;
+ tee_device_put_all_dma_heaps(teedev);
+
if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED)
cdev_device_del(&teedev->cdev, &teedev->dev);
@@ -1112,7 +1287,7 @@ tee_client_open_context(struct tee_context *start,
dev = &start->teedev->dev;
do {
- dev = class_find_device(tee_class, dev, &match_data, match_dev);
+ dev = class_find_device(&tee_class, dev, &match_data, match_dev);
if (!dev) {
ctx = ERR_PTR(-ENOENT);
break;
@@ -1170,6 +1345,14 @@ int tee_client_close_session(struct tee_context *ctx, u32 session)
}
EXPORT_SYMBOL_GPL(tee_client_close_session);
+int tee_client_system_session(struct tee_context *ctx, u32 session)
+{
+ if (!ctx->teedev->desc->ops->system_session)
+ return -EINVAL;
+ return ctx->teedev->desc->ops->system_session(ctx, session);
+}
+EXPORT_SYMBOL_GPL(tee_client_system_session);
+
int tee_client_invoke_func(struct tee_context *ctx,
struct tee_ioctl_invoke_arg *arg,
struct tee_param *param)
@@ -1190,7 +1373,7 @@ int tee_client_cancel_req(struct tee_context *ctx,
}
static int tee_client_device_match(struct device *dev,
- struct device_driver *drv)
+ const struct device_driver *drv)
{
const struct tee_client_device_id *id_table;
struct tee_client_device *tee_device;
@@ -1207,7 +1390,7 @@ static int tee_client_device_match(struct device *dev,
return 0;
}
-static int tee_client_device_uevent(struct device *dev,
+static int tee_client_device_uevent(const struct device *dev,
struct kobj_uevent_env *env)
{
uuid_t *dev_id = &to_tee_client_device(dev)->id.uuid;
@@ -1215,7 +1398,7 @@ static int tee_client_device_uevent(struct device *dev,
return add_uevent_var(env, "MODALIAS=tee:%pUb", dev_id);
}
-struct bus_type tee_bus_type = {
+const struct bus_type tee_bus_type = {
.name = "tee",
.match = tee_client_device_match,
.uevent = tee_client_device_uevent,
@@ -1226,10 +1409,10 @@ static int __init tee_init(void)
{
int rc;
- tee_class = class_create(THIS_MODULE, "tee");
- if (IS_ERR(tee_class)) {
+ rc = class_register(&tee_class);
+ if (rc) {
pr_err("couldn't create class\n");
- return PTR_ERR(tee_class);
+ return rc;
}
rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee");
@@ -1249,8 +1432,7 @@ static int __init tee_init(void)
out_unreg_chrdev:
unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
out_unreg_class:
- class_destroy(tee_class);
- tee_class = NULL;
+ class_unregister(&tee_class);
return rc;
}
@@ -1259,8 +1441,7 @@ static void __exit tee_exit(void)
{
bus_unregister(&tee_bus_type);
unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
- class_destroy(tee_class);
- tee_class = NULL;
+ class_unregister(&tee_class);
}
subsys_initcall(tee_init);
@@ -1270,3 +1451,5 @@ MODULE_AUTHOR("Linaro");
MODULE_DESCRIPTION("TEE Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS("DMA_BUF");
+MODULE_IMPORT_NS("DMA_BUF_HEAP");
diff --git a/drivers/tee/tee_heap.c b/drivers/tee/tee_heap.c
new file mode 100644
index 000000000000..d8d7735cdffb
--- /dev/null
+++ b/drivers/tee/tee_heap.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Linaro Limited
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/genalloc.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/tee_core.h>
+#include <linux/xarray.h>
+
+#include "tee_private.h"
+
+struct tee_dma_heap {
+ struct dma_heap *heap;
+ enum tee_dma_heap_id id;
+ struct kref kref;
+ struct tee_protmem_pool *pool;
+ struct tee_device *teedev;
+ bool shutting_down;
+ /* Protects pool, teedev, and shutting_down above */
+ struct mutex mu;
+};
+
+struct tee_heap_buffer {
+ struct tee_dma_heap *heap;
+ size_t size;
+ size_t offs;
+ struct sg_table table;
+};
+
+struct tee_heap_attachment {
+ struct sg_table table;
+ struct device *dev;
+};
+
+struct tee_protmem_static_pool {
+ struct tee_protmem_pool pool;
+ struct gen_pool *gen_pool;
+ phys_addr_t pa_base;
+};
+
+#if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS)
+static DEFINE_XARRAY_ALLOC(tee_dma_heap);
+
+static void tee_heap_release(struct kref *kref)
+{
+ struct tee_dma_heap *h = container_of(kref, struct tee_dma_heap, kref);
+
+ h->pool->ops->destroy_pool(h->pool);
+ tee_device_put(h->teedev);
+ h->pool = NULL;
+ h->teedev = NULL;
+}
+
+static void put_tee_heap(struct tee_dma_heap *h)
+{
+ kref_put(&h->kref, tee_heap_release);
+}
+
+static void get_tee_heap(struct tee_dma_heap *h)
+{
+ kref_get(&h->kref);
+}
+
+static int copy_sg_table(struct sg_table *dst, struct sg_table *src)
+{
+ struct scatterlist *dst_sg;
+ struct scatterlist *src_sg;
+ int ret;
+ int i;
+
+ ret = sg_alloc_table(dst, src->orig_nents, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ dst_sg = dst->sgl;
+ for_each_sgtable_sg(src, src_sg, i) {
+ sg_set_page(dst_sg, sg_page(src_sg), src_sg->length,
+ src_sg->offset);
+ dst_sg = sg_next(dst_sg);
+ }
+
+ return 0;
+}
+
+static int tee_heap_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct tee_heap_buffer *buf = dmabuf->priv;
+ struct tee_heap_attachment *a;
+ int ret;
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ ret = copy_sg_table(&a->table, &buf->table);
+ if (ret) {
+ kfree(a);
+ return ret;
+ }
+
+ a->dev = attachment->dev;
+ attachment->priv = a;
+
+ return 0;
+}
+
+static void tee_heap_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct tee_heap_attachment *a = attachment->priv;
+
+ sg_free_table(&a->table);
+ kfree(a);
+}
+
+static struct sg_table *
+tee_heap_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct tee_heap_attachment *a = attachment->priv;
+ int ret;
+
+ ret = dma_map_sgtable(attachment->dev, &a->table, direction,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &a->table;
+}
+
+static void tee_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+ struct tee_heap_attachment *a = attachment->priv;
+
+ WARN_ON(&a->table != table);
+
+ dma_unmap_sgtable(attachment->dev, table, direction,
+ DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static void tee_heap_buf_free(struct dma_buf *dmabuf)
+{
+ struct tee_heap_buffer *buf = dmabuf->priv;
+
+ buf->heap->pool->ops->free(buf->heap->pool, &buf->table);
+ mutex_lock(&buf->heap->mu);
+ put_tee_heap(buf->heap);
+ mutex_unlock(&buf->heap->mu);
+ kfree(buf);
+}
+
+static const struct dma_buf_ops tee_heap_buf_ops = {
+ .attach = tee_heap_attach,
+ .detach = tee_heap_detach,
+ .map_dma_buf = tee_heap_map_dma_buf,
+ .unmap_dma_buf = tee_heap_unmap_dma_buf,
+ .release = tee_heap_buf_free,
+};
+
+static struct dma_buf *tee_dma_heap_alloc(struct dma_heap *heap,
+ unsigned long len, u32 fd_flags,
+ u64 heap_flags)
+{
+ struct tee_dma_heap *h = dma_heap_get_drvdata(heap);
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ struct tee_device *teedev = NULL;
+ struct tee_heap_buffer *buf;
+ struct tee_protmem_pool *pool;
+ struct dma_buf *dmabuf;
+ int rc;
+
+ mutex_lock(&h->mu);
+ if (h->teedev) {
+ teedev = h->teedev;
+ pool = h->pool;
+ get_tee_heap(h);
+ }
+ mutex_unlock(&h->mu);
+
+ if (!teedev)
+ return ERR_PTR(-EINVAL);
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf) {
+ dmabuf = ERR_PTR(-ENOMEM);
+ goto err;
+ }
+ buf->size = len;
+ buf->heap = h;
+
+ rc = pool->ops->alloc(pool, &buf->table, len, &buf->offs);
+ if (rc) {
+ dmabuf = ERR_PTR(rc);
+ goto err_kfree;
+ }
+
+ exp_info.ops = &tee_heap_buf_ops;
+ exp_info.size = len;
+ exp_info.priv = buf;
+ exp_info.flags = fd_flags;
+ dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dmabuf))
+ goto err_protmem_free;
+
+ return dmabuf;
+
+err_protmem_free:
+ pool->ops->free(pool, &buf->table);
+err_kfree:
+ kfree(buf);
+err:
+ mutex_lock(&h->mu);
+ put_tee_heap(h);
+ mutex_unlock(&h->mu);
+ return dmabuf;
+}
+
+static const struct dma_heap_ops tee_dma_heap_ops = {
+ .allocate = tee_dma_heap_alloc,
+};
+
+static const char *heap_id_2_name(enum tee_dma_heap_id id)
+{
+ switch (id) {
+ case TEE_DMA_HEAP_SECURE_VIDEO_PLAY:
+ return "protected,secure-video";
+ case TEE_DMA_HEAP_TRUSTED_UI:
+ return "protected,trusted-ui";
+ case TEE_DMA_HEAP_SECURE_VIDEO_RECORD:
+ return "protected,secure-video-record";
+ default:
+ return NULL;
+ }
+}
+
+static int alloc_dma_heap(struct tee_device *teedev, enum tee_dma_heap_id id,
+ struct tee_protmem_pool *pool)
+{
+ struct dma_heap_export_info exp_info = {
+ .ops = &tee_dma_heap_ops,
+ .name = heap_id_2_name(id),
+ };
+ struct tee_dma_heap *h;
+ int rc;
+
+ if (!exp_info.name)
+ return -EINVAL;
+
+ if (xa_reserve(&tee_dma_heap, id, GFP_KERNEL)) {
+ if (!xa_load(&tee_dma_heap, id))
+ return -EEXIST;
+ return -ENOMEM;
+ }
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+ h->id = id;
+ kref_init(&h->kref);
+ h->teedev = teedev;
+ h->pool = pool;
+ mutex_init(&h->mu);
+
+ exp_info.priv = h;
+ h->heap = dma_heap_add(&exp_info);
+ if (IS_ERR(h->heap)) {
+ rc = PTR_ERR(h->heap);
+ kfree(h);
+
+ return rc;
+ }
+
+ /* "can't fail" due to the call to xa_reserve() above */
+ return WARN_ON(xa_is_err(xa_store(&tee_dma_heap, id, h, GFP_KERNEL)));
+}
+
+int tee_device_register_dma_heap(struct tee_device *teedev,
+ enum tee_dma_heap_id id,
+ struct tee_protmem_pool *pool)
+{
+ struct tee_dma_heap *h;
+ int rc;
+
+ if (!tee_device_get(teedev))
+ return -EINVAL;
+
+ h = xa_load(&tee_dma_heap, id);
+ if (h) {
+ mutex_lock(&h->mu);
+ if (h->teedev) {
+ rc = -EBUSY;
+ } else {
+ kref_init(&h->kref);
+ h->shutting_down = false;
+ h->teedev = teedev;
+ h->pool = pool;
+ rc = 0;
+ }
+ mutex_unlock(&h->mu);
+ } else {
+ rc = alloc_dma_heap(teedev, id, pool);
+ }
+
+ if (rc) {
+ tee_device_put(teedev);
+ dev_err(&teedev->dev, "can't register DMA heap id %d (%s)\n",
+ id, heap_id_2_name(id));
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tee_device_register_dma_heap);
+
+void tee_device_put_all_dma_heaps(struct tee_device *teedev)
+{
+ struct tee_dma_heap *h;
+ u_long i;
+
+ xa_for_each(&tee_dma_heap, i, h) {
+ if (h) {
+ mutex_lock(&h->mu);
+ if (h->teedev == teedev && !h->shutting_down) {
+ h->shutting_down = true;
+ put_tee_heap(h);
+ }
+ mutex_unlock(&h->mu);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(tee_device_put_all_dma_heaps);
+
+int tee_heap_update_from_dma_buf(struct tee_device *teedev,
+ struct dma_buf *dmabuf, size_t *offset,
+ struct tee_shm *shm,
+ struct tee_shm **parent_shm)
+{
+ struct tee_heap_buffer *buf;
+ int rc;
+
+ /* The DMA-buf must be from our heap */
+ if (dmabuf->ops != &tee_heap_buf_ops)
+ return -EINVAL;
+
+ buf = dmabuf->priv;
+ /* The buffer must be from the same teedev */
+ if (buf->heap->teedev != teedev)
+ return -EINVAL;
+
+ shm->size = buf->size;
+
+ rc = buf->heap->pool->ops->update_shm(buf->heap->pool, &buf->table,
+ buf->offs, shm, parent_shm);
+ if (!rc && *parent_shm)
+ *offset = buf->offs;
+
+ return rc;
+}
+#else
+int tee_device_register_dma_heap(struct tee_device *teedev __always_unused,
+ enum tee_dma_heap_id id __always_unused,
+ struct tee_protmem_pool *pool __always_unused)
+{
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(tee_device_register_dma_heap);
+
+void
+tee_device_put_all_dma_heaps(struct tee_device *teedev __always_unused)
+{
+}
+EXPORT_SYMBOL_GPL(tee_device_put_all_dma_heaps);
+
+int tee_heap_update_from_dma_buf(struct tee_device *teedev __always_unused,
+ struct dma_buf *dmabuf __always_unused,
+ size_t *offset __always_unused,
+ struct tee_shm *shm __always_unused,
+ struct tee_shm **parent_shm __always_unused)
+{
+ return -EINVAL;
+}
+#endif
+
+static struct tee_protmem_static_pool *
+to_protmem_static_pool(struct tee_protmem_pool *pool)
+{
+ return container_of(pool, struct tee_protmem_static_pool, pool);
+}
+
+static int protmem_pool_op_static_alloc(struct tee_protmem_pool *pool,
+ struct sg_table *sgt, size_t size,
+ size_t *offs)
+{
+ struct tee_protmem_static_pool *stp = to_protmem_static_pool(pool);
+ phys_addr_t pa;
+ int ret;
+
+ pa = gen_pool_alloc(stp->gen_pool, size);
+ if (!pa)
+ return -ENOMEM;
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (ret) {
+ gen_pool_free(stp->gen_pool, pa, size);
+ return ret;
+ }
+
+ sg_set_page(sgt->sgl, phys_to_page(pa), size, 0);
+ *offs = pa - stp->pa_base;
+
+ return 0;
+}
+
+static void protmem_pool_op_static_free(struct tee_protmem_pool *pool,
+ struct sg_table *sgt)
+{
+ struct tee_protmem_static_pool *stp = to_protmem_static_pool(pool);
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sgtable_sg(sgt, sg, i)
+ gen_pool_free(stp->gen_pool, sg_phys(sg), sg->length);
+ sg_free_table(sgt);
+}
+
+static int protmem_pool_op_static_update_shm(struct tee_protmem_pool *pool,
+ struct sg_table *sgt, size_t offs,
+ struct tee_shm *shm,
+ struct tee_shm **parent_shm)
+{
+ struct tee_protmem_static_pool *stp = to_protmem_static_pool(pool);
+
+ shm->paddr = stp->pa_base + offs;
+ *parent_shm = NULL;
+
+ return 0;
+}
+
+static void protmem_pool_op_static_destroy_pool(struct tee_protmem_pool *pool)
+{
+ struct tee_protmem_static_pool *stp = to_protmem_static_pool(pool);
+
+ gen_pool_destroy(stp->gen_pool);
+ kfree(stp);
+}
+
+static struct tee_protmem_pool_ops protmem_pool_ops_static = {
+ .alloc = protmem_pool_op_static_alloc,
+ .free = protmem_pool_op_static_free,
+ .update_shm = protmem_pool_op_static_update_shm,
+ .destroy_pool = protmem_pool_op_static_destroy_pool,
+};
+
+struct tee_protmem_pool *tee_protmem_static_pool_alloc(phys_addr_t paddr,
+ size_t size)
+{
+ const size_t page_mask = PAGE_SIZE - 1;
+ struct tee_protmem_static_pool *stp;
+ int rc;
+
+ /* Check it's page aligned */
+ if ((paddr | size) & page_mask)
+ return ERR_PTR(-EINVAL);
+
+ if (!pfn_valid(PHYS_PFN(paddr)))
+ return ERR_PTR(-EINVAL);
+
+ stp = kzalloc(sizeof(*stp), GFP_KERNEL);
+ if (!stp)
+ return ERR_PTR(-ENOMEM);
+
+ stp->gen_pool = gen_pool_create(PAGE_SHIFT, -1);
+ if (!stp->gen_pool) {
+ rc = -ENOMEM;
+ goto err_free;
+ }
+
+ rc = gen_pool_add(stp->gen_pool, paddr, size, -1);
+ if (rc)
+ goto err_free_pool;
+
+ stp->pool.ops = &protmem_pool_ops_static;
+ stp->pa_base = paddr;
+ return &stp->pool;
+
+err_free_pool:
+ gen_pool_destroy(stp->gen_pool);
+err_free:
+ kfree(stp);
+
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(tee_protmem_static_pool_alloc);
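+
+/*
+ * Illustrative driver-side sketch: expose a static carveout as the
+ * "protected,secure-video" DMA heap. Where @paddr and @size come from
+ * is an assumption left to the caller.
+ */
+static int __maybe_unused example_register_protmem(struct tee_device *teedev,
+						   phys_addr_t paddr,
+						   size_t size)
+{
+	struct tee_protmem_pool *pool;
+	int rc;
+
+	pool = tee_protmem_static_pool_alloc(paddr, size);
+	if (IS_ERR(pool))
+		return PTR_ERR(pool);
+
+	rc = tee_device_register_dma_heap(teedev,
+					  TEE_DMA_HEAP_SECURE_VIDEO_PLAY, pool);
+	if (rc)
+		pool->ops->destroy_pool(pool);
+
+	return rc;
+}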
diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h
index 409cadcc1cff..6bde688bfcb1 100644
--- a/drivers/tee/tee_private.h
+++ b/drivers/tee/tee_private.h
@@ -8,57 +8,28 @@
#include <linux/cdev.h>
#include <linux/completion.h>
#include <linux/device.h>
+#include <linux/dma-buf.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/types.h>
-#define TEE_DEVICE_FLAG_REGISTERED 0x1
-#define TEE_MAX_DEV_NAME_LEN 32
-
-/**
- * struct tee_device - TEE Device representation
- * @name: name of device
- * @desc: description of device
- * @id: unique id of device
- * @flags: represented by TEE_DEVICE_FLAG_REGISTERED above
- * @dev: embedded basic device structure
- * @cdev: embedded cdev
- * @num_users: number of active users of this device
- * @c_no_user: completion used when unregistering the device
- * @mutex: mutex protecting @num_users and @idr
- * @idr: register of user space shared memory objects allocated or
- * registered on this device
- * @pool: shared memory pool
- */
-struct tee_device {
- char name[TEE_MAX_DEV_NAME_LEN];
- const struct tee_desc *desc;
- int id;
- unsigned int flags;
-
- struct device dev;
- struct cdev cdev;
-
- size_t num_users;
- struct completion c_no_users;
- struct mutex mutex; /* protects num_users and idr */
-
- struct idr idr;
- struct tee_shm_pool *pool;
+/* extra state appended to the shm object for DMA-buf registered shared memory */
+struct tee_shm_dmabuf_ref {
+ struct tee_shm shm;
+ size_t offset;
+ struct dma_buf *dmabuf;
+ struct tee_shm *parent_shm;
};
-int tee_shm_init(void);
-
int tee_shm_get_fd(struct tee_shm *shm);
-bool tee_device_get(struct tee_device *teedev);
-void tee_device_put(struct tee_device *teedev);
-
-void teedev_ctx_get(struct tee_context *ctx);
-void teedev_ctx_put(struct tee_context *ctx);
-
struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size);
struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
unsigned long addr, size_t length);
+int tee_heap_update_from_dma_buf(struct tee_device *teedev,
+ struct dma_buf *dmabuf, size_t *offset,
+ struct tee_shm *shm,
+ struct tee_shm **parent_shm);
+
#endif /*TEE_PRIVATE_H*/
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 27295bda3e0b..4a47de4bb2e5 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -4,15 +4,25 @@
*/
#include <linux/anon_inodes.h>
#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
#include <linux/idr.h>
+#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include "tee_private.h"
+struct tee_shm_dma_mem {
+ struct tee_shm shm;
+ dma_addr_t dma_addr;
+ struct page *page;
+};
+
static void shm_put_kernel_pages(struct page **pages, size_t page_count)
{
size_t n;
@@ -21,41 +31,12 @@ static void shm_put_kernel_pages(struct page **pages, size_t page_count)
put_page(pages[n]);
}
-static int shm_get_kernel_pages(unsigned long start, size_t page_count,
- struct page **pages)
+static void shm_get_kernel_pages(struct page **pages, size_t page_count)
{
size_t n;
- int rc;
-
- if (is_vmalloc_addr((void *)start)) {
- struct page *page;
-
- for (n = 0; n < page_count; n++) {
- page = vmalloc_to_page((void *)(start + PAGE_SIZE * n));
- if (!page)
- return -ENOMEM;
-
- get_page(page);
- pages[n] = page;
- }
- rc = page_count;
- } else {
- struct kvec *kiov;
-
- kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL);
- if (!kiov)
- return -ENOMEM;
- for (n = 0; n < page_count; n++) {
- kiov[n].iov_base = (void *)(start + n * PAGE_SIZE);
- kiov[n].iov_len = PAGE_SIZE;
- }
-
- rc = get_kernel_pages(kiov, page_count, 0, pages);
- kfree(kiov);
- }
-
- return rc;
+ for (n = 0; n < page_count; n++)
+ get_page(pages[n]);
}
static void release_registered_pages(struct tee_shm *shm)
@@ -72,7 +53,24 @@ static void release_registered_pages(struct tee_shm *shm)
static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
{
- if (shm->flags & TEE_SHM_POOL) {
+ void *p = shm;
+
+ if (shm->flags & TEE_SHM_DMA_MEM) {
+#if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS)
+ struct tee_shm_dma_mem *dma_mem;
+
+ dma_mem = container_of(shm, struct tee_shm_dma_mem, shm);
+ p = dma_mem;
+ dma_free_pages(&teedev->dev, shm->size, dma_mem->page,
+ dma_mem->dma_addr, DMA_BIDIRECTIONAL);
+#endif
+ } else if (shm->flags & TEE_SHM_DMA_BUF) {
+ struct tee_shm_dmabuf_ref *ref;
+
+ ref = container_of(shm, struct tee_shm_dmabuf_ref, shm);
+ p = ref;
+ dma_buf_put(ref->dmabuf);
+ } else if (shm->flags & TEE_SHM_POOL) {
teedev->pool->ops->free(teedev->pool, shm);
} else if (shm->flags & TEE_SHM_DYNAMIC) {
int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);
@@ -86,7 +84,7 @@ static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
teedev_ctx_put(shm->ctx);
- kfree(shm);
+ kfree(p);
tee_device_put(teedev);
}
@@ -196,7 +194,7 @@ struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size)
* tee_client_invoke_func(). The memory allocated is later freed with a
* call to tee_shm_free().
*
- * @returns a pointer to 'struct tee_shm'
+ * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure
*/
struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
{
@@ -206,6 +204,62 @@ struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
+struct tee_shm *tee_shm_register_fd(struct tee_context *ctx, int fd)
+{
+ struct tee_shm_dmabuf_ref *ref;
+ int rc;
+
+ if (!tee_device_get(ctx->teedev))
+ return ERR_PTR(-EINVAL);
+
+ teedev_ctx_get(ctx);
+
+ ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!ref) {
+ rc = -ENOMEM;
+ goto err_put_tee;
+ }
+
+ refcount_set(&ref->shm.refcount, 1);
+ ref->shm.ctx = ctx;
+ ref->shm.id = -1;
+ ref->shm.flags = TEE_SHM_DMA_BUF;
+
+ ref->dmabuf = dma_buf_get(fd);
+ if (IS_ERR(ref->dmabuf)) {
+ rc = PTR_ERR(ref->dmabuf);
+ goto err_kfree_ref;
+ }
+
+ rc = tee_heap_update_from_dma_buf(ctx->teedev, ref->dmabuf,
+ &ref->offset, &ref->shm,
+ &ref->parent_shm);
+ if (rc)
+ goto err_put_dmabuf;
+
+ mutex_lock(&ref->shm.ctx->teedev->mutex);
+ ref->shm.id = idr_alloc(&ref->shm.ctx->teedev->idr, &ref->shm,
+ 1, 0, GFP_KERNEL);
+ mutex_unlock(&ref->shm.ctx->teedev->mutex);
+ if (ref->shm.id < 0) {
+ rc = ref->shm.id;
+ goto err_put_dmabuf;
+ }
+
+ return &ref->shm;
+
+err_put_dmabuf:
+ dma_buf_put(ref->dmabuf);
+err_kfree_ref:
+ kfree(ref);
+err_put_tee:
+ teedev_ctx_put(ctx);
+ tee_device_put(ctx->teedev);
+
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(tee_shm_register_fd);
+
/**
* tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared
* kernel buffer
@@ -230,14 +284,146 @@ struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);
+#if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS)
+/**
+ * tee_shm_alloc_dma_mem() - Allocate DMA memory as shared memory object
+ * @ctx: Context that allocates the shared memory
+ * @page_count: Number of pages
+ *
+ * The allocated memory is expected to be lent (made inaccessible to the
+ * kernel) to the TEE while it's used and returned (accessible to the
+ * kernel again) before it's freed.
+ *
+ * This function should normally only be used internally in the TEE
+ * drivers.
+ *
+ * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure
+ */
+struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx,
+ size_t page_count)
+{
+ struct tee_device *teedev = ctx->teedev;
+ struct tee_shm_dma_mem *dma_mem;
+ dma_addr_t dma_addr;
+ struct page *page;
+
+ if (!tee_device_get(teedev))
+ return ERR_PTR(-EINVAL);
+
+ page = dma_alloc_pages(&teedev->dev, page_count * PAGE_SIZE,
+ &dma_addr, DMA_BIDIRECTIONAL, GFP_KERNEL);
+ if (!page)
+ goto err_put_teedev;
+
+ dma_mem = kzalloc(sizeof(*dma_mem), GFP_KERNEL);
+ if (!dma_mem)
+ goto err_free_pages;
+
+ refcount_set(&dma_mem->shm.refcount, 1);
+ dma_mem->shm.ctx = ctx;
+ dma_mem->shm.paddr = page_to_phys(page);
+ dma_mem->dma_addr = dma_addr;
+ dma_mem->page = page;
+ dma_mem->shm.size = page_count * PAGE_SIZE;
+ dma_mem->shm.flags = TEE_SHM_DMA_MEM;
+
+ teedev_ctx_get(ctx);
+
+ return &dma_mem->shm;
+
+err_free_pages:
+ dma_free_pages(&teedev->dev, page_count * PAGE_SIZE, page, dma_addr,
+ DMA_BIDIRECTIONAL);
+err_put_teedev:
+ tee_device_put(teedev);
+
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL_GPL(tee_shm_alloc_dma_mem);
+#else
+struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx,
+ size_t page_count)
+{
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(tee_shm_alloc_dma_mem);
+#endif
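+
+/*
+ * Illustrative kernel-internal usage; the page count is arbitrary.
+ * The memory would be lent to the TEE while in use and must be
+ * accessible to the kernel again before tee_shm_free().
+ */
+static void __maybe_unused dma_mem_example(struct tee_context *ctx)
+{
+	struct tee_shm *shm = tee_shm_alloc_dma_mem(ctx, 4);
+
+	if (IS_ERR(shm))
+		return;
+	/* shm->paddr holds the physical base of the allocation */
+	tee_shm_free(shm);
+}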
+
+int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align,
+ int (*shm_register)(struct tee_context *ctx,
+ struct tee_shm *shm,
+ struct page **pages,
+ size_t num_pages,
+ unsigned long start))
+{
+ size_t nr_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
+ struct page **pages;
+ unsigned int i;
+ int rc = 0;
+
+ /*
+ * Ignore alignment since this is already going to be page aligned
+ * and there's no need for any larger alignment.
+ */
+ shm->kaddr = alloc_pages_exact(nr_pages * PAGE_SIZE,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!shm->kaddr)
+ return -ENOMEM;
+
+ shm->paddr = virt_to_phys(shm->kaddr);
+ shm->size = nr_pages * PAGE_SIZE;
+
+ pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages) {
+ rc = -ENOMEM;
+ goto err_pages;
+ }
+
+ for (i = 0; i < nr_pages; i++)
+ pages[i] = virt_to_page((u8 *)shm->kaddr + i * PAGE_SIZE);
+
+ shm->pages = pages;
+ shm->num_pages = nr_pages;
+
+ if (shm_register) {
+ rc = shm_register(shm->ctx, shm, pages, nr_pages,
+ (unsigned long)shm->kaddr);
+ if (rc)
+ goto err_kfree;
+ }
+
+ return 0;
+err_kfree:
+ kfree(pages);
+err_pages:
+ free_pages_exact(shm->kaddr, shm->size);
+ shm->kaddr = NULL;
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tee_dyn_shm_alloc_helper);
+
+void tee_dyn_shm_free_helper(struct tee_shm *shm,
+ int (*shm_unregister)(struct tee_context *ctx,
+ struct tee_shm *shm))
+{
+ if (shm_unregister)
+ shm_unregister(shm->ctx, shm);
+ free_pages_exact(shm->kaddr, shm->size);
+ shm->kaddr = NULL;
+ kfree(shm->pages);
+ shm->pages = NULL;
+}
+EXPORT_SYMBOL_GPL(tee_dyn_shm_free_helper);
+
static struct tee_shm *
-register_shm_helper(struct tee_context *ctx, unsigned long addr,
- size_t length, u32 flags, int id)
+register_shm_helper(struct tee_context *ctx, struct iov_iter *iter, u32 flags,
+ int id)
{
struct tee_device *teedev = ctx->teedev;
struct tee_shm *shm;
- unsigned long start;
- size_t num_pages;
+ unsigned long start, addr;
+ size_t num_pages, off;
+ ssize_t len;
void *ret;
int rc;
@@ -262,31 +448,46 @@ register_shm_helper(struct tee_context *ctx, unsigned long addr,
shm->flags = flags;
shm->ctx = ctx;
shm->id = id;
- addr = untagged_addr(addr);
+ addr = untagged_addr((unsigned long)iter_iov_addr(iter));
start = rounddown(addr, PAGE_SIZE);
- shm->offset = addr - start;
- shm->size = length;
- num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
+ num_pages = iov_iter_npages(iter, INT_MAX);
+ if (!num_pages) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err_ctx_put;
+ }
+
shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
if (!shm->pages) {
ret = ERR_PTR(-ENOMEM);
goto err_free_shm;
}
- if (flags & TEE_SHM_USER_MAPPED)
- rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,
- shm->pages);
- else
- rc = shm_get_kernel_pages(start, num_pages, shm->pages);
- if (rc > 0)
- shm->num_pages = rc;
- if (rc != num_pages) {
- if (rc >= 0)
- rc = -ENOMEM;
- ret = ERR_PTR(rc);
+ len = iov_iter_extract_pages(iter, &shm->pages, LONG_MAX, num_pages, 0,
+ &off);
+ if (unlikely(len <= 0)) {
+ ret = len ? ERR_PTR(len) : ERR_PTR(-ENOMEM);
+ goto err_free_shm_pages;
+ } else if (DIV_ROUND_UP(len + off, PAGE_SIZE) != num_pages) {
+		/*
+		 * If we got fewer pages than requested, update
+		 * shm->num_pages so that the correct amount is
+		 * released below.
+		 */
+ shm->num_pages = len / PAGE_SIZE;
+ ret = ERR_PTR(-ENOMEM);
goto err_put_shm_pages;
}
+	/*
+	 * iov_iter_extract_pages() does not take a reference on pages
+	 * extracted from a kvec iterator, so take one on them here.
+	 */
+ if (iov_iter_is_kvec(iter))
+ shm_get_kernel_pages(shm->pages, num_pages);
+
+ shm->offset = off;
+ shm->size = len;
+ shm->num_pages = num_pages;
+
rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
shm->num_pages, start);
if (rc) {
@@ -296,10 +497,11 @@ register_shm_helper(struct tee_context *ctx, unsigned long addr,
return shm;
err_put_shm_pages:
- if (flags & TEE_SHM_USER_MAPPED)
+ if (!iov_iter_is_kvec(iter))
unpin_user_pages(shm->pages, shm->num_pages);
else
shm_put_kernel_pages(shm->pages, shm->num_pages);
+err_free_shm_pages:
kfree(shm->pages);
err_free_shm:
kfree(shm);
@@ -324,6 +526,7 @@ struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
u32 flags = TEE_SHM_USER_MAPPED | TEE_SHM_DYNAMIC;
struct tee_device *teedev = ctx->teedev;
struct tee_shm *shm;
+ struct iov_iter iter;
void *ret;
int id;
@@ -336,7 +539,8 @@ struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
if (id < 0)
return ERR_PTR(id);
- shm = register_shm_helper(ctx, addr, length, flags, id);
+ iov_iter_ubuf(&iter, ITER_DEST, (void __user *)addr, length);
+ shm = register_shm_helper(ctx, &iter, flags, id);
if (IS_ERR(shm)) {
mutex_lock(&teedev->mutex);
idr_remove(&teedev->idr, id);
@@ -369,8 +573,14 @@ struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
void *addr, size_t length)
{
u32 flags = TEE_SHM_DYNAMIC;
+ struct kvec kvec;
+ struct iov_iter iter;
+
+ kvec.iov_base = addr;
+ kvec.iov_len = length;
+ iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, length);
- return register_shm_helper(ctx, (unsigned long)addr, length, flags, -1);
+ return register_shm_helper(ctx, &iter, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_register_kernel_buf);
@@ -388,6 +598,9 @@ static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
/* Refuse sharing shared memory provided by application */
if (shm->flags & TEE_SHM_USER_MAPPED)
return -EINVAL;
+ /* Refuse sharing registered DMA_bufs with the application */
+ if (shm->flags & TEE_SHM_DMA_BUF)
+ return -EINVAL;
/* check for overflowing the buffer's size */
if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
@@ -506,9 +719,13 @@ EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
*/
void tee_shm_put(struct tee_shm *shm)
{
- struct tee_device *teedev = shm->ctx->teedev;
+ struct tee_device *teedev;
bool do_release = false;
+ if (!shm || !shm->ctx || !shm->ctx->teedev)
+ return;
+
+ teedev = shm->ctx->teedev;
mutex_lock(&teedev->mutex);
if (refcount_dec_and_test(&shm->refcount)) {
/*
diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c
index 058bfbac657a..80004b55628d 100644
--- a/drivers/tee/tee_shm_pool.c
+++ b/drivers/tee/tee_shm_pool.c
@@ -6,7 +6,7 @@
#include <linux/dma-buf.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include "tee_private.h"
static int pool_op_gen_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
diff --git a/drivers/tee/tstee/Kconfig b/drivers/tee/tstee/Kconfig
new file mode 100644
index 000000000000..d32f91d47398
--- /dev/null
+++ b/drivers/tee/tstee/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config ARM_TSTEE
+ tristate "Arm Trusted Services TEE driver"
+ depends on ARM_FFA_TRANSPORT
+ default n
+ help
+ The Trusted Services project provides a framework for developing and
+ deploying device Root of Trust services in FF-A Secure Partitions.
+ This driver provides an interface to make Trusted Services Secure
+ Partitions accessible for user space clients, since the FF-A driver
+ doesn't implement a user space interface directly.
diff --git a/drivers/tee/tstee/Makefile b/drivers/tee/tstee/Makefile
new file mode 100644
index 000000000000..5227020ebd30
--- /dev/null
+++ b/drivers/tee/tstee/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+arm-tstee-objs := core.o
+obj-$(CONFIG_ARM_TSTEE) = arm-tstee.o
diff --git a/drivers/tee/tstee/core.c b/drivers/tee/tstee/core.c
new file mode 100644
index 000000000000..533425e9e9e7
--- /dev/null
+++ b/drivers/tee/tstee/core.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Arm Limited
+ */
+
+#include <linux/arm_ffa.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/tee_core.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+#include <linux/xarray.h>
+#include "tstee_private.h"
+
+#define FFA_DIRECT_REQ_ARG_NUM 5
+#define FFA_INVALID_MEM_HANDLE U64_MAX
+
+static void arg_list_to_ffa_data(const u32 *args,
+ struct ffa_send_direct_data *data)
+{
+ data->data0 = args[0];
+ data->data1 = args[1];
+ data->data2 = args[2];
+ data->data3 = args[3];
+ data->data4 = args[4];
+}
+
+static void arg_list_from_ffa_data(const struct ffa_send_direct_data *data,
+ u32 *args)
+{
+ args[0] = lower_32_bits(data->data0);
+ args[1] = lower_32_bits(data->data1);
+ args[2] = lower_32_bits(data->data2);
+ args[3] = lower_32_bits(data->data3);
+ args[4] = lower_32_bits(data->data4);
+}
+
+static void tstee_get_version(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers)
+{
+ struct tstee *tstee = tee_get_drvdata(teedev);
+ struct tee_ioctl_version_data v = {
+ .impl_id = TEE_IMPL_ID_TSTEE,
+ /* FF-A endpoint ID only uses the lower 16 bits */
+ .impl_caps = lower_16_bits(tstee->ffa_dev->vm_id),
+ .gen_caps = 0,
+ };
+
+ *vers = v;
+}
+
+static int tstee_open(struct tee_context *ctx)
+{
+ struct ts_context_data *ctxdata;
+
+ ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
+ if (!ctxdata)
+ return -ENOMEM;
+
+ xa_init_flags(&ctxdata->sess_list, XA_FLAGS_ALLOC);
+
+ ctx->data = ctxdata;
+
+ return 0;
+}
+
+static void tstee_release(struct tee_context *ctx)
+{
+ struct ts_context_data *ctxdata = ctx->data;
+ struct ts_session *sess;
+ unsigned long idx;
+
+ if (!ctxdata)
+ return;
+
+ xa_for_each(&ctxdata->sess_list, idx, sess) {
+ xa_erase(&ctxdata->sess_list, idx);
+ kfree(sess);
+ }
+
+ xa_destroy(&ctxdata->sess_list);
+
+ kfree(ctxdata);
+ ctx->data = NULL;
+}
+
+static int tstee_open_session(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param __always_unused)
+{
+ struct tstee *tstee = tee_get_drvdata(ctx->teedev);
+ struct ffa_device *ffa_dev = tstee->ffa_dev;
+ struct ts_context_data *ctxdata = ctx->data;
+ struct ffa_send_direct_data ffa_data;
+ struct ts_session *sess = NULL;
+ u32 ffa_args[FFA_DIRECT_REQ_ARG_NUM] = {};
+ u32 sess_id;
+ int rc;
+
+ ffa_args[TS_RPC_CTRL_REG] =
+ TS_RPC_CTRL_PACK_IFACE_OPCODE(TS_RPC_MGMT_IFACE_ID,
+ TS_RPC_OP_SERVICE_INFO);
+
+ memcpy(ffa_args + TS_RPC_SERVICE_INFO_UUID0, arg->uuid, UUID_SIZE);
+
+ arg_list_to_ffa_data(ffa_args, &ffa_data);
+ rc = ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &ffa_data);
+ if (rc)
+ return rc;
+
+ arg_list_from_ffa_data(&ffa_data, ffa_args);
+
+ if (ffa_args[TS_RPC_SERVICE_INFO_RPC_STATUS] != TS_RPC_OK)
+ return -ENODEV;
+
+ if (ffa_args[TS_RPC_SERVICE_INFO_IFACE] > U8_MAX)
+ return -EINVAL;
+
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (!sess)
+ return -ENOMEM;
+
+ sess->iface_id = ffa_args[TS_RPC_SERVICE_INFO_IFACE];
+
+ rc = xa_alloc(&ctxdata->sess_list, &sess_id, sess, xa_limit_32b,
+ GFP_KERNEL);
+ if (rc) {
+ kfree(sess);
+ return rc;
+ }
+
+ arg->session = sess_id;
+ arg->ret = 0;
+
+ return 0;
+}
+
+static int tstee_close_session(struct tee_context *ctx, u32 session)
+{
+ struct ts_context_data *ctxdata = ctx->data;
+ struct ts_session *sess;
+
+ /* Calls xa_lock() internally */
+ sess = xa_erase(&ctxdata->sess_list, session);
+ if (!sess)
+ return -EINVAL;
+
+ kfree(sess);
+
+ return 0;
+}
+
+static int tstee_invoke_func(struct tee_context *ctx,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param)
+{
+ struct tstee *tstee = tee_get_drvdata(ctx->teedev);
+ struct ffa_device *ffa_dev = tstee->ffa_dev;
+ struct ts_context_data *ctxdata = ctx->data;
+ struct ffa_send_direct_data ffa_data;
+ struct tee_shm *shm = NULL;
+ struct ts_session *sess;
+ u32 req_len, ffa_args[FFA_DIRECT_REQ_ARG_NUM] = {};
+ int shm_id, rc;
+ u8 iface_id;
+ u64 handle;
+ u16 opcode;
+
+ xa_lock(&ctxdata->sess_list);
+ sess = xa_load(&ctxdata->sess_list, arg->session);
+
+ /*
+ * Do this while holding the lock to make sure that the session wasn't
+ * closed meanwhile
+ */
+ if (sess)
+ iface_id = sess->iface_id;
+
+ xa_unlock(&ctxdata->sess_list);
+ if (!sess)
+ return -EINVAL;
+
+ opcode = lower_16_bits(arg->func);
+ shm_id = lower_32_bits(param[0].u.value.a);
+ req_len = lower_32_bits(param[0].u.value.b);
+
+ if (shm_id != 0) {
+ shm = tee_shm_get_from_id(ctx, shm_id);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ if (shm->size < req_len) {
+ dev_err(&ffa_dev->dev,
+ "request doesn't fit into shared memory buffer\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ handle = shm->sec_world_id;
+ } else {
+ handle = FFA_INVALID_MEM_HANDLE;
+ }
+
+ ffa_args[TS_RPC_CTRL_REG] = TS_RPC_CTRL_PACK_IFACE_OPCODE(iface_id,
+ opcode);
+ ffa_args[TS_RPC_SERVICE_MEM_HANDLE_LSW] = lower_32_bits(handle);
+ ffa_args[TS_RPC_SERVICE_MEM_HANDLE_MSW] = upper_32_bits(handle);
+ ffa_args[TS_RPC_SERVICE_REQ_LEN] = req_len;
+ ffa_args[TS_RPC_SERVICE_CLIENT_ID] = 0;
+
+ arg_list_to_ffa_data(ffa_args, &ffa_data);
+ rc = ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &ffa_data);
+ if (rc)
+ goto out;
+
+ arg_list_from_ffa_data(&ffa_data, ffa_args);
+
+ if (ffa_args[TS_RPC_SERVICE_RPC_STATUS] != TS_RPC_OK) {
+ dev_err(&ffa_dev->dev, "invoke_func rpc status: %d\n",
+ ffa_args[TS_RPC_SERVICE_RPC_STATUS]);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ arg->ret = ffa_args[TS_RPC_SERVICE_STATUS];
+ if (shm && shm->size >= ffa_args[TS_RPC_SERVICE_RESP_LEN])
+ param[0].u.value.a = ffa_args[TS_RPC_SERVICE_RESP_LEN];
+
+out:
+ if (shm)
+ tee_shm_put(shm);
+
+ return rc;
+}
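+
+/*
+ * Calling convention used by tstee_invoke_func() above: user space
+ * passes the shared memory id in param[0].u.value.a and the request
+ * length in param[0].u.value.b; on return, value.a carries the
+ * response length reported by the service.
+ */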
+
+static int tstee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start __always_unused)
+{
+ struct tstee *tstee = tee_get_drvdata(ctx->teedev);
+ struct ffa_device *ffa_dev = tstee->ffa_dev;
+ struct ffa_mem_region_attributes mem_attr = {
+ .receiver = tstee->ffa_dev->vm_id,
+ .attrs = FFA_MEM_RW,
+ .flag = 0,
+ };
+ struct ffa_mem_ops_args mem_args = {
+ .attrs = &mem_attr,
+ .use_txbuf = true,
+ .nattrs = 1,
+ .flags = 0,
+ };
+ struct ffa_send_direct_data ffa_data;
+ struct sg_table sgt;
+ u32 ffa_args[FFA_DIRECT_REQ_ARG_NUM] = {};
+ int rc;
+
+ rc = sg_alloc_table_from_pages(&sgt, pages, num_pages, 0,
+ num_pages * PAGE_SIZE, GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ mem_args.sg = sgt.sgl;
+ rc = ffa_dev->ops->mem_ops->memory_share(&mem_args);
+ sg_free_table(&sgt);
+ if (rc)
+ return rc;
+
+ shm->sec_world_id = mem_args.g_handle;
+
+ ffa_args[TS_RPC_CTRL_REG] =
+ TS_RPC_CTRL_PACK_IFACE_OPCODE(TS_RPC_MGMT_IFACE_ID,
+ TS_RPC_OP_RETRIEVE_MEM);
+ ffa_args[TS_RPC_RETRIEVE_MEM_HANDLE_LSW] =
+ lower_32_bits(shm->sec_world_id);
+ ffa_args[TS_RPC_RETRIEVE_MEM_HANDLE_MSW] =
+ upper_32_bits(shm->sec_world_id);
+ ffa_args[TS_RPC_RETRIEVE_MEM_TAG_LSW] = 0;
+ ffa_args[TS_RPC_RETRIEVE_MEM_TAG_MSW] = 0;
+
+ arg_list_to_ffa_data(ffa_args, &ffa_data);
+ rc = ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &ffa_data);
+ if (rc) {
+ (void)ffa_dev->ops->mem_ops->memory_reclaim(shm->sec_world_id,
+ 0);
+ return rc;
+ }
+
+ arg_list_from_ffa_data(&ffa_data, ffa_args);
+
+ if (ffa_args[TS_RPC_RETRIEVE_MEM_RPC_STATUS] != TS_RPC_OK) {
+ dev_err(&ffa_dev->dev, "shm_register rpc status: %d\n",
+ ffa_args[TS_RPC_RETRIEVE_MEM_RPC_STATUS]);
+ ffa_dev->ops->mem_ops->memory_reclaim(shm->sec_world_id, 0);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tstee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
+{
+ struct tstee *tstee = tee_get_drvdata(ctx->teedev);
+ struct ffa_device *ffa_dev = tstee->ffa_dev;
+ struct ffa_send_direct_data ffa_data;
+ u32 ffa_args[FFA_DIRECT_REQ_ARG_NUM] = {};
+ int rc;
+
+ ffa_args[TS_RPC_CTRL_REG] =
+ TS_RPC_CTRL_PACK_IFACE_OPCODE(TS_RPC_MGMT_IFACE_ID,
+ TS_RPC_OP_RELINQ_MEM);
+ ffa_args[TS_RPC_RELINQ_MEM_HANDLE_LSW] =
+ lower_32_bits(shm->sec_world_id);
+ ffa_args[TS_RPC_RELINQ_MEM_HANDLE_MSW] =
+ upper_32_bits(shm->sec_world_id);
+
+ arg_list_to_ffa_data(ffa_args, &ffa_data);
+ rc = ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &ffa_data);
+ if (rc)
+ return rc;
+ arg_list_from_ffa_data(&ffa_data, ffa_args);
+
+ if (ffa_args[TS_RPC_RELINQ_MEM_RPC_STATUS] != TS_RPC_OK) {
+ dev_err(&ffa_dev->dev, "shm_unregister rpc status: %d\n",
+ ffa_args[TS_RPC_RELINQ_MEM_RPC_STATUS]);
+ return -EINVAL;
+ }
+
+ rc = ffa_dev->ops->mem_ops->memory_reclaim(shm->sec_world_id, 0);
+
+ return rc;
+}
+
+static const struct tee_driver_ops tstee_ops = {
+ .get_version = tstee_get_version,
+ .open = tstee_open,
+ .release = tstee_release,
+ .open_session = tstee_open_session,
+ .close_session = tstee_close_session,
+ .invoke_func = tstee_invoke_func,
+};
+
+static const struct tee_desc tstee_desc = {
+ .name = "tstee-clnt",
+ .ops = &tstee_ops,
+ .owner = THIS_MODULE,
+};
+
+static int pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
+ size_t size, size_t align)
+{
+ return tee_dyn_shm_alloc_helper(shm, size, align, tstee_shm_register);
+}
+
+static void pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm)
+{
+ tee_dyn_shm_free_helper(shm, tstee_shm_unregister);
+}
+
+static void pool_op_destroy_pool(struct tee_shm_pool *pool)
+{
+ kfree(pool);
+}
+
+static const struct tee_shm_pool_ops pool_ops = {
+ .alloc = pool_op_alloc,
+ .free = pool_op_free,
+ .destroy_pool = pool_op_destroy_pool,
+};
+
+static struct tee_shm_pool *tstee_create_shm_pool(void)
+{
+ struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ pool->ops = &pool_ops;
+
+ return pool;
+}
+
+static bool tstee_check_rpc_compatible(struct ffa_device *ffa_dev)
+{
+ struct ffa_send_direct_data ffa_data;
+ u32 ffa_args[FFA_DIRECT_REQ_ARG_NUM] = {};
+
+ ffa_args[TS_RPC_CTRL_REG] =
+ TS_RPC_CTRL_PACK_IFACE_OPCODE(TS_RPC_MGMT_IFACE_ID,
+ TS_RPC_OP_GET_VERSION);
+
+ arg_list_to_ffa_data(ffa_args, &ffa_data);
+ if (ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &ffa_data))
+ return false;
+
+ arg_list_from_ffa_data(&ffa_data, ffa_args);
+
+ return ffa_args[TS_RPC_GET_VERSION_RESP] == TS_RPC_PROTOCOL_VERSION;
+}
+
+static int tstee_probe(struct ffa_device *ffa_dev)
+{
+ struct tstee *tstee;
+ int rc;
+
+ ffa_dev->ops->msg_ops->mode_32bit_set(ffa_dev);
+
+ if (!tstee_check_rpc_compatible(ffa_dev))
+ return -EINVAL;
+
+ tstee = kzalloc(sizeof(*tstee), GFP_KERNEL);
+ if (!tstee)
+ return -ENOMEM;
+
+ tstee->ffa_dev = ffa_dev;
+
+ tstee->pool = tstee_create_shm_pool();
+ if (IS_ERR(tstee->pool)) {
+ rc = PTR_ERR(tstee->pool);
+ tstee->pool = NULL;
+ goto err_free_tstee;
+ }
+
+ tstee->teedev = tee_device_alloc(&tstee_desc, NULL, tstee->pool, tstee);
+ if (IS_ERR(tstee->teedev)) {
+ rc = PTR_ERR(tstee->teedev);
+ tstee->teedev = NULL;
+ goto err_free_pool;
+ }
+
+ rc = tee_device_register(tstee->teedev);
+ if (rc)
+ goto err_unreg_teedev;
+
+ ffa_dev_set_drvdata(ffa_dev, tstee);
+
+ return 0;
+
+err_unreg_teedev:
+ tee_device_unregister(tstee->teedev);
+err_free_pool:
+ tee_shm_pool_free(tstee->pool);
+err_free_tstee:
+ kfree(tstee);
+ return rc;
+}
+
+static void tstee_remove(struct ffa_device *ffa_dev)
+{
+ struct tstee *tstee = ffa_dev->dev.driver_data;
+
+ tee_device_unregister(tstee->teedev);
+ tee_shm_pool_free(tstee->pool);
+ kfree(tstee);
+}
+
+static const struct ffa_device_id tstee_device_ids[] = {
+ /* TS RPC protocol UUID: bdcd76d7-825e-4751-963b-86d4f84943ac */
+ { TS_RPC_UUID },
+ {}
+};
+
+static struct ffa_driver tstee_driver = {
+ .name = "arm_tstee",
+ .probe = tstee_probe,
+ .remove = tstee_remove,
+ .id_table = tstee_device_ids,
+};
+
+module_ffa_driver(tstee_driver);
+
+MODULE_AUTHOR("Balint Dobszay <balint.dobszay@arm.com>");
+MODULE_DESCRIPTION("Arm Trusted Services TEE driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tee/tstee/tstee_private.h b/drivers/tee/tstee/tstee_private.h
new file mode 100644
index 000000000000..8e58725b57eb
--- /dev/null
+++ b/drivers/tee/tstee/tstee_private.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Arm Limited
+ */
+
+#ifndef TSTEE_PRIVATE_H
+#define TSTEE_PRIVATE_H
+
+#include <linux/arm_ffa.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/tee_core.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+#include <linux/xarray.h>
+
+/*
+ * The description of the ABI implemented in this file is available at
+ * https://trusted-services.readthedocs.io/en/v1.0.0/developer/service-access-protocols.html#abi
+ */
+
+/* UUID of this protocol */
+#define TS_RPC_UUID UUID_INIT(0xbdcd76d7, 0x825e, 0x4751, \
+ 0x96, 0x3b, 0x86, 0xd4, 0xf8, 0x49, 0x43, 0xac)
+
+/* Protocol version */
+#define TS_RPC_PROTOCOL_VERSION (1)
+
+/* Status codes */
+#define TS_RPC_OK (0)
+
+/* RPC control register */
+#define TS_RPC_CTRL_REG (0)
+#define OPCODE_MASK GENMASK(15, 0)
+#define IFACE_ID_MASK GENMASK(23, 16)
+#define TS_RPC_CTRL_OPCODE(x) ((u16)(FIELD_GET(OPCODE_MASK, (x))))
+#define TS_RPC_CTRL_IFACE_ID(x) ((u8)(FIELD_GET(IFACE_ID_MASK, (x))))
+#define TS_RPC_CTRL_PACK_IFACE_OPCODE(i, o) \
+ (FIELD_PREP(IFACE_ID_MASK, (i)) | FIELD_PREP(OPCODE_MASK, (o)))
+#define TS_RPC_CTRL_SAP_RC BIT(30)
+#define TS_RPC_CTRL_SAP_ERR BIT(31)
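+
+/*
+ * Example: TS_RPC_CTRL_PACK_IFACE_OPCODE(TS_RPC_MGMT_IFACE_ID,
+ * TS_RPC_OP_GET_VERSION) evaluates to 0x00ff0000: interface ID 0xff
+ * in bits [23:16] and opcode 0x0000 in bits [15:0].
+ */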
+
+/* Interface ID for RPC management operations */
+#define TS_RPC_MGMT_IFACE_ID (0xff)
+
+/* Management calls */
+#define TS_RPC_OP_GET_VERSION (0x0000)
+#define TS_RPC_GET_VERSION_RESP (1)
+
+#define TS_RPC_OP_RETRIEVE_MEM (0x0001)
+#define TS_RPC_RETRIEVE_MEM_HANDLE_LSW (1)
+#define TS_RPC_RETRIEVE_MEM_HANDLE_MSW (2)
+#define TS_RPC_RETRIEVE_MEM_TAG_LSW (3)
+#define TS_RPC_RETRIEVE_MEM_TAG_MSW (4)
+#define TS_RPC_RETRIEVE_MEM_RPC_STATUS (1)
+
+#define TS_RPC_OP_RELINQ_MEM (0x0002)
+#define TS_RPC_RELINQ_MEM_HANDLE_LSW (1)
+#define TS_RPC_RELINQ_MEM_HANDLE_MSW (2)
+#define TS_RPC_RELINQ_MEM_RPC_STATUS (1)
+
+#define TS_RPC_OP_SERVICE_INFO (0x0003)
+#define TS_RPC_SERVICE_INFO_UUID0 (1)
+#define TS_RPC_SERVICE_INFO_UUID1 (2)
+#define TS_RPC_SERVICE_INFO_UUID2 (3)
+#define TS_RPC_SERVICE_INFO_UUID3 (4)
+#define TS_RPC_SERVICE_INFO_RPC_STATUS (1)
+#define TS_RPC_SERVICE_INFO_IFACE (2)
+
+/* Service call */
+#define TS_RPC_SERVICE_MEM_HANDLE_LSW (1)
+#define TS_RPC_SERVICE_MEM_HANDLE_MSW (2)
+#define TS_RPC_SERVICE_REQ_LEN (3)
+#define TS_RPC_SERVICE_CLIENT_ID (4)
+#define TS_RPC_SERVICE_RPC_STATUS (1)
+#define TS_RPC_SERVICE_STATUS (2)
+#define TS_RPC_SERVICE_RESP_LEN (3)
+
+struct tstee {
+ struct ffa_device *ffa_dev;
+ struct tee_device *teedev;
+ struct tee_shm_pool *pool;
+};
+
+struct ts_session {
+ u8 iface_id;
+};
+
+struct ts_context_data {
+ struct xarray sess_list;
+};
+
+#endif /* TSTEE_PRIVATE_H */