Diffstat (limited to 'drivers/tee/optee')
-rw-r--r--  drivers/tee/optee/Kconfig            5
-rw-r--r--  drivers/tee/optee/Makefile           1
-rw-r--r--  drivers/tee/optee/core.c             9
-rw-r--r--  drivers/tee/optee/ffa_abi.c        146
-rw-r--r--  drivers/tee/optee/optee_ffa.h       27
-rw-r--r--  drivers/tee/optee/optee_msg.h       84
-rw-r--r--  drivers/tee/optee/optee_private.h   15
-rw-r--r--  drivers/tee/optee/optee_smc.h       37
-rw-r--r--  drivers/tee/optee/protmem.c        335
-rw-r--r--  drivers/tee/optee/smc_abi.c        141
10 files changed, 776 insertions, 24 deletions
diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig
index 7bb7990d0b07..50d2051f7f20 100644
--- a/drivers/tee/optee/Kconfig
+++ b/drivers/tee/optee/Kconfig
@@ -25,3 +25,8 @@ config OPTEE_INSECURE_LOAD_IMAGE
Additional documentation on kernel security risks are at
Documentation/tee/op-tee.rst.
+
+config OPTEE_STATIC_PROTMEM_POOL
+ bool
+ depends on HAS_IOMEM && TEE_DMABUF_HEAPS
+ default y
diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile
index a6eff388d300..ad7049c1c107 100644
--- a/drivers/tee/optee/Makefile
+++ b/drivers/tee/optee/Makefile
@@ -4,6 +4,7 @@ optee-objs += core.o
optee-objs += call.o
optee-objs += notif.o
optee-objs += rpc.o
+optee-objs += protmem.o
optee-objs += supp.o
optee-objs += device.o
optee-objs += smc_abi.o
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index c75fddc83576..5b62139714ce 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -56,6 +56,13 @@ int optee_rpmb_intf_rdev(struct notifier_block *intf, unsigned long action,
return 0;
}
+int optee_set_dma_mask(struct optee *optee, u_int pa_width)
+{
+ u64 mask = DMA_BIT_MASK(min(64, pa_width));
+
+ return dma_coerce_mask_and_coherent(&optee->teedev->dev, mask);
+}
+
static void optee_bus_scan(struct work_struct *work)
{
WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP));
@@ -72,7 +79,7 @@ static ssize_t rpmb_routing_model_show(struct device *dev,
else
s = "user";
- return scnprintf(buf, PAGE_SIZE, "%s\n", s);
+ return sysfs_emit(buf, "%s\n", s);
}
static DEVICE_ATTR_RO(rpmb_routing_model);
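For reference, the PA width reported by the secure world translates directly into the coherent DMA mask set by optee_set_dma_mask() above; a few illustrative values (not part of the patch):

    pa_width = 32  ->  DMA_BIT_MASK(32) = 0x00000000ffffffff
    pa_width = 40  ->  DMA_BIT_MASK(40) = 0x000000ffffffffff
    pa_width >= 64 ->  DMA_BIT_MASK(64) = 0xffffffffffffffff (clamped by min())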
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
index a963eed70c1d..bf8390789ecf 100644
--- a/drivers/tee/optee/ffa_abi.c
+++ b/drivers/tee/optee/ffa_abi.c
@@ -649,6 +649,124 @@ static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
return optee_ffa_yielding_call(ctx, &data, rpc_arg, system_thread);
}
+static int do_call_lend_protmem(struct optee *optee, u64 cookie, u32 use_case)
+{
+ struct optee_shm_arg_entry *entry;
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm;
+ u_int offs;
+ int rc;
+
+ msg_arg = optee_get_msg_arg(optee->ctx, 1, &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_ASSIGN_PROTMEM;
+ msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ msg_arg->params[0].u.value.a = cookie;
+ msg_arg->params[0].u.value.b = use_case;
+
+ rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false);
+ if (rc)
+ goto out;
+ if (msg_arg->ret != TEEC_SUCCESS) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+out:
+ optee_free_msg_arg(optee->ctx, entry, offs);
+ return rc;
+}
+
+static int optee_ffa_lend_protmem(struct optee *optee, struct tee_shm *protmem,
+ u32 *mem_attrs, unsigned int ma_count,
+ u32 use_case)
+{
+ struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops;
+ const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops;
+ struct ffa_send_direct_data data;
+ struct ffa_mem_region_attributes *mem_attr;
+ struct ffa_mem_ops_args args = {
+ .use_txbuf = true,
+ .tag = use_case,
+ };
+ struct page *page;
+ struct scatterlist sgl;
+ unsigned int n;
+ int rc;
+
+ mem_attr = kcalloc(ma_count, sizeof(*mem_attr), GFP_KERNEL);
+ if (!mem_attr)
+ return -ENOMEM;
+ for (n = 0; n < ma_count; n++) {
+ mem_attr[n].receiver = mem_attrs[n] & U16_MAX;
+ mem_attr[n].attrs = mem_attrs[n] >> 16;
+ }
+ args.attrs = mem_attr;
+ args.nattrs = ma_count;
+
+ page = phys_to_page(protmem->paddr);
+ sg_init_table(&sgl, 1);
+ sg_set_page(&sgl, page, protmem->size, 0);
+
+ args.sg = &sgl;
+ rc = mem_ops->memory_lend(&args);
+ kfree(mem_attr);
+ if (rc)
+ return rc;
+
+ rc = do_call_lend_protmem(optee, args.g_handle, use_case);
+ if (rc)
+ goto err_reclaim;
+
+ rc = optee_shm_add_ffa_handle(optee, protmem, args.g_handle);
+ if (rc)
+ goto err_unreg;
+
+ protmem->sec_world_id = args.g_handle;
+
+ return 0;
+
+err_unreg:
+ data = (struct ffa_send_direct_data){
+ .data0 = OPTEE_FFA_RELEASE_PROTMEM,
+ .data1 = (u32)args.g_handle,
+ .data2 = (u32)(args.g_handle >> 32),
+ };
+ msg_ops->sync_send_receive(ffa_dev, &data);
+err_reclaim:
+ mem_ops->memory_reclaim(args.g_handle, 0);
+ return rc;
+}
+
+static int optee_ffa_reclaim_protmem(struct optee *optee,
+ struct tee_shm *protmem)
+{
+ struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops;
+ const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops;
+ u64 global_handle = protmem->sec_world_id;
+ struct ffa_send_direct_data data = {
+ .data0 = OPTEE_FFA_RELEASE_PROTMEM,
+ .data1 = (u32)global_handle,
+ .data2 = (u32)(global_handle >> 32)
+ };
+ int rc;
+
+ optee_shm_rem_ffa_handle(optee, global_handle);
+ protmem->sec_world_id = 0;
+
+ rc = msg_ops->sync_send_receive(ffa_dev, &data);
+ if (rc)
+ pr_err("Release SHM id 0x%llx rc %d\n", global_handle, rc);
+
+ rc = mem_ops->memory_reclaim(global_handle, 0);
+ if (rc)
+ pr_err("mem_reclaim: 0x%llx %d", global_handle, rc);
+
+ return rc;
+}
+
/*
* 6. Driver initialization
*
@@ -819,6 +937,8 @@ static const struct optee_ops optee_ffa_ops = {
.do_call_with_arg = optee_ffa_do_call_with_arg,
.to_msg_param = optee_ffa_to_msg_param,
.from_msg_param = optee_ffa_from_msg_param,
+ .lend_protmem = optee_ffa_lend_protmem,
+ .reclaim_protmem = optee_ffa_reclaim_protmem,
};
static void optee_ffa_remove(struct ffa_device *ffa_dev)
@@ -891,6 +1011,25 @@ err:
return rc;
}
+static int optee_ffa_protmem_pool_init(struct optee *optee, u32 sec_caps)
+{
+ enum tee_dma_heap_id id = TEE_DMA_HEAP_SECURE_VIDEO_PLAY;
+ struct tee_protmem_pool *pool;
+ int rc = 0;
+
+ if (sec_caps & OPTEE_FFA_SEC_CAP_PROTMEM) {
+ pool = optee_protmem_alloc_dyn_pool(optee, id);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ rc = tee_device_register_dma_heap(optee->teedev, id, pool);
+ if (rc)
+ pool->ops->destroy_pool(pool);
+ }
+
+ return rc;
+}
+
static int optee_ffa_probe(struct ffa_device *ffa_dev)
{
const struct ffa_notifier_ops *notif_ops;
@@ -941,7 +1080,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
optee);
if (IS_ERR(teedev)) {
rc = PTR_ERR(teedev);
- goto err_free_pool;
+ goto err_free_shm_pool;
}
optee->teedev = teedev;
@@ -988,6 +1127,9 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
rc);
}
+ if (optee_ffa_protmem_pool_init(optee, sec_caps))
+ pr_info("Protected memory service not available\n");
+
rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
if (rc)
goto err_unregister_devices;
@@ -1018,7 +1160,7 @@ err_unreg_supp_teedev:
tee_device_unregister(optee->supp_teedev);
err_unreg_teedev:
tee_device_unregister(optee->teedev);
-err_free_pool:
+err_free_shm_pool:
tee_shm_pool_free(pool);
err_free_optee:
kfree(optee);
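A note on the mem_attrs encoding consumed by optee_ffa_lend_protmem() above: each 32-bit word carries the FF-A endpoint (receiver) ID in bits [15:0] and the FF-A memory access attributes in bits [31:16], which is how the loop in that function unpacks them. A minimal sketch of the corresponding packing helper, assuming that layout (the helper name is illustrative and not part of the patch):

static inline u32 protmem_mem_attr(u16 endpoint_id, u16 ffa_attrs)
{
	/* Inverse of the unpacking in optee_ffa_lend_protmem():
	 * receiver = word & U16_MAX, attrs = word >> 16
	 */
	return (u32)ffa_attrs << 16 | endpoint_id;
}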
diff --git a/drivers/tee/optee/optee_ffa.h b/drivers/tee/optee/optee_ffa.h
index 257735ae5b56..cc257e7956a3 100644
--- a/drivers/tee/optee/optee_ffa.h
+++ b/drivers/tee/optee/optee_ffa.h
@@ -81,7 +81,7 @@
* as the second MSG arg struct for
* OPTEE_FFA_YIELDING_CALL_WITH_ARG.
* Bit[31:8]: Reserved (MBZ)
- * w5: Bitfield of secure world capabilities OPTEE_FFA_SEC_CAP_* below,
+ * w5: Bitfield of OP-TEE capabilities OPTEE_FFA_SEC_CAP_*
* w6: The maximum secure world notification number
* w7: Not used (MBZ)
*/
@@ -94,6 +94,8 @@
#define OPTEE_FFA_SEC_CAP_ASYNC_NOTIF BIT(1)
/* OP-TEE supports probing for RPMB device if needed */
#define OPTEE_FFA_SEC_CAP_RPMB_PROBE BIT(2)
+/* OP-TEE supports Protected Memory for secure data path */
+#define OPTEE_FFA_SEC_CAP_PROTMEM BIT(3)
#define OPTEE_FFA_EXCHANGE_CAPABILITIES OPTEE_FFA_BLOCKING_CALL(2)
@@ -108,7 +110,7 @@
*
* Return register usage:
* w3: Error code, 0 on success
- * w4-w7: Note used (MBZ)
+ * w4-w7: Not used (MBZ)
*/
#define OPTEE_FFA_UNREGISTER_SHM OPTEE_FFA_BLOCKING_CALL(3)
@@ -119,16 +121,31 @@
* Call register usage:
* w3: Service ID, OPTEE_FFA_ENABLE_ASYNC_NOTIF
* w4: Notification value to request bottom half processing, should be
- * less than OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE.
+ * less than OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE
* w5-w7: Not used (MBZ)
*
* Return register usage:
* w3: Error code, 0 on success
- * w4-w7: Note used (MBZ)
+ * w4-w7: Not used (MBZ)
*/
#define OPTEE_FFA_ENABLE_ASYNC_NOTIF OPTEE_FFA_BLOCKING_CALL(5)
-#define OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE 64
+#define OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE 64
+
+/*
+ * Release protected memory
+ *
+ * Call register usage:
+ * w3: Service ID, OPTEE_FFA_RELEASE_PROTMEM
+ * w4: Shared memory handle, lower bits
+ * w5: Shared memory handle, higher bits
+ * w6-w7: Not used (MBZ)
+ *
+ * Return register usage:
+ * w3: Error code, 0 on success
+ * w4-w7: Not used (MBZ)
+ */
+#define OPTEE_FFA_RELEASE_PROTMEM OPTEE_FFA_BLOCKING_CALL(8)
/*
* Call with struct optee_msg_arg as argument in the supplied shared memory
diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
index e8840a82b983..838e1d4a22f0 100644
--- a/drivers/tee/optee/optee_msg.h
+++ b/drivers/tee/optee/optee_msg.h
@@ -133,13 +133,13 @@ struct optee_msg_param_rmem {
};
/**
- * struct optee_msg_param_fmem - ffa memory reference parameter
+ * struct optee_msg_param_fmem - FF-A memory reference parameter
* @offs_lower: Lower bits of offset into shared memory reference
* @offs_upper: Upper bits of offset into shared memory reference
* @internal_offs: Internal offset into the first page of shared memory
* reference
* @size: Size of the buffer
- * @global_id: Global identifier of Shared memory
+ * @global_id: Global identifier of the shared memory
*/
struct optee_msg_param_fmem {
u32 offs_low;
@@ -165,7 +165,7 @@ struct optee_msg_param_value {
* @attr: attributes
* @tmem: parameter by temporary memory reference
* @rmem: parameter by registered memory reference
- * @fmem: parameter by ffa registered memory reference
+ * @fmem: parameter by FF-A registered memory reference
* @value: parameter by opaque value
* @octets: parameter by octet string
*
@@ -297,6 +297,18 @@ struct optee_msg_arg {
#define OPTEE_MSG_FUNCID_GET_OS_REVISION 0x0001
/*
+ * Values used in OPTEE_MSG_CMD_LEND_PROTMEM below
+ * OPTEE_MSG_PROTMEM_RESERVED Reserved
+ * OPTEE_MSG_PROTMEM_SECURE_VIDEO_PLAY Secure Video Playback
+ * OPTEE_MSG_PROTMEM_TRUSTED_UI Trusted UI
+ * OPTEE_MSG_PROTMEM_SECURE_VIDEO_RECORD Secure Video Recording
+ */
+#define OPTEE_MSG_PROTMEM_RESERVED 0
+#define OPTEE_MSG_PROTMEM_SECURE_VIDEO_PLAY 1
+#define OPTEE_MSG_PROTMEM_TRUSTED_UI 2
+#define OPTEE_MSG_PROTMEM_SECURE_VIDEO_RECORD 3
+
+/*
* Do a secure call with struct optee_msg_arg as argument
* The OPTEE_MSG_CMD_* below defines what goes in struct optee_msg_arg::cmd
*
@@ -337,15 +349,63 @@ struct optee_msg_arg {
* OPTEE_MSG_CMD_STOP_ASYNC_NOTIF informs secure world that from now is
* normal world unable to process asynchronous notifications. Typically
* used when the driver is shut down.
+ *
+ * OPTEE_MSG_CMD_LEND_PROTMEM lends protected memory. The passed normal
+ * physical memory is protected from normal world access. The memory
+ * should be unmapped prior to this call since it becomes inaccessible
+ * during the request.
+ * Parameters are passed as:
+ * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_VALUE_INPUT
+ * [in] param[0].u.value.a OPTEE_MSG_PROTMEM_* defined above
+ * [in] param[1].attr OPTEE_MSG_ATTR_TYPE_TMEM_INPUT
+ * [in] param[1].u.tmem.buf_ptr physical address
+ * [in] param[1].u.tmem.size size
+ * [in] param[1].u.tmem.shm_ref holds protected memory reference
+ *
+ * OPTEE_MSG_CMD_RECLAIM_PROTMEM reclaims a previously lent protected
+ * memory reference. The physical memory is accessible by the normal world
+ * after this function has returned and can be mapped again. The information
+ * is passed as:
+ * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_VALUE_INPUT
+ * [in] param[0].u.value.a holds protected memory cookie
+ *
+ * OPTEE_MSG_CMD_GET_PROTMEM_CONFIG get configuration for a specific
+ * protected memory use case. Parameters are passed as:
+ * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_VALUE_INOUT
+ * [in] param[0].value.a OPTEE_MSG_PROTMEM_*
+ * [in] param[1].attr OPTEE_MSG_ATTR_TYPE_{R,F}MEM_OUTPUT
+ * [in] param[1].u.{r,f}mem Buffer or NULL
+ * [in] param[1].u.{r,f}mem.size Provided size of buffer or 0 for query
+ * Output for the protected memory use case:
+ * [out] param[0].value.a Minimal size of protected memory
+ * [out] param[0].value.b Required alignment of size and start of
+ * protected memory
+ * [out] param[0].value.c PA width, max 64
+ * [out] param[1].{r,f}mem.size Size of output data
+ * [out] param[1].{r,f}mem If non-NULL, contains an array of
+ * uint32_t memory attributes that must be
+ * included when lending memory for this
+ * use case
+ *
+ * OPTEE_MSG_CMD_ASSIGN_PROTMEM assigns use-case to protected memory
+ * previously lent using the FFA_LEND framework ABI. Parameters are passed
+ * as:
+ * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_VALUE_INPUT
+ * [in] param[0].u.value.a holds protected memory cookie
+ * [in] param[0].u.value.b OPTEE_MSG_PROTMEM_* defined above
*/
-#define OPTEE_MSG_CMD_OPEN_SESSION 0
-#define OPTEE_MSG_CMD_INVOKE_COMMAND 1
-#define OPTEE_MSG_CMD_CLOSE_SESSION 2
-#define OPTEE_MSG_CMD_CANCEL 3
-#define OPTEE_MSG_CMD_REGISTER_SHM 4
-#define OPTEE_MSG_CMD_UNREGISTER_SHM 5
-#define OPTEE_MSG_CMD_DO_BOTTOM_HALF 6
-#define OPTEE_MSG_CMD_STOP_ASYNC_NOTIF 7
-#define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004
+#define OPTEE_MSG_CMD_OPEN_SESSION 0
+#define OPTEE_MSG_CMD_INVOKE_COMMAND 1
+#define OPTEE_MSG_CMD_CLOSE_SESSION 2
+#define OPTEE_MSG_CMD_CANCEL 3
+#define OPTEE_MSG_CMD_REGISTER_SHM 4
+#define OPTEE_MSG_CMD_UNREGISTER_SHM 5
+#define OPTEE_MSG_CMD_DO_BOTTOM_HALF 6
+#define OPTEE_MSG_CMD_STOP_ASYNC_NOTIF 7
+#define OPTEE_MSG_CMD_LEND_PROTMEM 8
+#define OPTEE_MSG_CMD_RECLAIM_PROTMEM 9
+#define OPTEE_MSG_CMD_GET_PROTMEM_CONFIG 10
+#define OPTEE_MSG_CMD_ASSIGN_PROTMEM 11
+#define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004
#endif /* _OPTEE_MSG_H */
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index 9526087f0e68..db9ea673fbca 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -176,9 +176,14 @@ struct optee;
* @do_call_with_arg: enters OP-TEE in secure world
* @to_msg_param: converts from struct tee_param to OPTEE_MSG parameters
* @from_msg_param: converts from OPTEE_MSG parameters to struct tee_param
+ * @lend_protmem: lends physically contiguous memory as protected
+ * memory, inaccessible by the kernel
+ * @reclaim_protmem: reclaims protected memory previously lent with
+ * @lend_protmem() and makes it accessible by the
+ * kernel again
*
* These OPs are only supposed to be used internally in the OP-TEE driver
- * as a way of abstracting the different methogs of entering OP-TEE in
+ * as a way of abstracting the different methods of entering OP-TEE in
* secure world.
*/
struct optee_ops {
@@ -191,6 +196,10 @@ struct optee_ops {
int (*from_msg_param)(struct optee *optee, struct tee_param *params,
size_t num_params,
const struct optee_msg_param *msg_params);
+ int (*lend_protmem)(struct optee *optee, struct tee_shm *protmem,
+ u32 *mem_attr, unsigned int ma_count,
+ u32 use_case);
+ int (*reclaim_protmem)(struct optee *optee, struct tee_shm *protmem);
};
/**
@@ -274,6 +283,8 @@ struct optee_call_ctx {
extern struct blocking_notifier_head optee_rpmb_intf_added;
+int optee_set_dma_mask(struct optee *optee, u_int pa_width);
+
int optee_notif_init(struct optee *optee, u_int max_key);
void optee_notif_uninit(struct optee *optee);
int optee_notif_wait(struct optee *optee, u_int key, u32 timeout);
@@ -285,6 +296,8 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
void optee_supp_init(struct optee_supp *supp);
void optee_supp_uninit(struct optee_supp *supp);
void optee_supp_release(struct optee_supp *supp);
+struct tee_protmem_pool *optee_protmem_alloc_dyn_pool(struct optee *optee,
+ enum tee_dma_heap_id id);
int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
struct tee_param *param);
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
index 879426300821..accf76a99288 100644
--- a/drivers/tee/optee/optee_smc.h
+++ b/drivers/tee/optee/optee_smc.h
@@ -264,7 +264,6 @@ struct optee_smc_get_shm_config_result {
#define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM BIT(0)
/* Secure world can communicate via previously unregistered shared memory */
#define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM BIT(1)
-
/*
* Secure world supports commands "register/unregister shared memory",
* secure world accepts command buffers located in any parts of non-secure RAM
@@ -280,6 +279,10 @@ struct optee_smc_get_shm_config_result {
#define OPTEE_SMC_SEC_CAP_RPC_ARG BIT(6)
/* Secure world supports probing for RPMB device if needed */
#define OPTEE_SMC_SEC_CAP_RPMB_PROBE BIT(7)
+/* Secure world supports protected memory */
+#define OPTEE_SMC_SEC_CAP_PROTMEM BIT(8)
+/* Secure world supports dynamic protected memory */
+#define OPTEE_SMC_SEC_CAP_DYNAMIC_PROTMEM BIT(9)
#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9
#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
@@ -451,6 +454,38 @@ struct optee_smc_disable_shm_cache_result {
/* See OPTEE_SMC_CALL_WITH_REGD_ARG above */
#define OPTEE_SMC_FUNCID_CALL_WITH_REGD_ARG 19
+/*
+ * Get protected memory config
+ *
+ * Returns the protected memory config.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_GET_PROTMEM_CONFIG
+ * a1-6 Not used, must be zero
+ * a7 Hypervisor Client ID register
+ *
+ * Have config return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 Physical address of start of protected memory
+ * a2 Size of protected memory
+ * a3 PA width, max 64
+ * a4-7 Preserved
+ *
+ * Not available register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-3 Not used
+ * a4-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_GET_PROTMEM_CONFIG 20
+#define OPTEE_SMC_GET_PROTMEM_CONFIG \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_PROTMEM_CONFIG)
+
+struct optee_smc_get_protmem_config_result {
+ unsigned long status;
+ unsigned long start;
+ unsigned long size;
+ unsigned long pa_width;
+};
/*
* Resume from RPC (for example after processing a foreign interrupt)
diff --git a/drivers/tee/optee/protmem.c b/drivers/tee/optee/protmem.c
new file mode 100644
index 000000000000..2eba48d5ac73
--- /dev/null
+++ b/drivers/tee/optee/protmem.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Linaro Limited
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/errno.h>
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tee_core.h>
+#include <linux/types.h>
+#include "optee_private.h"
+
+struct optee_protmem_dyn_pool {
+ struct tee_protmem_pool pool;
+ struct gen_pool *gen_pool;
+ struct optee *optee;
+ size_t page_count;
+ u32 *mem_attrs;
+ u_int mem_attr_count;
+ refcount_t refcount;
+ u32 use_case;
+ struct tee_shm *protmem;
+ /* Protects when initializing and tearing down this struct */
+ struct mutex mutex;
+};
+
+static struct optee_protmem_dyn_pool *
+to_protmem_dyn_pool(struct tee_protmem_pool *pool)
+{
+ return container_of(pool, struct optee_protmem_dyn_pool, pool);
+}
+
+static int init_dyn_protmem(struct optee_protmem_dyn_pool *rp)
+{
+ int rc;
+
+ rp->protmem = tee_shm_alloc_dma_mem(rp->optee->ctx, rp->page_count);
+ if (IS_ERR(rp->protmem)) {
+ rc = PTR_ERR(rp->protmem);
+ goto err_null_protmem;
+ }
+
+ /*
+ * TODO: unmap the memory range since the physical memory will
+ * become inaccessible after the lend_protmem() call.
+ *
+ * If the platform supports a hypervisor at EL2, it will unmap the
+ * intermediate physical memory for us and stop cache pre-fetch of
+ * the memory.
+ */
+ rc = rp->optee->ops->lend_protmem(rp->optee, rp->protmem,
+ rp->mem_attrs,
+ rp->mem_attr_count, rp->use_case);
+ if (rc)
+ goto err_put_shm;
+ rp->protmem->flags |= TEE_SHM_DYNAMIC;
+
+ rp->gen_pool = gen_pool_create(PAGE_SHIFT, -1);
+ if (!rp->gen_pool) {
+ rc = -ENOMEM;
+ goto err_reclaim;
+ }
+
+ rc = gen_pool_add(rp->gen_pool, rp->protmem->paddr,
+ rp->protmem->size, -1);
+ if (rc)
+ goto err_free_pool;
+
+ refcount_set(&rp->refcount, 1);
+ return 0;
+
+err_free_pool:
+ gen_pool_destroy(rp->gen_pool);
+ rp->gen_pool = NULL;
+err_reclaim:
+ rp->optee->ops->reclaim_protmem(rp->optee, rp->protmem);
+err_put_shm:
+ tee_shm_put(rp->protmem);
+err_null_protmem:
+ rp->protmem = NULL;
+ return rc;
+}
+
+static int get_dyn_protmem(struct optee_protmem_dyn_pool *rp)
+{
+ int rc = 0;
+
+ if (!refcount_inc_not_zero(&rp->refcount)) {
+ mutex_lock(&rp->mutex);
+ if (rp->gen_pool) {
+ /*
+ * Another thread has already initialized the pool
+ * before us, or the pool was just about to be torn
+ * down. Either way we only need to increase the
+ * refcount and we're done.
+ */
+ refcount_inc(&rp->refcount);
+ } else {
+ rc = init_dyn_protmem(rp);
+ }
+ mutex_unlock(&rp->mutex);
+ }
+
+ return rc;
+}
+
+static void release_dyn_protmem(struct optee_protmem_dyn_pool *rp)
+{
+ gen_pool_destroy(rp->gen_pool);
+ rp->gen_pool = NULL;
+
+ rp->optee->ops->reclaim_protmem(rp->optee, rp->protmem);
+ rp->protmem->flags &= ~TEE_SHM_DYNAMIC;
+
+ WARN(refcount_read(&rp->protmem->refcount) != 1, "Unexpected refcount");
+ tee_shm_put(rp->protmem);
+ rp->protmem = NULL;
+}
+
+static void put_dyn_protmem(struct optee_protmem_dyn_pool *rp)
+{
+ if (refcount_dec_and_test(&rp->refcount)) {
+ mutex_lock(&rp->mutex);
+ if (rp->gen_pool)
+ release_dyn_protmem(rp);
+ mutex_unlock(&rp->mutex);
+ }
+}
+
+static int protmem_pool_op_dyn_alloc(struct tee_protmem_pool *pool,
+ struct sg_table *sgt, size_t size,
+ size_t *offs)
+{
+ struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool);
+ size_t sz = ALIGN(size, PAGE_SIZE);
+ phys_addr_t pa;
+ int rc;
+
+ rc = get_dyn_protmem(rp);
+ if (rc)
+ return rc;
+
+ pa = gen_pool_alloc(rp->gen_pool, sz);
+ if (!pa) {
+ rc = -ENOMEM;
+ goto err_put;
+ }
+
+ rc = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (rc)
+ goto err_free;
+
+ sg_set_page(sgt->sgl, phys_to_page(pa), size, 0);
+ *offs = pa - rp->protmem->paddr;
+
+ return 0;
+err_free:
+ gen_pool_free(rp->gen_pool, pa, size);
+err_put:
+ put_dyn_protmem(rp);
+
+ return rc;
+}
+
+static void protmem_pool_op_dyn_free(struct tee_protmem_pool *pool,
+ struct sg_table *sgt)
+{
+ struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool);
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sgtable_sg(sgt, sg, i)
+ gen_pool_free(rp->gen_pool, sg_phys(sg), sg->length);
+ sg_free_table(sgt);
+ put_dyn_protmem(rp);
+}
+
+static int protmem_pool_op_dyn_update_shm(struct tee_protmem_pool *pool,
+ struct sg_table *sgt, size_t offs,
+ struct tee_shm *shm,
+ struct tee_shm **parent_shm)
+{
+ struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool);
+
+ *parent_shm = rp->protmem;
+
+ return 0;
+}
+
+static void pool_op_dyn_destroy_pool(struct tee_protmem_pool *pool)
+{
+ struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool);
+
+ mutex_destroy(&rp->mutex);
+ kfree(rp);
+}
+
+static struct tee_protmem_pool_ops protmem_pool_ops_dyn = {
+ .alloc = protmem_pool_op_dyn_alloc,
+ .free = protmem_pool_op_dyn_free,
+ .update_shm = protmem_pool_op_dyn_update_shm,
+ .destroy_pool = pool_op_dyn_destroy_pool,
+};
+
+static int get_protmem_config(struct optee *optee, u32 use_case,
+ size_t *min_size, u_int *pa_width,
+ u32 *mem_attrs, u_int *ma_count)
+{
+ struct tee_param params[2] = {
+ [0] = {
+ .attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT,
+ .u.value.a = use_case,
+ },
+ [1] = {
+ .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT,
+ },
+ };
+ struct optee_shm_arg_entry *entry;
+ struct tee_shm *shm_param = NULL;
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm;
+ u_int offs;
+ int rc;
+
+ if (mem_attrs && *ma_count) {
+ params[1].u.memref.size = *ma_count * sizeof(*mem_attrs);
+ shm_param = tee_shm_alloc_priv_buf(optee->ctx,
+ params[1].u.memref.size);
+ if (IS_ERR(shm_param))
+ return PTR_ERR(shm_param);
+ params[1].u.memref.shm = shm_param;
+ }
+
+ msg_arg = optee_get_msg_arg(optee->ctx, ARRAY_SIZE(params), &entry,
+ &shm, &offs);
+ if (IS_ERR(msg_arg)) {
+ rc = PTR_ERR(msg_arg);
+ goto out_free_shm;
+ }
+ msg_arg->cmd = OPTEE_MSG_CMD_GET_PROTMEM_CONFIG;
+
+ rc = optee->ops->to_msg_param(optee, msg_arg->params,
+ ARRAY_SIZE(params), params);
+ if (rc)
+ goto out_free_msg;
+
+ rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false);
+ if (rc)
+ goto out_free_msg;
+ if (msg_arg->ret && msg_arg->ret != TEEC_ERROR_SHORT_BUFFER) {
+ rc = -EINVAL;
+ goto out_free_msg;
+ }
+
+ rc = optee->ops->from_msg_param(optee, params, ARRAY_SIZE(params),
+ msg_arg->params);
+ if (rc)
+ goto out_free_msg;
+
+ if (!msg_arg->ret && mem_attrs &&
+ *ma_count < params[1].u.memref.size / sizeof(*mem_attrs)) {
+ rc = -EINVAL;
+ goto out_free_msg;
+ }
+
+ *min_size = params[0].u.value.a;
+ *pa_width = params[0].u.value.c;
+ *ma_count = params[1].u.memref.size / sizeof(*mem_attrs);
+
+ if (msg_arg->ret == TEEC_ERROR_SHORT_BUFFER) {
+ rc = -ENOSPC;
+ goto out_free_msg;
+ }
+
+ if (mem_attrs)
+ memcpy(mem_attrs, tee_shm_get_va(shm_param, 0),
+ params[1].u.memref.size);
+
+out_free_msg:
+ optee_free_msg_arg(optee->ctx, entry, offs);
+out_free_shm:
+ if (shm_param)
+ tee_shm_free(shm_param);
+ return rc;
+}
+
+struct tee_protmem_pool *optee_protmem_alloc_dyn_pool(struct optee *optee,
+ enum tee_dma_heap_id id)
+{
+ struct optee_protmem_dyn_pool *rp;
+ size_t min_size;
+ u_int pa_width;
+ int rc;
+
+ rp = kzalloc(sizeof(*rp), GFP_KERNEL);
+ if (!rp)
+ return ERR_PTR(-ENOMEM);
+ rp->use_case = id;
+
+ rc = get_protmem_config(optee, id, &min_size, &pa_width, NULL,
+ &rp->mem_attr_count);
+ if (rc) {
+ if (rc != -ENOSPC)
+ goto err;
+ rp->mem_attrs = kcalloc(rp->mem_attr_count,
+ sizeof(*rp->mem_attrs), GFP_KERNEL);
+ if (!rp->mem_attrs) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ rc = get_protmem_config(optee, id, &min_size, &pa_width,
+ rp->mem_attrs, &rp->mem_attr_count);
+ if (rc)
+ goto err_kfree_eps;
+ }
+
+ rc = optee_set_dma_mask(optee, pa_width);
+ if (rc)
+ goto err_kfree_eps;
+
+ rp->pool.ops = &protmem_pool_ops_dyn;
+ rp->optee = optee;
+ rp->page_count = min_size / PAGE_SIZE;
+ mutex_init(&rp->mutex);
+
+ return &rp->pool;
+
+err_kfree_eps:
+ kfree(rp->mem_attrs);
+err:
+ kfree(rp);
+ return ERR_PTR(rc);
+}
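For orientation, a rough sketch of how a caller of the pool ops above might drive one allocation and release; the function is illustrative only and assumes the caller passes page-aligned sizes, as dma-buf heaps normally do:

static int protmem_pool_example(struct tee_protmem_pool *pool, size_t len)
{
	struct sg_table sgt;
	size_t offs;
	int rc;

	/* The first allocation lazily lends the whole pool to OP-TEE */
	rc = pool->ops->alloc(pool, &sgt, len, &offs);
	if (rc)
		return rc;

	/*
	 * sgt now describes protected pages and offs is the offset into
	 * the parent tee_shm covering the whole lent range.
	 */

	/* The last free reclaims the memory from OP-TEE again */
	pool->ops->free(pool, &sgt);
	return 0;
}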
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index 26f8f7bbbe56..0be663fcd52b 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -965,6 +965,70 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx,
return rc;
}
+static int optee_smc_lend_protmem(struct optee *optee, struct tee_shm *protmem,
+ u32 *mem_attrs, unsigned int ma_count,
+ u32 use_case)
+{
+ struct optee_shm_arg_entry *entry;
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm;
+ u_int offs;
+ int rc;
+
+ msg_arg = optee_get_msg_arg(optee->ctx, 2, &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_LEND_PROTMEM;
+ msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ msg_arg->params[0].u.value.a = use_case;
+ msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
+ msg_arg->params[1].u.tmem.buf_ptr = protmem->paddr;
+ msg_arg->params[1].u.tmem.size = protmem->size;
+ msg_arg->params[1].u.tmem.shm_ref = (u_long)protmem;
+
+ rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false);
+ if (rc)
+ goto out;
+ if (msg_arg->ret != TEEC_SUCCESS) {
+ rc = -EINVAL;
+ goto out;
+ }
+ protmem->sec_world_id = (u_long)protmem;
+
+out:
+ optee_free_msg_arg(optee->ctx, entry, offs);
+ return rc;
+}
+
+static int optee_smc_reclaim_protmem(struct optee *optee,
+ struct tee_shm *protmem)
+{
+ struct optee_shm_arg_entry *entry;
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm;
+ u_int offs;
+ int rc;
+
+ msg_arg = optee_get_msg_arg(optee->ctx, 1, &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_RECLAIM_PROTMEM;
+ msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+ msg_arg->params[0].u.rmem.shm_ref = (u_long)protmem;
+
+ rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false);
+ if (rc)
+ goto out;
+ if (msg_arg->ret != TEEC_SUCCESS)
+ rc = -EINVAL;
+
+out:
+ optee_free_msg_arg(optee->ctx, entry, offs);
+ return rc;
+}
+
/*
* 5. Asynchronous notification
*/
@@ -1216,6 +1280,8 @@ static const struct optee_ops optee_ops = {
.do_call_with_arg = optee_smc_do_call_with_arg,
.to_msg_param = optee_to_msg_param,
.from_msg_param = optee_from_msg_param,
+ .lend_protmem = optee_smc_lend_protmem,
+ .reclaim_protmem = optee_smc_reclaim_protmem,
};
static int enable_async_notif(optee_invoke_fn *invoke_fn)
@@ -1583,6 +1649,74 @@ static inline int optee_load_fw(struct platform_device *pdev,
}
#endif
+static struct tee_protmem_pool *static_protmem_pool_init(struct optee *optee)
+{
+#if IS_ENABLED(CONFIG_OPTEE_STATIC_PROTMEM_POOL)
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_get_protmem_config_result result;
+ } res;
+ struct tee_protmem_pool *pool;
+ void *p;
+ int rc;
+
+ optee->smc.invoke_fn(OPTEE_SMC_GET_PROTMEM_CONFIG, 0, 0, 0, 0,
+ 0, 0, 0, &res.smccc);
+ if (res.result.status != OPTEE_SMC_RETURN_OK)
+ return ERR_PTR(-EINVAL);
+
+ rc = optee_set_dma_mask(optee, res.result.pa_width);
+ if (rc)
+ return ERR_PTR(rc);
+
+ /*
+ * Map the memory as uncached to make sure the kernel can work with
+ * __pfn_to_page() and friends since that's needed when passing the
+ * protected DMA-buf to a device. The memory should otherwise not
+ * be touched by the kernel since it's likely to cause an external
+ * abort due to the protection status.
+ */
+ p = devm_memremap(&optee->teedev->dev, res.result.start,
+ res.result.size, MEMREMAP_WC);
+ if (IS_ERR(p))
+ return p;
+
+ pool = tee_protmem_static_pool_alloc(res.result.start, res.result.size);
+ if (IS_ERR(pool))
+ devm_memunmap(&optee->teedev->dev, p);
+
+ return pool;
+#else
+ return ERR_PTR(-EINVAL);
+#endif
+}
+
+static int optee_protmem_pool_init(struct optee *optee)
+{
+ bool protm = optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_PROTMEM;
+ bool dyn_protm = optee->smc.sec_caps &
+ OPTEE_SMC_SEC_CAP_DYNAMIC_PROTMEM;
+ enum tee_dma_heap_id heap_id = TEE_DMA_HEAP_SECURE_VIDEO_PLAY;
+ struct tee_protmem_pool *pool = ERR_PTR(-EINVAL);
+ int rc = -EINVAL;
+
+ if (!protm && !dyn_protm)
+ return 0;
+
+ if (protm)
+ pool = static_protmem_pool_init(optee);
+ if (dyn_protm && IS_ERR(pool))
+ pool = optee_protmem_alloc_dyn_pool(optee, heap_id);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ rc = tee_device_register_dma_heap(optee->teedev, heap_id, pool);
+ if (rc)
+ pool->ops->destroy_pool(pool);
+
+ return rc;
+}
+
static int optee_probe(struct platform_device *pdev)
{
optee_invoke_fn *invoke_fn;
@@ -1678,7 +1812,7 @@ static int optee_probe(struct platform_device *pdev)
optee = kzalloc(sizeof(*optee), GFP_KERNEL);
if (!optee) {
rc = -ENOMEM;
- goto err_free_pool;
+ goto err_free_shm_pool;
}
optee->ops = &optee_ops;
@@ -1751,6 +1885,9 @@ static int optee_probe(struct platform_device *pdev)
pr_info("Asynchronous notifications enabled\n");
}
+ if (optee_protmem_pool_init(optee))
+ pr_info("Protected memory service not available\n");
+
/*
* Ensure that there are no pre-existing shm objects before enabling
* the shm cache so that there's no chance of receiving an invalid
@@ -1802,7 +1939,7 @@ err_unreg_teedev:
tee_device_unregister(optee->teedev);
err_free_optee:
kfree(optee);
-err_free_pool:
+err_free_shm_pool:
tee_shm_pool_free(pool);
if (memremaped_shm)
memunmap(memremaped_shm);
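Taken together, both probe paths end up registering a TEE DMA heap backed by either the static or the dynamic protected memory pool. A hedged user-space sketch of allocating from such a heap through the standard dma-heap UAPI follows; the heap node name is a placeholder, since the actual name is chosen by the TEE core when tee_device_register_dma_heap() is called for TEE_DMA_HEAP_SECURE_VIDEO_PLAY:

#include <fcntl.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

/* Returns a dma-buf fd backed by protected memory, or -1 on error. */
static int alloc_protected_dmabuf(size_t len)
{
	struct dma_heap_allocation_data data = {
		.len = len,
		.fd_flags = O_RDWR | O_CLOEXEC,
	};
	int heap_fd, rc;

	/* Placeholder node name; the real one depends on the TEE core. */
	heap_fd = open("/dev/dma_heap/protected-secure-video-play",
		       O_RDONLY | O_CLOEXEC);
	if (heap_fd < 0)
		return -1;

	rc = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
	close(heap_fd);
	if (rc < 0)
		return -1;

	/*
	 * The buffer must not be mapped and touched by the CPU; pass the
	 * fd to a TEE session or to a device that can consume protected
	 * memory instead.
	 */
	return data.fd;
}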