// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Linaro Limited
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "optee_private.h"
#include "optee_smc.h"
#define DRIVER_NAME "optee"

/* Number of pages reserved for OP-TEE driver-private (msg arg) allocations */
#define OPTEE_SHM_NUM_PRIV_PAGES	CONFIG_OPTEE_SHM_NUM_PRIV_PAGES
/**
 * optee_from_msg_param() - convert from OPTEE_MSG parameters to
 *			    struct tee_param
 * @params:	subsystem internal parameter representation
 * @num_params:	number of elements in the parameter arrays
 * @msg_params:	OPTEE_MSG parameters
 * Returns 0 on success or <0 on failure
 */
36 int optee_from_msg_param(struct tee_param *params, size_t num_params,
37 const struct optee_msg_param *msg_params)
44 for (n = 0; n < num_params; n++) {
45 struct tee_param *p = params + n;
46 const struct optee_msg_param *mp = msg_params + n;
47 u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
50 case OPTEE_MSG_ATTR_TYPE_NONE:
51 p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
52 memset(&p->u, 0, sizeof(p->u));
54 case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
55 case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
56 case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
57 p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT +
58 attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
59 p->u.value.a = mp->u.value.a;
60 p->u.value.b = mp->u.value.b;
61 p->u.value.c = mp->u.value.c;
63 case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
64 case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
65 case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
66 p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
67 attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
68 p->u.memref.size = mp->u.tmem.size;
69 shm = (struct tee_shm *)(unsigned long)
72 p->u.memref.shm_offs = 0;
73 p->u.memref.shm = NULL;
76 rc = tee_shm_get_pa(shm, 0, &pa);
79 p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
80 p->u.memref.shm = shm;
82 /* Check that the memref is covered by the shm object */
83 if (p->u.memref.size) {
84 size_t o = p->u.memref.shm_offs +
87 rc = tee_shm_get_pa(shm, o, NULL);
92 case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
93 case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
94 case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
95 p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
96 attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
97 p->u.memref.size = mp->u.rmem.size;
98 shm = (struct tee_shm *)(unsigned long)
102 p->u.memref.shm_offs = 0;
103 p->u.memref.shm = NULL;
106 p->u.memref.shm_offs = mp->u.rmem.offs;
107 p->u.memref.shm = shm;
118 static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
119 const struct tee_param *p)
124 mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
125 TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
127 mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
128 mp->u.tmem.size = p->u.memref.size;
130 if (!p->u.memref.shm) {
131 mp->u.tmem.buf_ptr = 0;
135 rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
139 mp->u.tmem.buf_ptr = pa;
140 mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
141 OPTEE_MSG_ATTR_CACHE_SHIFT;
146 static int to_msg_param_reg_mem(struct optee_msg_param *mp,
147 const struct tee_param *p)
149 mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
150 TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
152 mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
153 mp->u.rmem.size = p->u.memref.size;
154 mp->u.rmem.offs = p->u.memref.shm_offs;
/**
 * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters
 * @msg_params:	OPTEE_MSG parameters
 * @num_params:	number of elements in the parameter arrays
 * @params:	subsystem internal parameter representation
 * Returns 0 on success or <0 on failure
 */
165 int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
166 const struct tee_param *params)
171 for (n = 0; n < num_params; n++) {
172 const struct tee_param *p = params + n;
173 struct optee_msg_param *mp = msg_params + n;
176 case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
177 mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
178 memset(&mp->u, 0, sizeof(mp->u));
180 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
181 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
182 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
183 mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr -
184 TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
185 mp->u.value.a = p->u.value.a;
186 mp->u.value.b = p->u.value.b;
187 mp->u.value.c = p->u.value.c;
189 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
190 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
191 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
192 if (tee_shm_is_registered(p->u.memref.shm))
193 rc = to_msg_param_reg_mem(mp, p);
195 rc = to_msg_param_tmp_mem(mp, p);
206 static void optee_get_version(struct tee_device *teedev,
207 struct tee_ioctl_version_data *vers)
209 struct tee_ioctl_version_data v = {
210 .impl_id = TEE_IMPL_ID_OPTEE,
211 .impl_caps = TEE_OPTEE_CAP_TZ,
212 .gen_caps = TEE_GEN_CAP_GP,
214 struct optee *optee = tee_get_drvdata(teedev);
216 if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
217 v.gen_caps |= TEE_GEN_CAP_REG_MEM;
221 static int optee_open(struct tee_context *ctx)
223 struct optee_context_data *ctxdata;
224 struct tee_device *teedev = ctx->teedev;
225 struct optee *optee = tee_get_drvdata(teedev);
227 ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
231 if (teedev == optee->supp_teedev) {
234 mutex_lock(&optee->supp.mutex);
235 if (!optee->supp.ctx) {
237 optee->supp.ctx = ctx;
239 mutex_unlock(&optee->supp.mutex);
246 mutex_init(&ctxdata->mutex);
247 INIT_LIST_HEAD(&ctxdata->sess_list);
253 static void optee_release(struct tee_context *ctx)
255 struct optee_context_data *ctxdata = ctx->data;
256 struct tee_device *teedev = ctx->teedev;
257 struct optee *optee = tee_get_drvdata(teedev);
259 struct optee_msg_arg *arg = NULL;
261 struct optee_session *sess;
262 struct optee_session *sess_tmp;
267 shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), TEE_SHM_MAPPED);
269 arg = tee_shm_get_va(shm, 0);
271 * If va2pa fails for some reason, we can't call into
272 * secure world, only free the memory. Secure OS will leak
273 * sessions and finally refuse more sessions, but we will
274 * at least let normal world reclaim its memory.
277 if (tee_shm_va2pa(shm, arg, &parg))
278 arg = NULL; /* prevent usage of parg below */
281 list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list,
283 list_del(&sess->list_node);
284 if (!IS_ERR_OR_NULL(arg)) {
285 memset(arg, 0, sizeof(*arg));
286 arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
287 arg->session = sess->session_id;
288 optee_do_call_with_arg(ctx, parg);
299 if (teedev == optee->supp_teedev)
300 optee_supp_release(&optee->supp);
303 static const struct tee_driver_ops optee_ops = {
304 .get_version = optee_get_version,
306 .release = optee_release,
307 .open_session = optee_open_session,
308 .close_session = optee_close_session,
309 .invoke_func = optee_invoke_func,
310 .cancel_req = optee_cancel_req,
311 .shm_register = optee_shm_register,
312 .shm_unregister = optee_shm_unregister,
315 static const struct tee_desc optee_desc = {
316 .name = DRIVER_NAME "-clnt",
318 .owner = THIS_MODULE,
321 static const struct tee_driver_ops optee_supp_ops = {
322 .get_version = optee_get_version,
324 .release = optee_release,
325 .supp_recv = optee_supp_recv,
326 .supp_send = optee_supp_send,
327 .shm_register = optee_shm_register_supp,
328 .shm_unregister = optee_shm_unregister_supp,
331 static const struct tee_desc optee_supp_desc = {
332 .name = DRIVER_NAME "-supp",
333 .ops = &optee_supp_ops,
334 .owner = THIS_MODULE,
335 .flags = TEE_DESC_PRIVILEGED,
338 static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
340 struct arm_smccc_res res;
342 invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
344 if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
345 res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
350 static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
353 struct arm_smccc_res smccc;
354 struct optee_smc_call_get_os_revision_result result;
361 invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
364 if (res.result.build_id)
365 pr_info("revision %lu.%lu (%08lx)", res.result.major,
366 res.result.minor, res.result.build_id);
368 pr_info("revision %lu.%lu", res.result.major, res.result.minor);
371 static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
374 struct arm_smccc_res smccc;
375 struct optee_smc_calls_revision_result result;
378 invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
380 if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
381 (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
386 static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
390 struct arm_smccc_res smccc;
391 struct optee_smc_exchange_capabilities_result result;
396 * TODO This isn't enough to tell if it's UP system (from kernel
397 * point of view) or not, is_smp() returns the the information
398 * needed, but can't be called directly from here.
400 if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
401 a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;
403 invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
406 if (res.result.status != OPTEE_SMC_RETURN_OK)
409 *sec_caps = res.result.capabilities;
/*
 * Set up a dynamic shared memory pool: one page-backed manager for
 * driver-private allocations and one for dma-buf allocations.
 * Returns the pool or an ERR_PTR on failure.
 */
static struct tee_shm_pool *optee_config_dyn_shm(void)
{
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	void *rc;

	rc = optee_shm_pool_alloc_pages();
	if (IS_ERR(rc))
		return rc;
	priv_mgr = rc;

	rc = optee_shm_pool_alloc_pages();
	if (IS_ERR(rc)) {
		tee_shm_pool_mgr_destroy(priv_mgr);
		return rc;
	}
	dmabuf_mgr = rc;

	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(rc)) {
		tee_shm_pool_mgr_destroy(priv_mgr);
		tee_shm_pool_mgr_destroy(dmabuf_mgr);
	}

	return rc;
}
440 static struct tee_shm_pool *
441 optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
444 struct arm_smccc_res smccc;
445 struct optee_smc_get_shm_config_result result;
453 struct tee_shm_pool_mgr *priv_mgr;
454 struct tee_shm_pool_mgr *dmabuf_mgr;
456 const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
458 invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
459 if (res.result.status != OPTEE_SMC_RETURN_OK) {
460 pr_err("static shm service not available\n");
461 return ERR_PTR(-ENOENT);
464 if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
465 pr_err("only normal cached shared memory supported\n");
466 return ERR_PTR(-EINVAL);
469 begin = roundup(res.result.start, PAGE_SIZE);
470 end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
474 if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
475 pr_err("too small shared memory area\n");
476 return ERR_PTR(-EINVAL);
479 va = memremap(paddr, size, MEMREMAP_WB);
481 pr_err("shared memory ioremap failed\n");
482 return ERR_PTR(-EINVAL);
484 vaddr = (unsigned long)va;
486 rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
487 3 /* 8 bytes aligned */);
496 rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
498 goto err_free_priv_mgr;
501 rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
503 goto err_free_dmabuf_mgr;
505 *memremaped_shm = va;
510 tee_shm_pool_mgr_destroy(dmabuf_mgr);
512 tee_shm_pool_mgr_destroy(priv_mgr);
/* Simple wrapper functions to be able to use a function pointer */
static void optee_smccc_smc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}
static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}
537 static optee_invoke_fn *get_invoke_func(struct device_node *np)
541 pr_info("probing for conduit method from DT.\n");
543 if (of_property_read_string(np, "method", &method)) {
544 pr_warn("missing \"method\" property\n");
545 return ERR_PTR(-ENXIO);
548 if (!strcmp("hvc", method))
549 return optee_smccc_hvc;
550 else if (!strcmp("smc", method))
551 return optee_smccc_smc;
553 pr_warn("invalid \"method\" property: %s\n", method);
554 return ERR_PTR(-EINVAL);
557 static struct optee *optee_probe(struct device_node *np)
559 optee_invoke_fn *invoke_fn;
560 struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
561 struct optee *optee = NULL;
562 void *memremaped_shm = NULL;
563 struct tee_device *teedev;
567 invoke_fn = get_invoke_func(np);
568 if (IS_ERR(invoke_fn))
569 return (void *)invoke_fn;
571 if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
572 pr_warn("api uid mismatch\n");
573 return ERR_PTR(-EINVAL);
576 optee_msg_get_os_revision(invoke_fn);
578 if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
579 pr_warn("api revision mismatch\n");
580 return ERR_PTR(-EINVAL);
583 if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
584 pr_warn("capabilities mismatch\n");
585 return ERR_PTR(-EINVAL);
589 * Try to use dynamic shared memory if possible
591 if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
592 pool = optee_config_dyn_shm();
595 * If dynamic shared memory is not available or failed - try static one
597 if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
598 pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
603 optee = kzalloc(sizeof(*optee), GFP_KERNEL);
609 optee->invoke_fn = invoke_fn;
610 optee->sec_caps = sec_caps;
612 teedev = tee_device_alloc(&optee_desc, NULL, pool, optee);
613 if (IS_ERR(teedev)) {
614 rc = PTR_ERR(teedev);
617 optee->teedev = teedev;
619 teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
620 if (IS_ERR(teedev)) {
621 rc = PTR_ERR(teedev);
624 optee->supp_teedev = teedev;
626 rc = tee_device_register(optee->teedev);
630 rc = tee_device_register(optee->supp_teedev);
634 mutex_init(&optee->call_queue.mutex);
635 INIT_LIST_HEAD(&optee->call_queue.waiters);
636 optee_wait_queue_init(&optee->wait_queue);
637 optee_supp_init(&optee->supp);
638 optee->memremaped_shm = memremaped_shm;
641 optee_enable_shm_cache(optee);
643 if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
644 pr_info("dynamic shared memory is enabled\n");
650 * tee_device_unregister() is safe to call even if the
651 * devices hasn't been registered with
652 * tee_device_register() yet.
654 tee_device_unregister(optee->supp_teedev);
655 tee_device_unregister(optee->teedev);
659 tee_shm_pool_free(pool);
661 memunmap(memremaped_shm);
665 static void optee_remove(struct optee *optee)
668 * Ask OP-TEE to free all cached shared memory objects to decrease
669 * reference counters and also avoid wild pointers in secure world
670 * into the old shared memory range.
672 optee_disable_shm_cache(optee);
675 * The two devices has to be unregistered before we can free the
678 tee_device_unregister(optee->supp_teedev);
679 tee_device_unregister(optee->teedev);
681 tee_shm_pool_free(optee->pool);
682 if (optee->memremaped_shm)
683 memunmap(optee->memremaped_shm);
684 optee_wait_queue_exit(&optee->wait_queue);
685 optee_supp_uninit(&optee->supp);
686 mutex_destroy(&optee->call_queue.mutex);
691 static const struct of_device_id optee_match[] = {
692 { .compatible = "linaro,optee-tz" },
696 static struct optee *optee_svc;
698 static int __init optee_driver_init(void)
700 struct device_node *fw_np = NULL;
701 struct device_node *np = NULL;
702 struct optee *optee = NULL;
705 /* Node is supposed to be below /firmware */
706 fw_np = of_find_node_by_name(NULL, "firmware");
710 np = of_find_matching_node(fw_np, optee_match);
711 if (!np || !of_device_is_available(np)) {
716 optee = optee_probe(np);
720 return PTR_ERR(optee);
722 rc = optee_enumerate_devices();
728 pr_info("initialized driver\n");
734 module_init(optee_driver_init);
736 static void __exit optee_driver_exit(void)
738 struct optee *optee = optee_svc;
744 module_exit(optee_driver_exit);
746 MODULE_AUTHOR("Linaro");
747 MODULE_DESCRIPTION("OP-TEE driver");
748 MODULE_SUPPORTED_DEVICE("");
749 MODULE_VERSION("1.0");
750 MODULE_LICENSE("GPL v2");