Merge tag 'tee-drv-dynamic-shm-for-v4.16' of https://git.linaro.org/people/jens.wiklander/linux-tee into next/drivers

Pull "tee dynamic shm for v4.16" from Jens Wiklander:

This pull request enables dynamic shared memory support in the TEE
subsystem as a whole and in OP-TEE in particular.

The GlobalPlatform TEE specification [1] allows client applications to
register part of their own memory as a shared buffer between the
application and the TEE, which enables fast zero-copy communication
between the TEE and the REE. The current TEE implementation in Linux,
however, does not support this feature.

Also, the current OP-TEE transport uses a fixed-size pre-shared buffer
for all communication with the OP-TEE OS. This is fine in most use
cases, but it prevents the use of OP-TEE in virtualized environments,
because:

 a) the same buffer cannot be shared between different virtual machines;
 b) memory that is physically contiguous as seen by a VM can be
    non-contiguous in reality (and as seen by the OP-TEE OS) due to the
    second stage of MMU translation;
 c) the size of the pre-shared buffer is limited.

So, the first part of this pull request adds a generic
register/unregister interface to the TEE subsystem. The second part
adds the necessary features to the OP-TEE driver, so that it can use
not only the static pre-shared buffer but the whole of RAM to
communicate with the OP-TEE OS.

This change is backwards compatible, allowing an older secure world or
user space to work with newer kernels and vice versa.

[1] https://www.globalplatform.org/specificationsdevice.asp

* tag 'tee-drv-dynamic-shm-for-v4.16' of https://git.linaro.org/people/jens.wiklander/linux-tee:
  tee: shm: inline tee_shm_get_id()
  tee: use reference counting for tee_context
  tee: optee: enable dynamic SHM support
  tee: optee: add optee-specific shared pool implementation
  tee: optee: store OP-TEE capabilities in private data
  tee: optee: add registered buffers handling into RPC calls
  tee: optee: add registered shared parameters handling
  tee: optee: add shared buffer registration functions
  tee: optee: add page list manipulation functions
  tee: optee: Update protocol definitions
  tee: shm: add page accessor functions
  tee: shm: add accessors for buffer size and page offset
  tee: add register user memory
  tee: flexible shared memory pool creation
commit bad19e0d04
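For orientation, here is a hedged user-space sketch of the registration flow this series adds. It is editorial and not part of the diff: the TEE_IOC_SHM_REGISTER ioctl and the addr/length/flags/id fields are referenced by the tee_core.c hunk below, but the exact uapi definitions live in include/uapi/linux/tee.h, which this capture does not include.

```c
/*
 * Editorial sketch (assumes the uapi definitions from this series'
 * include/uapi/linux/tee.h change): register a client buffer with the
 * TEE via the new TEE_IOC_SHM_REGISTER ioctl for zero-copy use.
 */
#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/tee.h>

int main(void)
{
	int fd = open("/dev/tee0", O_RDWR);
	if (fd < 0)
		err(1, "open /dev/tee0");

	/* Buffer the TEE should see without any copying. */
	void *buf = NULL;
	if (posix_memalign(&buf, 4096, 8192))
		err(1, "posix_memalign");

	struct tee_ioctl_shm_register_data data = {
		.addr = (uintptr_t)buf,
		.length = 8192,
		.flags = 0,	/* no input flags are currently supported */
	};

	/*
	 * On success the kernel pins the pages, registers them with the
	 * secure world and returns a dma-buf file descriptor; data.id
	 * then identifies the shared memory object in later invocations.
	 */
	int shm_fd = ioctl(fd, TEE_IOC_SHM_REGISTER, &data);
	if (shm_fd < 0)
		err(1, "TEE_IOC_SHM_REGISTER");

	close(shm_fd);
	close(fd);
	return 0;
}
```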
@@ -4,3 +4,4 @@ optee-objs += core.o
 optee-objs += call.o
 optee-objs += rpc.o
 optee-objs += supp.o
+optee-objs += shm_pool.o
@@ -11,6 +11,7 @@
  * GNU General Public License for more details.
  *
  */
+#include <asm/pgtable.h>
 #include <linux/arm-smccc.h>
 #include <linux/device.h>
 #include <linux/err.h>
@@ -135,6 +136,7 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
 	struct optee *optee = tee_get_drvdata(ctx->teedev);
 	struct optee_call_waiter w;
 	struct optee_rpc_param param = { };
+	struct optee_call_ctx call_ctx = { };
 	u32 ret;
 
 	param.a0 = OPTEE_SMC_CALL_WITH_ARG;
@@ -159,13 +161,14 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
 			param.a1 = res.a1;
 			param.a2 = res.a2;
 			param.a3 = res.a3;
-			optee_handle_rpc(ctx, &param);
+			optee_handle_rpc(ctx, &param, &call_ctx);
 		} else {
 			ret = res.a0;
 			break;
 		}
 	}
 
+	optee_rpc_finalize_call(&call_ctx);
 	/*
	 * We're done with our thread in secure world, if there's any
	 * thread waiters wake up one.
@@ -442,3 +445,177 @@ void optee_disable_shm_cache(struct optee *optee)
 	}
 	optee_cq_wait_final(&optee->call_queue, &w);
 }
+
+#define PAGELIST_ENTRIES_PER_PAGE \
+	((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
+
+/**
+ * optee_fill_pages_list() - write list of user pages to given shared
+ * buffer.
+ *
+ * @dst: page-aligned buffer where list of pages will be stored
+ * @pages: array of pages that represents shared buffer
+ * @num_pages: number of entries in @pages
+ * @page_offset: offset of user buffer from page start
+ *
+ * @dst should be big enough to hold list of user page addresses and
+ * links to the next pages of buffer
+ */
+void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
+			   size_t page_offset)
+{
+	int n = 0;
+	phys_addr_t optee_page;
+	/*
+	 * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
+	 * for details.
+	 */
+	struct {
+		u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
+		u64 next_page_data;
+	} *pages_data;
+
+	/*
+	 * Currently OP-TEE uses 4k page size and it does not look
+	 * like this will change in the future. On the other hand, there
+	 * are no known ARM architectures with a page size < 4k.
+	 * Thus the build assert below looks redundant. But the following
+	 * code heavily relies on this assumption, so it is better to be
+	 * safe than sorry.
+	 */
+	BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+	pages_data = (void *)dst;
+	/*
+	 * If a Linux page is bigger than 4k, and the user buffer offset
+	 * is larger than 4k/8k/12k/etc, this will skip the first 4k pages,
+	 * because they bear no valuable data for OP-TEE.
+	 */
+	optee_page = page_to_phys(*pages) +
+		round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+	while (true) {
+		pages_data->pages_list[n++] = optee_page;
+
+		if (n == PAGELIST_ENTRIES_PER_PAGE) {
+			pages_data->next_page_data =
+				virt_to_phys(pages_data + 1);
+			pages_data++;
+			n = 0;
+		}
+
+		optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+		if (!(optee_page & ~PAGE_MASK)) {
+			if (!--num_pages)
+				break;
+			pages++;
+			optee_page = page_to_phys(*pages);
+		}
+	}
+}
+
+/*
+ * The final entry in each pagelist page is a pointer to the next
+ * pagelist page.
+ */
+static size_t get_pages_list_size(size_t num_entries)
+{
+	int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);
+
+	return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+}
+
+u64 *optee_allocate_pages_list(size_t num_entries)
+{
+	return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
+}
+
+void optee_free_pages_list(void *list, size_t num_entries)
+{
+	free_pages_exact(list, get_pages_list_size(num_entries));
+}
+
+int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+		       struct page **pages, size_t num_pages)
+{
+	struct tee_shm *shm_arg = NULL;
+	struct optee_msg_arg *msg_arg;
+	u64 *pages_list;
+	phys_addr_t msg_parg;
+	int rc = 0;
+
+	if (!num_pages)
+		return -EINVAL;
+
+	pages_list = optee_allocate_pages_list(num_pages);
+	if (!pages_list)
+		return -ENOMEM;
+
+	shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
+	if (IS_ERR(shm_arg)) {
+		rc = PTR_ERR(shm_arg);
+		goto out;
+	}
+
+	optee_fill_pages_list(pages_list, pages, num_pages,
+			      tee_shm_get_page_offset(shm));
+
+	msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
+	msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+				OPTEE_MSG_ATTR_NONCONTIG;
+	msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
+	msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
+	/*
+	 * In the least bits of msg_arg->params->u.tmem.buf_ptr we
+	 * store the buffer offset from the 4k page, as described in
+	 * the OP-TEE ABI.
+	 */
+	msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
+		(tee_shm_get_page_offset(shm) &
+		 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
+
+	if (optee_do_call_with_arg(ctx, msg_parg) ||
+	    msg_arg->ret != TEEC_SUCCESS)
+		rc = -EINVAL;
+
+	tee_shm_free(shm_arg);
+out:
+	optee_free_pages_list(pages_list, num_pages);
+	return rc;
+}
+
+int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
+{
+	struct tee_shm *shm_arg;
+	struct optee_msg_arg *msg_arg;
+	phys_addr_t msg_parg;
+	int rc = 0;
+
+	shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
+	if (IS_ERR(shm_arg))
+		return PTR_ERR(shm_arg);
+
+	msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
+
+	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+	msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
+
+	if (optee_do_call_with_arg(ctx, msg_parg) ||
+	    msg_arg->ret != TEEC_SUCCESS)
+		rc = -EINVAL;
+	tee_shm_free(shm_arg);
+	return rc;
+}
+
+int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
+			    struct page **pages, size_t num_pages)
+{
+	/*
+	 * We don't want to register supplicant memory in OP-TEE.
+	 * Instead, information about it will be passed in RPC code.
+	 */
+	return 0;
+}
+
+int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm)
+{
+	return 0;
+}
@@ -28,6 +28,7 @@
 #include <linux/uaccess.h>
 #include "optee_private.h"
 #include "optee_smc.h"
+#include "shm_pool.h"
 
 #define DRIVER_NAME "optee"
 
@@ -97,6 +98,25 @@ int optee_from_msg_param(struct tee_param *params, size_t num_params,
 				return rc;
 			}
 			break;
+		case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
+		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
+		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
+			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+				  attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+			p->u.memref.size = mp->u.rmem.size;
+			shm = (struct tee_shm *)(unsigned long)
+				mp->u.rmem.shm_ref;
+
+			if (!shm) {
+				p->u.memref.shm_offs = 0;
+				p->u.memref.shm = NULL;
+				break;
+			}
+			p->u.memref.shm_offs = mp->u.rmem.offs;
+			p->u.memref.shm = shm;
+
+			break;
+
 		default:
 			return -EINVAL;
 		}
@@ -104,6 +124,46 @@ int optee_from_msg_param(struct tee_param *params, size_t num_params,
 	return 0;
 }
 
+static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
+				const struct tee_param *p)
+{
+	int rc;
+	phys_addr_t pa;
+
+	mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
+		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+	mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
+	mp->u.tmem.size = p->u.memref.size;
+
+	if (!p->u.memref.shm) {
+		mp->u.tmem.buf_ptr = 0;
+		return 0;
+	}
+
+	rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
+	if (rc)
+		return rc;
+
+	mp->u.tmem.buf_ptr = pa;
+	mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
+		    OPTEE_MSG_ATTR_CACHE_SHIFT;
+
+	return 0;
+}
+
+static int to_msg_param_reg_mem(struct optee_msg_param *mp,
+				const struct tee_param *p)
+{
+	mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
+		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+	mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
+	mp->u.rmem.size = p->u.memref.size;
+	mp->u.rmem.offs = p->u.memref.shm_offs;
+	return 0;
+}
+
 /**
  * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters
  * @msg_params:	OPTEE_MSG parameters
@@ -116,7 +176,6 @@ int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
 {
 	int rc;
 	size_t n;
-	phys_addr_t pa;
 
 	for (n = 0; n < num_params; n++) {
 		const struct tee_param *p = params + n;
@@ -139,22 +198,12 @@ int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
-			mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT +
-				   p->attr -
-				   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
-			mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
-			mp->u.tmem.size = p->u.memref.size;
-			if (!p->u.memref.shm) {
-				mp->u.tmem.buf_ptr = 0;
-				break;
-			}
-			rc = tee_shm_get_pa(p->u.memref.shm,
-					    p->u.memref.shm_offs, &pa);
+			if (tee_shm_is_registered(p->u.memref.shm))
+				rc = to_msg_param_reg_mem(mp, p);
+			else
+				rc = to_msg_param_tmp_mem(mp, p);
 			if (rc)
 				return rc;
-			mp->u.tmem.buf_ptr = pa;
-			mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
-				    OPTEE_MSG_ATTR_CACHE_SHIFT;
 			break;
 		default:
 			return -EINVAL;
@@ -171,6 +220,10 @@ static void optee_get_version(struct tee_device *teedev,
 		.impl_caps = TEE_OPTEE_CAP_TZ,
 		.gen_caps = TEE_GEN_CAP_GP,
 	};
+	struct optee *optee = tee_get_drvdata(teedev);
+
+	if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+		v.gen_caps |= TEE_GEN_CAP_REG_MEM;
 	*vers = v;
 }
 
@@ -264,6 +317,8 @@ static const struct tee_driver_ops optee_ops = {
 	.close_session = optee_close_session,
 	.invoke_func = optee_invoke_func,
 	.cancel_req = optee_cancel_req,
+	.shm_register = optee_shm_register,
+	.shm_unregister = optee_shm_unregister,
 };
 
 static const struct tee_desc optee_desc = {
@@ -278,6 +333,8 @@ static const struct tee_driver_ops optee_supp_ops = {
 	.release = optee_release,
 	.supp_recv = optee_supp_recv,
 	.supp_send = optee_supp_send,
+	.shm_register = optee_shm_register_supp,
+	.shm_unregister = optee_shm_unregister_supp,
 };
 
 static const struct tee_desc optee_supp_desc = {
@@ -342,21 +399,22 @@ static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
 }
 
 static struct tee_shm_pool *
-optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
+optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm,
+			  u32 sec_caps)
 {
 	union {
 		struct arm_smccc_res smccc;
 		struct optee_smc_get_shm_config_result result;
 	} res;
-	struct tee_shm_pool *pool;
 	unsigned long vaddr;
 	phys_addr_t paddr;
 	size_t size;
 	phys_addr_t begin;
 	phys_addr_t end;
 	void *va;
-	struct tee_shm_pool_mem_info priv_info;
-	struct tee_shm_pool_mem_info dmabuf_info;
+	struct tee_shm_pool_mgr *priv_mgr;
+	struct tee_shm_pool_mgr *dmabuf_mgr;
+	void *rc;
 
 	invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
 	if (res.result.status != OPTEE_SMC_RETURN_OK) {
@@ -386,22 +444,49 @@ optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm,
 	}
 	vaddr = (unsigned long)va;
 
-	priv_info.vaddr = vaddr;
-	priv_info.paddr = paddr;
-	priv_info.size = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
-	dmabuf_info.vaddr = vaddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
-	dmabuf_info.paddr = paddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
-	dmabuf_info.size = size - OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
-
-	pool = tee_shm_pool_alloc_res_mem(&priv_info, &dmabuf_info);
-	if (IS_ERR(pool)) {
-		memunmap(va);
-		goto out;
+	/*
+	 * If OP-TEE can work with unregistered SHM, we will use our own
+	 * pool for private shm
+	 */
+	if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) {
+		rc = optee_shm_pool_alloc_pages();
+		if (IS_ERR(rc))
+			goto err_memunmap;
+		priv_mgr = rc;
+	} else {
+		const size_t sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
+
+		rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
+						    3 /* 8 bytes aligned */);
+		if (IS_ERR(rc))
+			goto err_memunmap;
+		priv_mgr = rc;
+
+		vaddr += sz;
+		paddr += sz;
+		size -= sz;
 	}
 
+	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
+	if (IS_ERR(rc))
+		goto err_free_priv_mgr;
+	dmabuf_mgr = rc;
+
+	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+	if (IS_ERR(rc))
+		goto err_free_dmabuf_mgr;
+
 	*memremaped_shm = va;
-out:
-	return pool;
+
+	return rc;
+
+err_free_dmabuf_mgr:
+	tee_shm_pool_mgr_destroy(dmabuf_mgr);
+err_free_priv_mgr:
+	tee_shm_pool_mgr_destroy(priv_mgr);
+err_memunmap:
+	memunmap(va);
+	return rc;
 }
 
 /* Simple wrapper functions to be able to use a function pointer */
@@ -479,7 +564,7 @@ static struct optee *optee_probe(struct device_node *np)
 	if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
 		return ERR_PTR(-EINVAL);
 
-	pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
+	pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm, sec_caps);
 	if (IS_ERR(pool))
 		return (void *)pool;
 
@@ -490,6 +575,7 @@ static struct optee *optee_probe(struct device_node *np)
 	}
 
 	optee->invoke_fn = invoke_fn;
+	optee->sec_caps = sec_caps;
 
 	teedev = tee_device_alloc(&optee_desc, NULL, pool, optee);
 	if (IS_ERR(teedev)) {
@@ -67,11 +67,32 @@
 #define OPTEE_MSG_ATTR_META			BIT(8)
 
 /*
- * The temporary shared memory object is not physically contigous and this
- * temp memref is followed by another fragment until the last temp memref
- * that doesn't have this bit set.
+ * Pointer to a list of pages used to register user-defined SHM buffer.
+ * Used with OPTEE_MSG_ATTR_TYPE_TMEM_*.
+ * buf_ptr should point to the beginning of the buffer. Buffer will contain
+ * list of page addresses. OP-TEE core can reconstruct contiguous buffer from
+ * that page addresses list. Page addresses are stored as 64 bit values.
+ * Last entry on a page should point to the next page of buffer.
+ * Every entry in buffer should point to a 4k page beginning (12 least
+ * significant bits must be equal to zero).
+ *
+ * 12 least significant bits of optee_msg_param.u.tmem.buf_ptr should hold page
+ * offset of the user buffer.
+ *
+ * So, entries should be placed like members of this structure:
+ *
+ * struct page_data {
+ *	uint64_t pages_array[OPTEE_MSG_NONCONTIG_PAGE_SIZE/sizeof(uint64_t) - 1];
+ *	uint64_t next_page_data;
+ * };
+ *
+ * Structure is designed to exactly fit into the page size
+ * OPTEE_MSG_NONCONTIG_PAGE_SIZE which is a standard 4KB page.
+ *
+ * The size of 4KB is chosen because this is the smallest page size for ARM
+ * architectures. If REE uses larger pages, it should divide them into 4KB ones.
 */
-#define OPTEE_MSG_ATTR_FRAGMENT			BIT(9)
+#define OPTEE_MSG_ATTR_NONCONTIG		BIT(9)
 
 /*
  * Memory attributes for caching passed with temp memrefs. The actual value
@@ -94,6 +115,11 @@
 #define OPTEE_MSG_LOGIN_APPLICATION_USER	0x00000005
 #define OPTEE_MSG_LOGIN_APPLICATION_GROUP	0x00000006
 
+/*
+ * Page size used in non-contiguous buffer entries
+ */
+#define OPTEE_MSG_NONCONTIG_PAGE_SIZE	4096
+
 /**
  * struct optee_msg_param_tmem - temporary memory reference parameter
  * @buf_ptr:	Address of the buffer
@@ -145,8 +171,8 @@ struct optee_msg_param_value {
 *
 * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in
 * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value,
- * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates tmem and
- * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates rmem.
+ * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and
+ * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates @rmem,
 * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used.
 */
 struct optee_msg_param {
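An editorial aside on the layout just described: each 4 KiB list page holds OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64) - 1 = 511 page addresses plus one link entry to the next list page. A quick user-space check of the resulting list size (mirroring the driver's get_pages_list_size() shown earlier; nothing here is part of the patch):

```c
/* Editorial sketch: size of the page list needed for a buffer of
 * `num_pages` 4 KiB pages, mirroring the driver's get_pages_list_size(). */
#include <stdint.h>
#include <stdio.h>

#define OPTEE_MSG_NONCONTIG_PAGE_SIZE	4096
#define PAGELIST_ENTRIES_PER_PAGE \
	((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(uint64_t)) - 1)	/* 511 */

static size_t pages_list_size(size_t num_pages)
{
	size_t list_pages = (num_pages + PAGELIST_ENTRIES_PER_PAGE - 1) /
			    PAGELIST_ENTRIES_PER_PAGE;

	return list_pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
}

int main(void)
{
	/* 1 MiB buffer = 256 pages -> one 4 KiB list page. */
	printf("%zu\n", pages_list_size(256));	/* prints 4096 */
	/* 513 pages -> two list pages (511 entries + link, then 2 more). */
	printf("%zu\n", pages_list_size(513));	/* prints 8192 */
	return 0;
}
```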
@@ -84,6 +84,8 @@ struct optee_supp {
 * @supp:		supplicant synchronization struct for RPC to supplicant
 * @pool:		shared memory pool
 * @memremaped_shm	virtual address of memory in shared memory pool
+ * @sec_caps:		secure world capabilities defined by
+ *			OPTEE_SMC_SEC_CAP_* in optee_smc.h
 */
 struct optee {
 	struct tee_device *supp_teedev;
@@ -94,6 +96,7 @@ struct optee {
 	struct optee_supp supp;
 	struct tee_shm_pool *pool;
 	void *memremaped_shm;
+	u32 sec_caps;
 };
 
 struct optee_session {
@@ -118,7 +121,16 @@ struct optee_rpc_param {
 	u32 a7;
 };
 
-void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param);
+/* Holds context that is preserved during one STD call */
+struct optee_call_ctx {
+	/* information about pages list used in last allocation */
+	void *pages_list;
+	size_t num_entries;
+};
+
+void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
+		      struct optee_call_ctx *call_ctx);
+void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx);
 
 void optee_wait_queue_init(struct optee_wait_queue *wq);
 void optee_wait_queue_exit(struct optee_wait_queue *wq);
@@ -149,11 +161,24 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
 void optee_enable_shm_cache(struct optee *optee);
 void optee_disable_shm_cache(struct optee *optee);
 
+int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+		       struct page **pages, size_t num_pages);
+int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm);
+
+int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
+			    struct page **pages, size_t num_pages);
+int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm);
+
 int optee_from_msg_param(struct tee_param *params, size_t num_params,
			 const struct optee_msg_param *msg_params);
 int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
		       const struct tee_param *params);
 
+u64 *optee_allocate_pages_list(size_t num_entries);
+void optee_free_pages_list(void *array, size_t num_entries);
+void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
+			   size_t page_offset);
+
 /*
  * Small helpers
 */
@@ -222,6 +222,13 @@ struct optee_smc_get_shm_config_result {
 #define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM	BIT(0)
 /* Secure world can communicate via previously unregistered shared memory */
 #define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM	BIT(1)
+
+/*
+ * Secure world supports commands "register/unregister shared memory",
+ * secure world accepts command buffers located in any parts of non-secure RAM
+ */
+#define OPTEE_SMC_SEC_CAP_DYNAMIC_SHM		BIT(2)
+
 #define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES	9
 #define OPTEE_SMC_EXCHANGE_CAPABILITIES \
 	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES)
@@ -200,7 +200,8 @@ static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
 }
 
 static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
-					  struct optee_msg_arg *arg)
+					  struct optee_msg_arg *arg,
+					  struct optee_call_ctx *call_ctx)
 {
 	phys_addr_t pa;
 	struct tee_shm *shm;
@@ -245,10 +246,49 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
 		goto bad;
 	}
 
 	sz = tee_shm_get_size(shm);
 
-	arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
-	arg->params[0].u.tmem.buf_ptr = pa;
-	arg->params[0].u.tmem.size = sz;
-	arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+	if (tee_shm_is_registered(shm)) {
+		struct page **pages;
+		u64 *pages_list;
+		size_t page_num;
+
+		pages = tee_shm_get_pages(shm, &page_num);
+		if (!pages || !page_num) {
+			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+			goto bad;
+		}
+
+		pages_list = optee_allocate_pages_list(page_num);
+		if (!pages_list) {
+			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+			goto bad;
+		}
+
+		call_ctx->pages_list = pages_list;
+		call_ctx->num_entries = page_num;
+
+		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+				      OPTEE_MSG_ATTR_NONCONTIG;
+		/*
+		 * In the least bits of u.tmem.buf_ptr we store buffer offset
+		 * from 4k page, as described in OP-TEE ABI.
+		 */
+		arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
+			(tee_shm_get_page_offset(shm) &
+			 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
+		arg->params[0].u.tmem.size = tee_shm_get_size(shm);
+		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+
+		optee_fill_pages_list(pages_list, pages, page_num,
+				      tee_shm_get_page_offset(shm));
+	} else {
+		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
+		arg->params[0].u.tmem.buf_ptr = pa;
+		arg->params[0].u.tmem.size = sz;
+		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+	}
+
 	arg->ret = TEEC_SUCCESS;
 	return;
 bad:
@@ -307,8 +347,24 @@ static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
 	arg->ret = TEEC_SUCCESS;
 }
 
+static void free_pages_list(struct optee_call_ctx *call_ctx)
+{
+	if (call_ctx->pages_list) {
+		optee_free_pages_list(call_ctx->pages_list,
+				      call_ctx->num_entries);
+		call_ctx->pages_list = NULL;
+		call_ctx->num_entries = 0;
+	}
+}
+
+void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
+{
+	free_pages_list(call_ctx);
+}
+
 static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
-				struct tee_shm *shm)
+				struct tee_shm *shm,
+				struct optee_call_ctx *call_ctx)
 {
 	struct optee_msg_arg *arg;
 
@@ -329,7 +385,8 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
 		handle_rpc_func_cmd_wait(arg);
 		break;
 	case OPTEE_MSG_RPC_CMD_SHM_ALLOC:
-		handle_rpc_func_cmd_shm_alloc(ctx, arg);
+		free_pages_list(call_ctx);
+		handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
 		break;
 	case OPTEE_MSG_RPC_CMD_SHM_FREE:
 		handle_rpc_func_cmd_shm_free(ctx, arg);
@@ -343,10 +400,12 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
 * optee_handle_rpc() - handle RPC from secure world
 * @ctx:	context doing the RPC
 * @param:	value of registers for the RPC
+ * @call_ctx:	call context. Preserved during one OP-TEE invocation
 *
 * Result of RPC is written back into @param.
 */
-void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param)
+void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
+		      struct optee_call_ctx *call_ctx)
 {
 	struct tee_device *teedev = ctx->teedev;
 	struct optee *optee = tee_get_drvdata(teedev);
@@ -381,7 +440,7 @@ void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
 		break;
 	case OPTEE_SMC_RPC_FUNC_CMD:
 		shm = reg_pair_to_ptr(param->a1, param->a2);
-		handle_rpc_func_cmd(ctx, optee, shm);
+		handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
 		break;
 	default:
 		pr_warn("Unknown RPC func 0x%x\n",
new file: drivers/tee/optee/shm_pool.c (75 lines)
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2017, EPAM Systems
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include "optee_private.h"
+#include "optee_smc.h"
+#include "shm_pool.h"
+
+static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
+			 struct tee_shm *shm, size_t size)
+{
+	unsigned int order = get_order(size);
+	struct page *page;
+
+	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+	if (!page)
+		return -ENOMEM;
+
+	shm->kaddr = page_address(page);
+	shm->paddr = page_to_phys(page);
+	shm->size = PAGE_SIZE << order;
+
+	return 0;
+}
+
+static void pool_op_free(struct tee_shm_pool_mgr *poolm,
+			 struct tee_shm *shm)
+{
+	free_pages((unsigned long)shm->kaddr, get_order(shm->size));
+	shm->kaddr = NULL;
+}
+
+static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+{
+	kfree(poolm);
+}
+
+static const struct tee_shm_pool_mgr_ops pool_ops = {
+	.alloc = pool_op_alloc,
+	.free = pool_op_free,
+	.destroy_poolmgr = pool_op_destroy_poolmgr,
+};
+
+/**
+ * optee_shm_pool_alloc_pages() - create page-based allocator pool
+ *
+ * This pool is used when OP-TEE supports dynamic SHM. In this case
+ * command buffers and such are allocated from kernel's own memory.
+ */
+struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void)
+{
+	struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+
+	if (!mgr)
+		return ERR_PTR(-ENOMEM);
+
+	mgr->ops = &pool_ops;
+
+	return mgr;
+}
new file: drivers/tee/optee/shm_pool.h (23 lines)
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2016, EPAM Systems
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef SHM_POOL_H
+#define SHM_POOL_H
+
+#include <linux/tee_drv.h>
+
+struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void);
+
+#endif
@@ -54,6 +54,7 @@ static int tee_open(struct inode *inode, struct file *filp)
 		goto err;
 	}
 
+	kref_init(&ctx->refcount);
 	ctx->teedev = teedev;
 	INIT_LIST_HEAD(&ctx->list_shm);
 	filp->private_data = ctx;
@@ -68,19 +69,40 @@ err:
 	return rc;
 }
 
+void teedev_ctx_get(struct tee_context *ctx)
+{
+	if (ctx->releasing)
+		return;
+
+	kref_get(&ctx->refcount);
+}
+
+static void teedev_ctx_release(struct kref *ref)
+{
+	struct tee_context *ctx = container_of(ref, struct tee_context,
+					       refcount);
+	ctx->releasing = true;
+	ctx->teedev->desc->ops->release(ctx);
+	kfree(ctx);
+}
+
+void teedev_ctx_put(struct tee_context *ctx)
+{
+	if (ctx->releasing)
+		return;
+
+	kref_put(&ctx->refcount, teedev_ctx_release);
+}
+
+static void teedev_close_context(struct tee_context *ctx)
+{
+	tee_device_put(ctx->teedev);
+	teedev_ctx_put(ctx);
+}
+
 static int tee_release(struct inode *inode, struct file *filp)
 {
-	struct tee_context *ctx = filp->private_data;
-	struct tee_device *teedev = ctx->teedev;
-	struct tee_shm *shm;
-
-	ctx->teedev->desc->ops->release(ctx);
-	mutex_lock(&ctx->teedev->mutex);
-	list_for_each_entry(shm, &ctx->list_shm, link)
-		shm->ctx = NULL;
-	mutex_unlock(&ctx->teedev->mutex);
-	kfree(ctx);
-	tee_device_put(teedev);
+	teedev_close_context(filp->private_data);
 	return 0;
 }
 
@@ -114,8 +136,6 @@ static int tee_ioctl_shm_alloc(struct tee_context *ctx,
 	if (data.flags)
 		return -EINVAL;
 
-	data.id = -1;
-
 	shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
 	if (IS_ERR(shm))
 		return PTR_ERR(shm);
@@ -138,6 +158,43 @@ static int tee_ioctl_shm_alloc(struct tee_context *ctx,
 	return ret;
 }
 
+static int
+tee_ioctl_shm_register(struct tee_context *ctx,
+		       struct tee_ioctl_shm_register_data __user *udata)
+{
+	long ret;
+	struct tee_ioctl_shm_register_data data;
+	struct tee_shm *shm;
+
+	if (copy_from_user(&data, udata, sizeof(data)))
+		return -EFAULT;
+
+	/* Currently no input flags are supported */
+	if (data.flags)
+		return -EINVAL;
+
+	shm = tee_shm_register(ctx, data.addr, data.length,
+			       TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
+	if (IS_ERR(shm))
+		return PTR_ERR(shm);
+
+	data.id = shm->id;
+	data.flags = shm->flags;
+	data.length = shm->size;
+
+	if (copy_to_user(udata, &data, sizeof(data)))
+		ret = -EFAULT;
+	else
+		ret = tee_shm_get_fd(shm);
+	/*
+	 * When user space closes the file descriptor the shared memory
+	 * should be freed or if tee_shm_get_fd() failed then it will
+	 * be freed immediately.
+	 */
+	tee_shm_put(shm);
+	return ret;
+}
+
 static int params_from_user(struct tee_context *ctx, struct tee_param *params,
 			    size_t num_params,
 			    struct tee_ioctl_param __user *uparams)
@@ -578,6 +635,8 @@ static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		return tee_ioctl_version(ctx, uarg);
 	case TEE_IOC_SHM_ALLOC:
 		return tee_ioctl_shm_alloc(ctx, uarg);
+	case TEE_IOC_SHM_REGISTER:
+		return tee_ioctl_shm_register(ctx, uarg);
 	case TEE_IOC_OPEN_SESSION:
 		return tee_ioctl_open_session(ctx, uarg);
 	case TEE_IOC_INVOKE:
@@ -21,68 +21,15 @@
 #include <linux/mutex.h>
 #include <linux/types.h>
 
 struct tee_device;
 
-/**
- * struct tee_shm - shared memory object
- * @teedev:	device used to allocate the object
- * @ctx:	context using the object, if NULL the context is gone
- * @link	link element
- * @paddr:	physical address of the shared memory
- * @kaddr:	virtual address of the shared memory
- * @size:	size of shared memory
- * @dmabuf:	dmabuf used to for exporting to user space
- * @flags:	defined by TEE_SHM_* in tee_drv.h
- * @id:		unique id of a shared memory object on this device
- */
-struct tee_shm {
-	struct tee_device *teedev;
-	struct tee_context *ctx;
-	struct list_head link;
-	phys_addr_t paddr;
-	void *kaddr;
-	size_t size;
-	struct dma_buf *dmabuf;
-	u32 flags;
-	int id;
-};
-
-struct tee_shm_pool_mgr;
-
-/**
- * struct tee_shm_pool_mgr_ops - shared memory pool manager operations
- * @alloc:	called when allocating shared memory
- * @free:	called when freeing shared memory
- */
-struct tee_shm_pool_mgr_ops {
-	int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm,
-		     size_t size);
-	void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm);
-};
-
-/**
- * struct tee_shm_pool_mgr - shared memory manager
- * @ops:		operations
- * @private_data:	private data for the shared memory manager
- */
-struct tee_shm_pool_mgr {
-	const struct tee_shm_pool_mgr_ops *ops;
-	void *private_data;
-};
-
 /**
  * struct tee_shm_pool - shared memory pool
  * @private_mgr:	pool manager for shared memory only between kernel
  *			and secure world
  * @dma_buf_mgr:	pool manager for shared memory exported to user space
- * @destroy:		called when destroying the pool
- * @private_data:	private data for the pool
 */
 struct tee_shm_pool {
-	struct tee_shm_pool_mgr private_mgr;
-	struct tee_shm_pool_mgr dma_buf_mgr;
-	void (*destroy)(struct tee_shm_pool *pool);
-	void *private_data;
+	struct tee_shm_pool_mgr *private_mgr;
+	struct tee_shm_pool_mgr *dma_buf_mgr;
 };
 
 #define TEE_DEVICE_FLAG_REGISTERED	0x1
@@ -126,4 +73,7 @@ int tee_shm_get_fd(struct tee_shm *shm);
 bool tee_device_get(struct tee_device *teedev);
 void tee_device_put(struct tee_device *teedev);
 
+void teedev_ctx_get(struct tee_context *ctx);
+void teedev_ctx_put(struct tee_context *ctx);
+
 #endif /*TEE_PRIVATE_H*/
@@ -23,7 +23,6 @@
 static void tee_shm_release(struct tee_shm *shm)
 {
 	struct tee_device *teedev = shm->teedev;
-	struct tee_shm_pool_mgr *poolm;
 
 	mutex_lock(&teedev->mutex);
 	idr_remove(&teedev->idr, shm->id);
@@ -31,12 +30,32 @@ static void tee_shm_release(struct tee_shm *shm)
 	list_del(&shm->link);
 	mutex_unlock(&teedev->mutex);
 
-	if (shm->flags & TEE_SHM_DMA_BUF)
-		poolm = &teedev->pool->dma_buf_mgr;
-	else
-		poolm = &teedev->pool->private_mgr;
+	if (shm->flags & TEE_SHM_POOL) {
+		struct tee_shm_pool_mgr *poolm;
+
+		if (shm->flags & TEE_SHM_DMA_BUF)
+			poolm = teedev->pool->dma_buf_mgr;
+		else
+			poolm = teedev->pool->private_mgr;
+
+		poolm->ops->free(poolm, shm);
+	} else if (shm->flags & TEE_SHM_REGISTER) {
+		size_t n;
+		int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);
+
+		if (rc)
+			dev_err(teedev->dev.parent,
+				"unregister shm %p failed: %d", shm, rc);
+
+		for (n = 0; n < shm->num_pages; n++)
+			put_page(shm->pages[n]);
+
+		kfree(shm->pages);
+	}
+
+	if (shm->ctx)
+		teedev_ctx_put(shm->ctx);
 
-	poolm->ops->free(poolm, shm);
 	kfree(shm);
 
 	tee_device_put(teedev);
@@ -76,6 +95,10 @@ static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 	struct tee_shm *shm = dmabuf->priv;
 	size_t size = vma->vm_end - vma->vm_start;
 
+	/* Refuse sharing shared memory provided by application */
+	if (shm->flags & TEE_SHM_REGISTER)
+		return -EINVAL;
+
 	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
 			       size, vma->vm_page_prot);
 }
@@ -89,26 +112,20 @@ static const struct dma_buf_ops tee_shm_dma_buf_ops = {
 	.mmap = tee_shm_op_mmap,
 };
 
-/**
- * tee_shm_alloc() - Allocate shared memory
- * @ctx:	Context that allocates the shared memory
- * @size:	Requested size of shared memory
- * @flags:	Flags setting properties for the requested shared memory.
- *
- * Memory allocated as global shared memory is automatically freed when the
- * TEE file pointer is closed. The @flags field uses the bits defined by
- * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
- * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and
- * associated with a dma-buf handle, else driver private memory.
- */
-struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
+struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
+				struct tee_device *teedev,
+				size_t size, u32 flags)
 {
-	struct tee_device *teedev = ctx->teedev;
 	struct tee_shm_pool_mgr *poolm = NULL;
 	struct tee_shm *shm;
 	void *ret;
 	int rc;
 
+	if (ctx && ctx->teedev != teedev) {
+		dev_err(teedev->dev.parent, "ctx and teedev mismatch\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	if (!(flags & TEE_SHM_MAPPED)) {
 		dev_err(teedev->dev.parent,
 			"only mapped allocations supported\n");
@@ -135,13 +152,13 @@ struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
 		goto err_dev_put;
 	}
 
-	shm->flags = flags;
+	shm->flags = flags | TEE_SHM_POOL;
 	shm->teedev = teedev;
 	shm->ctx = ctx;
 	if (flags & TEE_SHM_DMA_BUF)
-		poolm = &teedev->pool->dma_buf_mgr;
+		poolm = teedev->pool->dma_buf_mgr;
 	else
-		poolm = &teedev->pool->private_mgr;
+		poolm = teedev->pool->private_mgr;
 
 	rc = poolm->ops->alloc(poolm, shm, size);
 	if (rc) {
@@ -171,9 +188,13 @@ struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
 			goto err_rem;
 		}
 	}
-	mutex_lock(&teedev->mutex);
-	list_add_tail(&shm->link, &ctx->list_shm);
-	mutex_unlock(&teedev->mutex);
+
+	if (ctx) {
+		teedev_ctx_get(ctx);
+		mutex_lock(&teedev->mutex);
+		list_add_tail(&shm->link, &ctx->list_shm);
+		mutex_unlock(&teedev->mutex);
+	}
 
 	return shm;
 err_rem:
@@ -188,8 +209,143 @@ err_dev_put:
 	tee_device_put(teedev);
 	return ret;
 }
+
+/**
+ * tee_shm_alloc() - Allocate shared memory
+ * @ctx:	Context that allocates the shared memory
+ * @size:	Requested size of shared memory
+ * @flags:	Flags setting properties for the requested shared memory.
+ *
+ * Memory allocated as global shared memory is automatically freed when the
+ * TEE file pointer is closed. The @flags field uses the bits defined by
+ * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
+ * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and
+ * associated with a dma-buf handle, else driver private memory.
+ */
+struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
+{
+	return __tee_shm_alloc(ctx, ctx->teedev, size, flags);
+}
 EXPORT_SYMBOL_GPL(tee_shm_alloc);
+
+struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size)
+{
+	return __tee_shm_alloc(NULL, teedev, size, TEE_SHM_MAPPED);
+}
+EXPORT_SYMBOL_GPL(tee_shm_priv_alloc);
+
+struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
+				 size_t length, u32 flags)
+{
+	struct tee_device *teedev = ctx->teedev;
+	const u32 req_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
+	struct tee_shm *shm;
+	void *ret;
+	int rc;
+	int num_pages;
+	unsigned long start;
+
+	if (flags != req_flags)
+		return ERR_PTR(-ENOTSUPP);
+
+	if (!tee_device_get(teedev))
+		return ERR_PTR(-EINVAL);
+
+	if (!teedev->desc->ops->shm_register ||
+	    !teedev->desc->ops->shm_unregister) {
+		tee_device_put(teedev);
+		return ERR_PTR(-ENOTSUPP);
+	}
+
+	teedev_ctx_get(ctx);
+
+	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
+	if (!shm) {
+		ret = ERR_PTR(-ENOMEM);
+		goto err;
+	}
+
+	shm->flags = flags | TEE_SHM_REGISTER;
+	shm->teedev = teedev;
+	shm->ctx = ctx;
+	shm->id = -1;
+	start = rounddown(addr, PAGE_SIZE);
+	shm->offset = addr - start;
+	shm->size = length;
+	num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
+	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
+	if (!shm->pages) {
+		ret = ERR_PTR(-ENOMEM);
+		goto err;
+	}
+
+	rc = get_user_pages_fast(start, num_pages, 1, shm->pages);
+	if (rc > 0)
+		shm->num_pages = rc;
+	if (rc != num_pages) {
+		if (rc > 0)
+			rc = -ENOMEM;
+		ret = ERR_PTR(rc);
+		goto err;
+	}
+
+	mutex_lock(&teedev->mutex);
+	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
+	mutex_unlock(&teedev->mutex);
+
+	if (shm->id < 0) {
+		ret = ERR_PTR(shm->id);
+		goto err;
+	}
+
+	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
+					     shm->num_pages);
+	if (rc) {
+		ret = ERR_PTR(rc);
+		goto err;
+	}
+
+	if (flags & TEE_SHM_DMA_BUF) {
+		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+		exp_info.ops = &tee_shm_dma_buf_ops;
+		exp_info.size = shm->size;
+		exp_info.flags = O_RDWR;
+		exp_info.priv = shm;
+
+		shm->dmabuf = dma_buf_export(&exp_info);
+		if (IS_ERR(shm->dmabuf)) {
+			ret = ERR_CAST(shm->dmabuf);
+			teedev->desc->ops->shm_unregister(ctx, shm);
+			goto err;
+		}
+	}
+
+	mutex_lock(&teedev->mutex);
+	list_add_tail(&shm->link, &ctx->list_shm);
+	mutex_unlock(&teedev->mutex);
+
+	return shm;
+err:
+	if (shm) {
+		size_t n;
+
+		if (shm->id >= 0) {
+			mutex_lock(&teedev->mutex);
+			idr_remove(&teedev->idr, shm->id);
+			mutex_unlock(&teedev->mutex);
+		}
+		for (n = 0; n < shm->num_pages; n++)
+			put_page(shm->pages[n]);
+		kfree(shm->pages);
+	}
+	kfree(shm);
+	teedev_ctx_put(ctx);
+	tee_device_put(teedev);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tee_shm_register);
 
 /**
  * tee_shm_get_fd() - Increase reference count and return file descriptor
  * @shm:	Shared memory handle
@@ -197,10 +353,9 @@ EXPORT_SYMBOL_GPL(tee_shm_alloc);
 */
 int tee_shm_get_fd(struct tee_shm *shm)
 {
-	u32 req_flags = TEE_SHM_MAPPED | TEE_SHM_DMA_BUF;
 	int fd;
 
-	if ((shm->flags & req_flags) != req_flags)
+	if (!(shm->flags & TEE_SHM_DMA_BUF))
 		return -EINVAL;
 
 	fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
@@ -238,6 +393,8 @@ EXPORT_SYMBOL_GPL(tee_shm_free);
 */
 int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
 {
+	if (!(shm->flags & TEE_SHM_MAPPED))
+		return -EINVAL;
 	/* Check that we're in the range of the shm */
 	if ((char *)va < (char *)shm->kaddr)
 		return -EINVAL;
@@ -258,6 +415,8 @@ EXPORT_SYMBOL_GPL(tee_shm_va2pa);
 */
 int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
 {
+	if (!(shm->flags & TEE_SHM_MAPPED))
+		return -EINVAL;
 	/* Check that we're in the range of the shm */
 	if (pa < shm->paddr)
 		return -EINVAL;
@@ -284,6 +443,8 @@ EXPORT_SYMBOL_GPL(tee_shm_pa2va);
 */
 void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
 {
+	if (!(shm->flags & TEE_SHM_MAPPED))
+		return ERR_PTR(-EINVAL);
 	if (offs >= shm->size)
 		return ERR_PTR(-EINVAL);
 	return (char *)shm->kaddr + offs;
@@ -335,17 +496,6 @@ struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
 }
 EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
 
-/**
- * tee_shm_get_id() - Get id of a shared memory object
- * @shm:	Shared memory handle
- * @returns id
- */
-int tee_shm_get_id(struct tee_shm *shm)
-{
-	return shm->id;
-}
-EXPORT_SYMBOL_GPL(tee_shm_get_id);
-
 /**
  * tee_shm_put() - Decrease reference count on a shared memory handle
  * @shm:	Shared memory handle
@@ -44,49 +44,18 @@ static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm,
 	shm->kaddr = NULL;
 }
 
+static void pool_op_gen_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+{
+	gen_pool_destroy(poolm->private_data);
+	kfree(poolm);
+}
+
 static const struct tee_shm_pool_mgr_ops pool_ops_generic = {
 	.alloc = pool_op_gen_alloc,
 	.free = pool_op_gen_free,
+	.destroy_poolmgr = pool_op_gen_destroy_poolmgr,
 };
 
-static void pool_res_mem_destroy(struct tee_shm_pool *pool)
-{
-	gen_pool_destroy(pool->private_mgr.private_data);
-	gen_pool_destroy(pool->dma_buf_mgr.private_data);
-}
-
-static int pool_res_mem_mgr_init(struct tee_shm_pool_mgr *mgr,
-				 struct tee_shm_pool_mem_info *info,
-				 int min_alloc_order)
-{
-	size_t page_mask = PAGE_SIZE - 1;
-	struct gen_pool *genpool = NULL;
-	int rc;
-
-	/*
-	 * Start and end must be page aligned
-	 */
-	if ((info->vaddr & page_mask) || (info->paddr & page_mask) ||
-	    (info->size & page_mask))
-		return -EINVAL;
-
-	genpool = gen_pool_create(min_alloc_order, -1);
-	if (!genpool)
-		return -ENOMEM;
-
-	gen_pool_set_algo(genpool, gen_pool_best_fit, NULL);
-	rc = gen_pool_add_virt(genpool, info->vaddr, info->paddr, info->size,
-			       -1);
-	if (rc) {
-		gen_pool_destroy(genpool);
-		return rc;
-	}
-
-	mgr->private_data = genpool;
-	mgr->ops = &pool_ops_generic;
-	return 0;
-}
-
 /**
  * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
  * memory range
@@ -104,43 +73,110 @@ struct tee_shm_pool *
 tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
			   struct tee_shm_pool_mem_info *dmabuf_info)
 {
-	struct tee_shm_pool *pool = NULL;
-	int ret;
-
-	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
-	if (!pool) {
-		ret = -ENOMEM;
-		goto err;
-	}
+	struct tee_shm_pool_mgr *priv_mgr;
+	struct tee_shm_pool_mgr *dmabuf_mgr;
+	void *rc;
 
 	/*
	 * Create the pool for driver private shared memory
	 */
-	ret = pool_res_mem_mgr_init(&pool->private_mgr, priv_info,
-				    3 /* 8 byte aligned */);
-	if (ret)
-		goto err;
+	rc = tee_shm_pool_mgr_alloc_res_mem(priv_info->vaddr, priv_info->paddr,
+					    priv_info->size,
+					    3 /* 8 byte aligned */);
+	if (IS_ERR(rc))
+		return rc;
+	priv_mgr = rc;
 
 	/*
	 * Create the pool for dma_buf shared memory
	 */
-	ret = pool_res_mem_mgr_init(&pool->dma_buf_mgr, dmabuf_info,
-				    PAGE_SHIFT);
-	if (ret)
-		goto err;
+	rc = tee_shm_pool_mgr_alloc_res_mem(dmabuf_info->vaddr,
+					    dmabuf_info->paddr,
+					    dmabuf_info->size, PAGE_SHIFT);
+	if (IS_ERR(rc))
+		goto err_free_priv_mgr;
+	dmabuf_mgr = rc;
 
-	pool->destroy = pool_res_mem_destroy;
-	return pool;
-err:
-	if (ret == -ENOMEM)
-		pr_err("%s: can't allocate memory for res_mem shared memory pool\n", __func__);
-	if (pool && pool->private_mgr.private_data)
-		gen_pool_destroy(pool->private_mgr.private_data);
-	kfree(pool);
-	return ERR_PTR(ret);
+	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+	if (IS_ERR(rc))
+		goto err_free_dmabuf_mgr;
+
+	return rc;
+
+err_free_dmabuf_mgr:
+	tee_shm_pool_mgr_destroy(dmabuf_mgr);
+err_free_priv_mgr:
+	tee_shm_pool_mgr_destroy(priv_mgr);
+
+	return rc;
 }
 EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
+
+struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr,
+							phys_addr_t paddr,
+							size_t size,
+							int min_alloc_order)
+{
+	const size_t page_mask = PAGE_SIZE - 1;
+	struct tee_shm_pool_mgr *mgr;
+	int rc;
+
+	/* Start and end must be page aligned */
+	if (vaddr & page_mask || paddr & page_mask || size & page_mask)
+		return ERR_PTR(-EINVAL);
+
+	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+	if (!mgr)
+		return ERR_PTR(-ENOMEM);
+
+	mgr->private_data = gen_pool_create(min_alloc_order, -1);
+	if (!mgr->private_data) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	gen_pool_set_algo(mgr->private_data, gen_pool_best_fit, NULL);
+	rc = gen_pool_add_virt(mgr->private_data, vaddr, paddr, size, -1);
+	if (rc) {
+		gen_pool_destroy(mgr->private_data);
+		goto err;
+	}
+
+	mgr->ops = &pool_ops_generic;
+
+	return mgr;
+err:
+	kfree(mgr);
+
+	return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(tee_shm_pool_mgr_alloc_res_mem);
+
+static bool check_mgr_ops(struct tee_shm_pool_mgr *mgr)
+{
+	return mgr && mgr->ops && mgr->ops->alloc && mgr->ops->free &&
+	       mgr->ops->destroy_poolmgr;
+}
+
+struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr,
+					struct tee_shm_pool_mgr *dmabuf_mgr)
+{
+	struct tee_shm_pool *pool;
+
+	if (!check_mgr_ops(priv_mgr) || !check_mgr_ops(dmabuf_mgr))
+		return ERR_PTR(-EINVAL);
+
+	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+	if (!pool)
+		return ERR_PTR(-ENOMEM);
+
+	pool->private_mgr = priv_mgr;
+	pool->dma_buf_mgr = dmabuf_mgr;
+
+	return pool;
+}
+EXPORT_SYMBOL_GPL(tee_shm_pool_alloc);
 
 /**
  * tee_shm_pool_free() - Free a shared memory pool
  * @pool:	The shared memory pool to free
@@ -150,7 +186,10 @@ EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
 */
 void tee_shm_pool_free(struct tee_shm_pool *pool)
 {
-	pool->destroy(pool);
+	if (pool->private_mgr)
+		tee_shm_pool_mgr_destroy(pool->private_mgr);
+	if (pool->dma_buf_mgr)
+		tee_shm_pool_mgr_destroy(pool->dma_buf_mgr);
 	kfree(pool);
 }
 EXPORT_SYMBOL_GPL(tee_shm_pool_free);
@@ -17,6 +17,7 @@

#include <linux/types.h>
#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/tee.h>

@@ -25,8 +26,12 @@
 * specific TEE driver.
 */

#define TEE_SHM_MAPPED		0x1	/* Memory mapped by the kernel */
#define TEE_SHM_DMA_BUF		0x2	/* Memory with dma-buf handle */
#define TEE_SHM_MAPPED		BIT(0)	/* Memory mapped by the kernel */
#define TEE_SHM_DMA_BUF		BIT(1)	/* Memory with dma-buf handle */
#define TEE_SHM_EXT_DMA_BUF	BIT(2)	/* Memory with dma-buf handle */
#define TEE_SHM_REGISTER	BIT(3)	/* Memory registered in secure world */
#define TEE_SHM_USER_MAPPED	BIT(4)	/* Memory mapped in user space */
#define TEE_SHM_POOL		BIT(5)	/* Memory allocated from pool */

struct device;
struct tee_device;
@@ -38,11 +43,17 @@ struct tee_shm_pool;
 * @teedev:	pointer to this driver's struct tee_device
 * @list_shm:	List of shared memory objects owned by this context
 * @data:	driver specific context data, managed by the driver
 * @refcount:	reference counter for this structure
 * @releasing:	flag that indicates if the context is being released right now.
 *		It is needed to break the circular dependency on the context
 *		during shared memory release.
 */
struct tee_context {
	struct tee_device *teedev;
	struct list_head list_shm;
	void *data;
	struct kref refcount;
	bool releasing;
};

struct tee_param_memref {
@@ -76,6 +87,8 @@ struct tee_param {
 * @cancel_req:		request cancel of an ongoing invoke or open
 * @supp_recv:		called for supplicant to get a command
 * @supp_send:		called for supplicant to send a response
 * @shm_register:	register shared memory buffer in TEE
 * @shm_unregister:	unregister shared memory buffer in TEE
 */
struct tee_driver_ops {
	void (*get_version)(struct tee_device *teedev,
@@ -94,6 +107,9 @@
			  struct tee_param *param);
	int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params,
			 struct tee_param *param);
	int (*shm_register)(struct tee_context *ctx, struct tee_shm *shm,
			    struct page **pages, size_t num_pages);
	int (*shm_unregister)(struct tee_context *ctx, struct tee_shm *shm);
};

/**
@@ -149,6 +165,97 @@ int tee_device_register(struct tee_device *teedev);
 */
void tee_device_unregister(struct tee_device *teedev);

/**
 * struct tee_shm - shared memory object
 * @teedev:	device used to allocate the object
 * @ctx:	context using the object, if NULL the context is gone
 * @link:	link element
 * @paddr:	physical address of the shared memory
 * @kaddr:	virtual address of the shared memory
 * @size:	size of shared memory
 * @offset:	offset of buffer in user space
 * @pages:	locked pages from userspace
 * @num_pages:	number of locked pages
 * @dmabuf:	dmabuf used for exporting to user space
 * @flags:	defined by TEE_SHM_* in tee_drv.h
 * @id:		unique id of a shared memory object on this device
 *
 * This structure is only supposed to be accessed directly from the TEE
 * subsystem and from drivers that implement their own shm pool manager.
 */
struct tee_shm {
	struct tee_device *teedev;
	struct tee_context *ctx;
	struct list_head link;
	phys_addr_t paddr;
	void *kaddr;
	size_t size;
	unsigned int offset;
	struct page **pages;
	size_t num_pages;
	struct dma_buf *dmabuf;
	u32 flags;
	int id;
};

/**
 * struct tee_shm_pool_mgr - shared memory manager
 * @ops:		operations
 * @private_data:	private data for the shared memory manager
 */
struct tee_shm_pool_mgr {
	const struct tee_shm_pool_mgr_ops *ops;
	void *private_data;
};

/**
 * struct tee_shm_pool_mgr_ops - shared memory pool manager operations
 * @alloc:		called when allocating shared memory
 * @free:		called when freeing shared memory
 * @destroy_poolmgr:	called when destroying the pool manager
 */
struct tee_shm_pool_mgr_ops {
	int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm,
		     size_t size);
	void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm);
	void (*destroy_poolmgr)(struct tee_shm_pool_mgr *poolmgr);
};
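
/*
 * A minimal sketch of a driver-specific pool manager built on these ops
 * (hypothetical, for illustration only; a real driver would typically back
 * ->alloc with its own carveout or page allocator):
 */
static int ex_pool_op_alloc(struct tee_shm_pool_mgr *poolm,
			    struct tee_shm *shm, size_t size)
{
	shm->kaddr = kzalloc(size, GFP_KERNEL);
	if (!shm->kaddr)
		return -ENOMEM;
	shm->paddr = virt_to_phys(shm->kaddr);
	shm->size = size;
	return 0;
}

static void ex_pool_op_free(struct tee_shm_pool_mgr *poolm,
			    struct tee_shm *shm)
{
	kfree(shm->kaddr);
	shm->kaddr = NULL;
}

static void ex_pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
{
	kfree(poolm);
}

static const struct tee_shm_pool_mgr_ops ex_pool_ops = {
	.alloc = ex_pool_op_alloc,
	.free = ex_pool_op_free,
	.destroy_poolmgr = ex_pool_op_destroy_poolmgr,
};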

/**
 * tee_shm_pool_alloc() - Create a shared memory pool from shm managers
 * @priv_mgr:	manager for driver private shared memory allocations
 * @dmabuf_mgr:	manager for dma-buf shared memory allocations
 *
 * Allocations with the flag TEE_SHM_DMA_BUF set will use the range supplied
 * by @dmabuf_mgr, others will use the range provided by @priv_mgr.
 *
 * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
 */
struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr,
					struct tee_shm_pool_mgr *dmabuf_mgr);

/**
 * tee_shm_pool_mgr_alloc_res_mem() - Create a shm manager for reserved
 * memory
 * @vaddr:		Virtual address of start of pool
 * @paddr:		Physical address of start of pool
 * @size:		Size in bytes of the pool
 * @min_alloc_order:	Log2 of the minimum allocation size
 *
 * @returns pointer to a 'struct tee_shm_pool_mgr' or an ERR_PTR on failure.
 */
struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr,
							phys_addr_t paddr,
							size_t size,
							int min_alloc_order);

/**
 * tee_shm_pool_mgr_destroy() - Free a shared memory manager
 * @poolm:	The manager to free
 */
static inline void tee_shm_pool_mgr_destroy(struct tee_shm_pool_mgr *poolm)
{
	poolm->ops->destroy_poolmgr(poolm);
}

/**
 * struct tee_shm_pool_mem_info - holds information needed to create a shared
 * memory pool
@@ -210,6 +317,40 @@ void *tee_get_drvdata(struct tee_device *teedev);
 */
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);
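
/*
 * A minimal allocation sketch (illustrative only; assumes a valid ctx): a
 * typical client buffer is both kernel mapped and exportable as a dma-buf.
 */
static int example_alloc(struct tee_context *ctx)
{
	struct tee_shm *shm;

	shm = tee_shm_alloc(ctx, 4096, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* ... pass the buffer to the secure world ... */

	tee_shm_free(shm);
	return 0;
}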

/**
 * tee_shm_priv_alloc() - Allocate shared memory privately
 * @teedev:	Device that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * Allocates a shared memory buffer that is not associated with any client
 * context. Such buffers are owned by the TEE driver and used for internal
 * calls.
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size);

/**
 * tee_shm_register() - Register shared memory buffer
 * @ctx:	Context that registers the shared memory
 * @addr:	Address in userspace of the shared buffer
 * @length:	Length of the shared buffer
 * @flags:	Flags setting properties for the requested shared memory.
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
				 size_t length, u32 flags);

/**
 * tee_shm_is_registered() - Check if shared memory object is registered in TEE
 * @shm:	Shared memory handle
 * @returns true if object is registered in TEE
 */
static inline bool tee_shm_is_registered(struct tee_shm *shm)
{
	return shm && (shm->flags & TEE_SHM_REGISTER);
}

/**
 * tee_shm_free() - Free shared memory
 * @shm:	Handle to shared memory to free
@@ -259,12 +400,48 @@ void *tee_shm_get_va(struct tee_shm *shm, size_t offs);
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa);

/**
 * tee_shm_get_size() - Get size of shared memory buffer
 * @shm:	Shared memory handle
 * @returns size of shared memory
 */
static inline size_t tee_shm_get_size(struct tee_shm *shm)
{
	return shm->size;
}

/**
 * tee_shm_get_pages() - Get list of pages that hold shared buffer
 * @shm:	Shared memory handle
 * @num_pages:	Output argument where the number of pages is stored
 * @returns pointer to the pages array
 */
static inline struct page **tee_shm_get_pages(struct tee_shm *shm,
					      size_t *num_pages)
{
	*num_pages = shm->num_pages;
	return shm->pages;
}
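
/*
 * A short sketch (illustrative only) of walking the pages behind a
 * registered buffer, e.g. to build a driver specific page list for the
 * secure world:
 */
static inline void example_walk_pages(struct tee_shm *shm)
{
	size_t n, num_pages;
	struct page **pages = tee_shm_get_pages(shm, &num_pages);

	for (n = 0; n < num_pages; n++) {
		phys_addr_t pa = page_to_phys(pages[n]);

		pr_debug("page %zu at %pa\n", n, &pa);
	}
}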

/**
 * tee_shm_get_page_offset() - Get shared buffer offset from page start
 * @shm:	Shared memory handle
 * @returns page offset of shared buffer
 */
static inline size_t tee_shm_get_page_offset(struct tee_shm *shm)
{
	return shm->offset;
}

/**
 * tee_shm_get_id() - Get id of a shared memory object
 * @shm:	Shared memory handle
 * @returns id
 */
int tee_shm_get_id(struct tee_shm *shm);
static inline int tee_shm_get_id(struct tee_shm *shm)
{
	return shm->id;
}

/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
@@ -50,6 +50,7 @@

#define TEE_GEN_CAP_GP		(1 << 0)/* GlobalPlatform compliant TEE */
#define TEE_GEN_CAP_PRIVILEGED	(1 << 1)/* Privileged device (for supplicant) */
#define TEE_GEN_CAP_REG_MEM	(1 << 2)/* Supports registering shared memory */

/*
 * TEE Implementation ID
@@ -339,6 +340,35 @@ struct tee_iocl_supp_send_arg {
#define TEE_IOC_SUPPL_SEND	_IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 7, \
				     struct tee_ioctl_buf_data)

/**
 * struct tee_ioctl_shm_register_data - Shared memory register argument
 * @addr:	[in] Start address of shared memory to register
 * @length:	[in/out] Length of shared memory to register
 * @flags:	[in/out] Flags to/from registration.
 * @id:		[out] Identifier of the shared memory
 *
 * The flags field should currently be zero as input. Updated by the call
 * with actual flags as defined by TEE_IOCTL_SHM_* above.
 * This structure is used as argument for TEE_IOC_SHM_REGISTER below.
 */
struct tee_ioctl_shm_register_data {
	__u64 addr;
	__u64 length;
	__u32 flags;
	__s32 id;
};

/**
 * TEE_IOC_SHM_REGISTER - Register shared memory argument
 *
 * Registers shared memory between the user space process and secure OS.
 *
 * Returns a file descriptor on success or < 0 on failure
 *
 * The shared memory is unregistered when the descriptor is closed.
 */
#define TEE_IOC_SHM_REGISTER	_IOWR(TEE_IOC_MAGIC, TEE_IOC_BASE + 9, \
				      struct tee_ioctl_shm_register_data)
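
/*
 * A minimal user space sketch (illustrative only; error handling trimmed,
 * and /dev/tee0 is an assumed device node):
 */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/tee.h>

int register_buffer(size_t len)
{
	struct tee_ioctl_shm_register_data data;
	void *buf;
	int fd, shm_fd;

	fd = open("/dev/tee0", O_RDWR);
	if (fd < 0)
		return -1;

	/* The buffer to register should be page aligned; mmap gives that */
	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (buf == MAP_FAILED) {
		close(fd);
		return -1;
	}

	memset(&data, 0, sizeof(data));	/* flags must be zero on input */
	data.addr = (uintptr_t)buf;
	data.length = len;

	shm_fd = ioctl(fd, TEE_IOC_SHM_REGISTER, &data);
	/*
	 * On success data.id identifies the registered object and shm_fd
	 * keeps the registration alive until it is closed.
	 */
	close(fd);
	return shm_fd;
}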

/*
 * Five syscalls are used when communicating with the TEE driver.
 * open(): opens the device associated with the driver