linux/drivers/tee/tee_shm_pool.c
Sumit Garg 0439fcff30 tee: Refactor TEE subsystem header files
Since commit 25559c22ce ("tee: add kernel internal client interface"),
the include/linux/tee_drv.h header file has been shared between the TEE
subsystem internal bits and the APIs exposed to TEE client drivers.
However, this practice invites abuse of the TEE subsystem internal
APIs, especially as new TEE implementation drivers are added that reuse
existing functionality.

To address this, split the TEE subsystem internal bits out into a
separate header file, include/linux/tee_core.h, which is the one TEE
implementation drivers should use. With that, include/linux/tee_drv.h
lists only the APIs the TEE subsystem exposes to TEE client drivers
(see the include sketch below the tags).

Signed-off-by: Sumit Garg <sumit.garg@linaro.org>
Signed-off-by: Balint Dobszay <balint.dobszay@arm.com>
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
2024-04-03 09:19:31 +02:00
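
A hedged sketch of the resulting split; the two snippets below are
hypothetical driver fragments, not part of this commit, and only the
header names and the functions referenced in the comments come from the
TEE subsystem itself:

/* Hypothetical TEE implementation driver: needs subsystem internals */
#include <linux/tee_core.h>	/* e.g. tee_device_alloc(), shm pool ops */

/* Hypothetical TEE client driver: only the client-facing API */
#include <linux/tee_drv.h>	/* e.g. tee_client_open_context() */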

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, 2017, 2022 Linaro Limited
 */
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/tee_core.h>
#include "tee_private.h"

/* Carve a zeroed, aligned chunk out of the backing gen_pool */
static int pool_op_gen_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
			     size_t size, size_t align)
{
	unsigned long va;
	struct gen_pool *genpool = pool->private_data;
	size_t a = max_t(size_t, align, BIT(genpool->min_alloc_order));
	struct genpool_data_align data = { .align = a };
	size_t s = roundup(size, a);

	va = gen_pool_alloc_algo(genpool, s, gen_pool_first_fit_align, &data);
	if (!va)
		return -ENOMEM;

	memset((void *)va, 0, s);
	shm->kaddr = (void *)va;
	shm->paddr = gen_pool_virt_to_phys(genpool, va);
	shm->size = s;
	/*
	 * This is from a static shared memory pool so no need to register
	 * each chunk, and no need to unregister later either.
	 */
	shm->flags &= ~TEE_SHM_DYNAMIC;

	return 0;
}

/* Return the chunk to the backing gen_pool */
static void pool_op_gen_free(struct tee_shm_pool *pool, struct tee_shm *shm)
{
	gen_pool_free(pool->private_data, (unsigned long)shm->kaddr,
		      shm->size);
	shm->kaddr = NULL;
}

/* Release the backing gen_pool and the pool wrapper itself */
static void pool_op_gen_destroy_pool(struct tee_shm_pool *pool)
{
	gen_pool_destroy(pool->private_data);
	kfree(pool);
}

static const struct tee_shm_pool_ops pool_ops_generic = {
	.alloc = pool_op_gen_alloc,
	.free = pool_op_gen_free,
	.destroy_pool = pool_op_gen_destroy_pool,
};

struct tee_shm_pool *tee_shm_pool_alloc_res_mem(unsigned long vaddr,
						phys_addr_t paddr, size_t size,
						int min_alloc_order)
{
	const size_t page_mask = PAGE_SIZE - 1;
	struct tee_shm_pool *pool;
	int rc;

	/* Start and end must be page aligned */
	if (vaddr & page_mask || paddr & page_mask || size & page_mask)
		return ERR_PTR(-EINVAL);

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->private_data = gen_pool_create(min_alloc_order, -1);
	if (!pool->private_data) {
		rc = -ENOMEM;
		goto err;
	}

	rc = gen_pool_add_virt(pool->private_data, vaddr, paddr, size, -1);
	if (rc) {
		gen_pool_destroy(pool->private_data);
		goto err;
	}

	pool->ops = &pool_ops_generic;

	return pool;
err:
	kfree(pool);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
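
A hedged usage sketch, not part of this file: a TEE implementation
driver with a static shared-memory carveout might wrap it in a pool
roughly like this. The function name, carveout base, and size are
hypothetical; only tee_shm_pool_alloc_res_mem() and its page-alignment
requirement come from the code above.

#include <linux/io.h>		/* memremap() */
#include <linux/sizes.h>	/* SZ_2M */
#include <linux/tee_core.h>

/* Hypothetical helper: map a reserved carveout and build a pool on it */
static struct tee_shm_pool *example_create_pool(void)
{
	phys_addr_t begin = 0x42000000;	/* hypothetical, page aligned */
	size_t size = SZ_2M;		/* hypothetical, page aligned */
	void *va = memremap(begin, size, MEMREMAP_WB);

	if (!va)
		return ERR_PTR(-ENOMEM);

	/* min_alloc_order of PAGE_SHIFT: hand out page-granular chunks */
	return tee_shm_pool_alloc_res_mem((unsigned long)va, begin, size,
					  PAGE_SHIFT);
}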