diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dts b/arch/arm64/boot/dts/arm/foundation-v8.dts
index 71168077312d5c..3fefc41c43d0d7 100644
--- a/arch/arm64/boot/dts/arm/foundation-v8.dts
+++ b/arch/arm64/boot/dts/arm/foundation-v8.dts
@@ -7,15 +7,15 @@
 #include "foundation-v8.dtsi"
 
 / {
-	gic: interrupt-controller@2c001000 {
+	gic: interrupt-controller@2f000000 {
 		compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
 		#interrupt-cells = <3>;
 		#address-cells = <2>;
 		interrupt-controller;
-		reg = <0x0 0x2c001000 0 0x1000>,
-		      <0x0 0x2c002000 0 0x2000>,
-		      <0x0 0x2c004000 0 0x2000>,
-		      <0x0 0x2c006000 0 0x2000>;
+		reg = <0x0 0x2f000000 0 0x10000>,
+		      <0x0 0x2c000000 0 0x2000>,
+		      <0x0 0x2c010000 0 0x2000>,
+		      <0x0 0x2c02F000 0 0x2000>;
 		interrupts = <1 9 0xf04>;
 	};
 };
diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dtsi b/arch/arm64/boot/dts/arm/foundation-v8.dtsi
index 7cfa8e414e7f51..a6eedf1b0bea98 100644
--- a/arch/arm64/boot/dts/arm/foundation-v8.dtsi
+++ b/arch/arm64/boot/dts/arm/foundation-v8.dtsi
@@ -19,11 +19,19 @@
 	aliases {
 		serial0 = &v2m_serial0;
-		serial1 = &v2m_serial1;
 		serial2 = &v2m_serial2;
 		serial3 = &v2m_serial3;
 	};
 
+	psci {
+		compatible = "arm,psci";
+		method = "smc";
+		cpu_suspend = <0xc4000001>;
+		cpu_off = <0x84000002>;
+		cpu_on = <0xc4000003>;
+	};
+
 	cpus {
 		#address-cells = <2>;
 		#size-cells = <0>;
@@ -32,32 +40,28 @@
 			device_type = "cpu";
 			compatible = "arm,armv8";
 			reg = <0x0 0x0>;
-			enable-method = "spin-table";
-			cpu-release-addr = <0x0 0x8000fff8>;
+			enable-method = "psci";
 			next-level-cache = <&L2_0>;
 		};
 		cpu@1 {
 			device_type = "cpu";
 			compatible = "arm,armv8";
 			reg = <0x0 0x1>;
-			enable-method = "spin-table";
-			cpu-release-addr = <0x0 0x8000fff8>;
+			enable-method = "psci";
 			next-level-cache = <&L2_0>;
 		};
 		cpu@2 {
 			device_type = "cpu";
 			compatible = "arm,armv8";
 			reg = <0x0 0x2>;
-			enable-method = "spin-table";
-			cpu-release-addr = <0x0 0x8000fff8>;
+			enable-method = "psci";
 			next-level-cache = <&L2_0>;
 		};
 		cpu@3 {
 			device_type = "cpu";
 			compatible = "arm,armv8";
 			reg = <0x0 0x3>;
-			enable-method = "spin-table";
-			cpu-release-addr = <0x0 0x8000fff8>;
+			enable-method = "psci";
 			next-level-cache = <&L2_0>;
 		};
@@ -72,6 +76,17 @@
 		      <0x00000008 0x80000000 0 0x80000000>;
 	};
 
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		optee@0x83000000 {
+			reg = <0x00000000 0x83000000 0 0x01000000>;
+			no-map;
+		};
+	};
+
 	timer {
 		compatible = "arm,armv8-timer";
 		interrupts = <1 13 0xf08>,
@@ -202,14 +217,6 @@
 				clock-names = "uartclk", "apb_pclk";
 			};
 
-			v2m_serial1: uart@0a0000 {
-				compatible = "arm,pl011", "arm,primecell";
-				reg = <0x0a0000 0x1000>;
-				interrupts = <6>;
-				clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
-				clock-names = "uartclk", "apb_pclk";
-			};
-
 			v2m_serial2: uart@0b0000 {
 				compatible = "arm,pl011", "arm,primecell";
 				reg = <0x0b0000 0x1000>;
@@ -233,4 +240,12 @@
 			};
 		};
 	};
+
+	firmware {
+		optee {
+			compatible = "linaro,optee-tz";
+			method = "smc";
+		};
+	};
+
 };
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index bfe7d683a42e1b..4eda3fd12e6a46 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -699,6 +699,19 @@
 		      <0x00000008 0x80000000 0x1 0x80000000>;
 	};
 
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		/* Shared memory between secure and non-secure world */
+		optee@0xfee00000 {
+			reg = <0x00000000 0xfee00000 0 0x00200000>;
+			no-map;
+		};
+	};
+
 	smb@8000000 {
 		compatible = "simple-bus";
 		#address-cells = <2>;
@@ -738,4 +751,11 @@
 		interrupt-map-mask = <0 0>;
 		interrupt-map = <0 0 &gic 0 0 0 168 IRQ_TYPE_LEVEL_HIGH>;
 	};
+
+	firmware {
+		optee {
+			compatible = "linaro,optee-tz";
+			method = "smc";
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
index 1c3634fa94bf4e..64dffc8bbf6f42 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
@@ -68,6 +68,13 @@
 		gpio = <&pio 9 GPIO_ACTIVE_HIGH>;
 		enable-active-high;
 	};
+
+	firmware {
+		optee {
+			compatible = "linaro,optee-tz";
+			method = "smc";
+		};
+	};
 };
 
 &cec {
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index 6922252f317bca..4182b2cbd0657f 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -339,15 +339,6 @@
 			reg = <0 0x10007000 0 0x100>;
 		};
 
-		timer: timer@10008000 {
-			compatible = "mediatek,mt8173-timer",
-				     "mediatek,mt6577-timer";
-			reg = <0 0x10008000 0 0x1000>;
-			interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_LOW>;
-			clocks = <&infracfg CLK_INFRA_CLK_13M>,
-				 <&topckgen CLK_TOP_RTC_SEL>;
-		};
-
 		pwrap: pwrap@1000d000 {
 			compatible = "mediatek,mt8173-pwrap";
 			reg = <0 0x1000d000 0 0x1000>;
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index a517b2d29f1bb6..075683bd8493e0 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -42,3 +42,35 @@ config ION_CMA_HEAP
 	  Choose this option to enable CMA heaps with Ion. This heap is backed
 	  by the Contiguous Memory Allocator (CMA). If your system has these
 	  regions, you should say Y here.
+
+config ION_UNMAPPED_HEAP
+	bool "ION unmapped heap support"
+	depends on ION && (ARM || ARM64)
+	help
+	  Choose this option to enable unmapped heaps with Ion. This heap is
+	  backed by specific memory pools carved out from Linux memory.
+	  Unless you know your system has these regions, you should say N here.
+
+config ION_DUMMY_UNMAPPED_HEAP
+	bool "ION dummy driver defines an ION unmapped heap"
+	depends on ION_UNMAPPED_HEAP
+	help
+	  The dummy ION driver will create an unmapped heap from the physical
+	  location provided through CONFIG_ION_DUMMY_UNMAPPED_BASE and
+	  CONFIG_ION_DUMMY_UNMAPPED_SIZE.
+
+config ION_DUMMY_UNMAPPED_BASE
+	hex "Physical base address of the ION unmapped heap"
+	depends on ION_DUMMY_UNMAPPED_HEAP
+	default 0
+	help
+	  Allows one to statically define an unmapped heap in the ION dummy
+	  driver to exercise unmapped heap buffer management.
+
+config ION_DUMMY_UNMAPPED_SIZE
+	hex "Physical byte size of the ION unmapped heap"
+	depends on ION_DUMMY_UNMAPPED_HEAP
+	default 0
+	help
+	  Allows one to statically define an unmapped heap in the ION dummy
+	  driver to exercise unmapped heap buffer management.
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
index eb7eeed6ae409b..4dae85e58f2459 100644
--- a/drivers/staging/android/ion/Makefile
+++ b/drivers/staging/android/ion/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_ION_SYSTEM_HEAP) += ion_system_heap.o ion_page_pool.o
 obj-$(CONFIG_ION_CARVEOUT_HEAP) += ion_carveout_heap.o
 obj-$(CONFIG_ION_CHUNK_HEAP) += ion_chunk_heap.o
 obj-$(CONFIG_ION_CMA_HEAP) += ion_cma_heap.o
+obj-$(CONFIG_ION_UNMAPPED_HEAP) += ion_unmapped_heap.o
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 03d3a4fce0e298..051e0a283f095d 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -218,6 +218,7 @@ struct ion_dma_buf_attachment {
 	struct device *dev;
 	struct sg_table *table;
 	struct list_head list;
+	bool no_map;
 };
 
 static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
@@ -237,6 +238,9 @@ static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
 		return -ENOMEM;
 	}
 
+	if (buffer->heap->type == ION_HEAP_TYPE_UNMAPPED)
+		a->no_map = true;
+
 	a->table = table;
 	a->dev = dev;
 	INIT_LIST_HEAD(&a->list);
@@ -274,6 +278,9 @@ static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
 
 	table = a->table;
 
+	if (a->no_map)
+		return table;
+
 	if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
 			direction)) {
 		ret = -ENOMEM;
@@ -290,6 +297,11 @@ static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
 			      struct sg_table *table,
 			      enum dma_data_direction direction)
 {
+	struct ion_dma_buf_attachment *a = attachment->priv;
+
+	if (a->no_map)
+		return;
+
 	dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
 }
diff --git a/drivers/staging/android/ion/ion_unmapped_heap.c b/drivers/staging/android/ion/ion_unmapped_heap.c
new file mode 100644
index 00000000000000..fe8eae5a6f05d9
--- /dev/null
+++ b/drivers/staging/android/ion/ion_unmapped_heap.c
@@ -0,0 +1,260 @@
+/*
+ * drivers/staging/android/ion/ion_unmapped_heap.c
+ *
+ * Copyright (C) 2016-2017 Linaro, Inc.
+ * Copyright (C) Allwinner 2014
+ * Author: for Allwinner.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * ION heap type for handling physical memory heaps not mapped
+ * in the Linux-based OS.
+ *
+ * "unmapped heap" buffers are by default not mapped, but the buffer
+ * owner can explicitly request mapping for some specific purpose.
+ *
+ * Based on Allwinner work (allocation through gen_pool) and
+ * HiSilicon work (create ION heaps from DT nodes,
+ * Author: Chen Feng).
+ */
+
+#include <linux/genalloc.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#include "ion.h"
+
+/*
+ * TODO: non-contiguous unmapped heaps:
+ * - add a flag to specify a contiguity constraint?
+ * - define another heap type that allocates from the same pool(s)?
+ */
+
+struct ion_unmapped_heap {
+	struct ion_heap heap;
+	struct gen_pool *pool;
+	phys_addr_t base;
+	size_t size;
+};
+
+struct unmapped_buffer_priv {
+	phys_addr_t base;
+};
+
+static phys_addr_t get_buffer_base(struct unmapped_buffer_priv *priv)
+{
+	return priv->base;
+}
+
+static struct device *heap2dev(struct ion_heap *heap)
+{
+	return heap->dev->dev.this_device;
+}
+
+static bool ion_unmapped_allocate(struct ion_heap *heap,
+				  unsigned long size,
+				  phys_addr_t *addr)
+{
+	struct ion_unmapped_heap *umh =
+		container_of(heap, struct ion_unmapped_heap, heap);
+	unsigned long offset = gen_pool_alloc(umh->pool, size);
+
+	if (!offset) {
+		dev_err(heap2dev(heap),
+			"%s(%d) err: alloc 0x%08x bytes failed\n",
+			__func__, __LINE__, (u32)size);
+		return false;
+	}
+
+	*addr = offset;
+	return true;
+}
+
+static void ion_unmapped_free(struct ion_heap *heap, phys_addr_t addr,
+			      unsigned long size)
+{
+	struct ion_unmapped_heap *umh =
+		container_of(heap, struct ion_unmapped_heap, heap);
+
+	gen_pool_free(umh->pool, addr, size);
+}
+
+static struct sg_table *ion_unmapped_heap_map_dma(struct ion_heap *heap,
+						  struct ion_buffer *buffer)
+{
+	struct sg_table *table;
+	int ret;
+
+	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!table)
+		return ERR_PTR(-ENOMEM);
+	ret = sg_alloc_table(table, 1, GFP_KERNEL);
+	if (ret) {
+		kfree(table);
+		return ERR_PTR(ret);
+	}
+	sg_set_page(table->sgl,
+		    phys_to_page(get_buffer_base(buffer->priv_virt)),
+		    buffer->size, 0);
+
+	return table;
+}
+
+void ion_unmapped_heap_unmap_dma(struct ion_heap *heap,
+				 struct ion_buffer *buffer)
+{
+	sg_free_table(buffer->sg_table);
+	kfree(buffer->sg_table);
+}
+
+static int ion_unmapped_heap_allocate(struct ion_heap *heap,
+				      struct ion_buffer *buffer,
+				      unsigned long size,
+				      unsigned long flags)
+{
+	struct unmapped_buffer_priv *priv;
+	phys_addr_t base;
+	int rc = -EINVAL;
+
+	if (!ion_unmapped_allocate(heap, size, &base))
+		return -ENOMEM;
+
+	priv = devm_kzalloc(heap2dev(heap), sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	priv->base = base;
+	buffer->size = roundup(size, PAGE_SIZE);
+	buffer->priv_virt = priv;
+
+	buffer->sg_table = ion_unmapped_heap_map_dma(heap, buffer);
+	if (IS_ERR(buffer->sg_table)) {
+		rc = -ENOMEM;
+		goto err;
+	}
+	sg_dma_address(buffer->sg_table->sgl) = priv->base;
+	sg_dma_len(buffer->sg_table->sgl) = size;
+	return 0;
+
+err:
+	ion_unmapped_free(heap, base, size);
+	devm_kfree(heap2dev(heap), priv);
+	buffer->priv_virt = NULL;
+	return rc;
+}
+
+static void ion_unmapped_heap_free(struct ion_buffer *buffer)
+{
+	struct ion_heap *heap = buffer->heap;
+
+	ion_unmapped_heap_unmap_dma(heap, buffer);
+	ion_unmapped_free(heap, get_buffer_base(buffer->priv_virt),
+			  buffer->size);
+	devm_kfree(heap2dev(heap), buffer->priv_virt);
+	buffer->priv_virt = NULL;
+}
+
+static int ion_unmapped_heap_map_user(struct ion_heap *heap,
+				      struct ion_buffer *buffer,
+				      struct vm_area_struct *vma)
+{
+	phys_addr_t pa = get_buffer_base(buffer->priv_virt);
+
+	/*
+	 * When the user calls ION_IOC_ALLOC without ION_FLAG_CACHED,
+	 * ion_mmap() will change vma->vm_page_prot to
+	 * pgprot_writecombine() itself, so we do not need to do that
+	 * here manually.
+	 */
+	return remap_pfn_range(vma, vma->vm_start,
+			       __phys_to_pfn(pa) + vma->vm_pgoff,
+			       vma->vm_end - vma->vm_start,
+			       vma->vm_page_prot);
+}
+
+static struct ion_heap_ops unmapped_heap_ops = {
+	.allocate = ion_unmapped_heap_allocate,
+	.free = ion_unmapped_heap_free,
+	.map_user = ion_unmapped_heap_map_user,
+	.map_kernel = ion_heap_map_kernel,
+	.unmap_kernel = ion_heap_unmap_kernel,
+};
+
+struct ion_heap *ion_unmapped_heap_create(struct ion_platform_heap *pheap)
+{
+	struct ion_unmapped_heap *umh;
+
+	if (pheap->type != ION_HEAP_TYPE_UNMAPPED)
+		return NULL;
+
+	umh = kzalloc(sizeof(struct ion_unmapped_heap), GFP_KERNEL);
+	if (!umh)
+		return ERR_PTR(-ENOMEM);
+
+	umh->pool = gen_pool_create(PAGE_SHIFT, -1);
+	if (!umh->pool) {
+		kfree(umh);
+		return ERR_PTR(-ENOMEM);
+	}
+	umh->base = pheap->base;
+	umh->size = pheap->size;
+
+	gen_pool_add(umh->pool, umh->base, pheap->size, -1);
+	umh->heap.ops = &unmapped_heap_ops;
+	umh->heap.type = ION_HEAP_TYPE_UNMAPPED;
+
+	return &umh->heap;
+}
+EXPORT_SYMBOL(ion_unmapped_heap_create);
+
+void ion_unmapped_heap_destroy(struct ion_heap *heap)
+{
+	struct ion_unmapped_heap *umh =
+		container_of(heap, struct ion_unmapped_heap, heap);
+
+	gen_pool_destroy(umh->pool);
+	kfree(umh);
+}
+EXPORT_SYMBOL(ion_unmapped_heap_destroy);
+
+#if defined(CONFIG_ION_DUMMY_UNMAPPED_HEAP) && CONFIG_ION_DUMMY_UNMAPPED_SIZE
+#define DUMMY_UNMAPPED_HEAP_NAME "unmapped_contiguous"
+
+static int ion_add_dummy_unmapped_heaps(void)
+{
+	struct ion_heap *heap;
+	const char name[] = DUMMY_UNMAPPED_HEAP_NAME;
+	struct ion_platform_heap pheap = {
+		.type = ION_HEAP_TYPE_UNMAPPED,
+		.base = CONFIG_ION_DUMMY_UNMAPPED_BASE,
+		.size = CONFIG_ION_DUMMY_UNMAPPED_SIZE,
+	};
+
+	heap = ion_unmapped_heap_create(&pheap);
+	if (IS_ERR(heap))
+		return PTR_ERR(heap);
+
+	heap->name = kzalloc(sizeof(name), GFP_KERNEL);
+	if (!heap->name) {
+		kfree(heap);
+		return -ENOMEM;
+	}
+	memcpy((char *)heap->name, name, sizeof(name));
+
+	ion_device_add_heap(heap);
+	return 0;
+}
+device_initcall(ion_add_dummy_unmapped_heaps);
+#endif
diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h
index b76db1b2e197ce..4173d81cc8d305 100644
--- a/drivers/staging/android/uapi/ion.h
+++ b/drivers/staging/android/uapi/ion.h
@@ -28,6 +28,8 @@
  *				 carveout heap, allocations are physically
  *				 contiguous
  * @ION_HEAP_TYPE_DMA:		 memory allocated via DMA API
+ * @ION_HEAP_TYPE_UNMAPPED:	 memory not intended to be mapped into the
+ *				 linux address space unless for debug cases
  * @ION_NUM_HEAPS:		 helper for iterating over heaps, a bit mask
  *				 is used to identify the heaps, so only 32
  *				 total heap types are supported
@@ -38,6 +40,7 @@ enum ion_heap_type {
 	ION_HEAP_TYPE_CARVEOUT,
 	ION_HEAP_TYPE_CHUNK,
 	ION_HEAP_TYPE_DMA,
+	ION_HEAP_TYPE_UNMAPPED,
 	ION_HEAP_TYPE_CUSTOM, /*
 			       * must be last so device specific heaps always
 			       * are at the end of this enum
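For reference, here is a minimal userspace sketch of how a client could allocate from the new heap type through the post-4.12 ION ioctls (ION_IOC_HEAP_QUERY, ION_IOC_ALLOC). It is illustrative only and not part of the patch; it assumes the staging UAPI header above is visible to userspace as <linux/ion.h> and that the device node is /dev/ion:

/*
 * Illustrative only: obtain a dma-buf fd from an ION_HEAP_TYPE_UNMAPPED
 * heap. The header path and /dev/ion node are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ion.h>

int main(void)
{
	struct ion_heap_data heaps[32];
	struct ion_heap_query query = {
		.cnt = 32,
		.heaps = (unsigned long)heaps,
	};
	struct ion_allocation_data alloc = { .len = 4096 };
	int fd = open("/dev/ion", O_RDWR);
	unsigned int i;

	if (fd < 0 || ioctl(fd, ION_IOC_HEAP_QUERY, &query) < 0)
		return 1;

	/* pick every heap the kernel reports as unmapped */
	for (i = 0; i < query.cnt; i++)
		if (heaps[i].type == ION_HEAP_TYPE_UNMAPPED)
			alloc.heap_id_mask |= 1u << heaps[i].heap_id;

	if (!alloc.heap_id_mask || ioctl(fd, ION_IOC_ALLOC, &alloc) < 0)
		return 1;	/* no unmapped heap, or allocation failed */

	printf("unmapped heap dma-buf fd: %u\n", alloc.fd);
	close(alloc.fd);	/* buffer is freed with its last reference */
	close(fd);
	return 0;
}

Note that the returned fd is a plain dma-buf: it can be passed to a device driver that attaches without mapping (the no_map path added to ion.c above), while CPU mmap of the buffer stays possible only through the heap's own map_user hook.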
diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig
index 0126de898036cd..f425c960a5a222 100644
--- a/drivers/tee/optee/Kconfig
+++ b/drivers/tee/optee/Kconfig
@@ -5,3 +5,18 @@ config OPTEE
 	help
 	  This implements the OP-TEE Trusted Execution Environment (TEE)
 	  driver.
+
+config OPTEE_BENCHMARK
+	bool "OP-TEE Benchmark (EXPERIMENTAL)"
+	depends on OPTEE
+	help
+	  This enables the benchmarking feature in the OP-TEE Trusted
+	  Execution Environment (TEE) driver.
+
+config OPTEE_SHM_NUM_PRIV_PAGES
+	int "Private Shared Memory Pages"
+	default 1
+	depends on OPTEE
+	help
+	  This sets the number of private shared memory pages to be
+	  used by the OP-TEE TEE driver.
diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile
index 92fe5789bcce29..76b202fcd90612 100644
--- a/drivers/tee/optee/Makefile
+++ b/drivers/tee/optee/Makefile
@@ -3,3 +3,5 @@ optee-objs += core.o
 optee-objs += call.o
 optee-objs += rpc.o
 optee-objs += supp.o
+optee-objs += shm_pool.o
+optee-objs += bench.o
diff --git a/drivers/tee/optee/bench.c b/drivers/tee/optee/bench.c
new file mode 100644
index 00000000000000..9e73b2fb54adc1
--- /dev/null
+++ b/drivers/tee/optee/bench.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2017, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/smp.h>
+#include "optee_bench.h"
+
+/*
+ * Specific defines for ARM performance timers
+ */
+/* aarch32 */
+#define OPTEE_BENCH_DEF_OPTS	(1 | 16)
+#define OPTEE_BENCH_DEF_OVER	0x8000000f
+/* enable 64 divider for CCNT */
+#define OPTEE_BENCH_DIVIDER_OPTS (OPTEE_BENCH_DEF_OPTS | 8)
+
+/* aarch64 */
+#define OPTEE_BENCH_ARMV8_PMCR_MASK	0x3f
+#define OPTEE_BENCH_ARMV8_PMCR_E	(1 << 0) /* Enable all counters */
+#define OPTEE_BENCH_ARMV8_PMCR_P	(1 << 1) /* Reset all counters */
+#define OPTEE_BENCH_ARMV8_PMCR_C	(1 << 2) /* Cycle counter reset */
+#define OPTEE_BENCH_ARMV8_PMCR_D	(1 << 3) /* 64 divider */
+
+#define OPTEE_BENCH_ARMV8_PMUSERENR_EL0	(1 << 0) /* EL0 access enable */
+#define OPTEE_BENCH_ARMV8_PMUSERENR_CR	(1 << 2) /* CCNT read enable */
+
+struct optee_ts_global *optee_bench_ts_global;
+struct rw_semaphore optee_bench_ts_rwsem;
+
+#ifdef CONFIG_OPTEE_BENCHMARK
+static inline u32 armv8pmu_pmcr_read(void)
+{
+	u32 val = 0;
+
+	asm volatile("mrs %0, pmcr_el0" : "=r"(val));
+
+	return (u32)val;
+}
+
+static inline void armv8pmu_pmcr_write(u32 val)
+{
+	val &= OPTEE_BENCH_ARMV8_PMCR_MASK;
+	asm volatile("msr pmcr_el0, %0" :: "r"((u64)val));
+}
+
+static inline u64 read_ccounter(void)
+{
+	u64 ccounter;
+
+#ifdef __aarch64__
+	asm volatile("mrs %0, PMCCNTR_EL0" : "=r"(ccounter));
+#else
+	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(ccounter));
+#endif
+
+	return ccounter * OPTEE_BENCH_DIVIDER;
+}
+
+static void optee_pmu_setup(void *data)
+{
+#ifdef __aarch64__
+	/* Enable EL0 access to PMU counters. */
+	asm volatile("msr pmuserenr_el0, %0" :: "r"((u64)
+		     OPTEE_BENCH_ARMV8_PMUSERENR_EL0 |
+		     OPTEE_BENCH_ARMV8_PMUSERENR_CR));
+	/* Enable PMU counters */
+	armv8pmu_pmcr_write(OPTEE_BENCH_ARMV8_PMCR_P |
+			    OPTEE_BENCH_ARMV8_PMCR_C |
+			    OPTEE_BENCH_ARMV8_PMCR_D);
+	asm volatile("msr pmcntenset_el0, %0" :: "r"((u64)(1 << 31)));
+	armv8pmu_pmcr_write(armv8pmu_pmcr_read() |
+			    OPTEE_BENCH_ARMV8_PMCR_E);
+#else
+	/* Enable EL0 access to PMU counters */
+	asm volatile("mcr p15, 0, %0, c9, c14, 0" :: "r"(1));
+	/* Enable all PMU counters */
+	asm volatile("mcr p15, 0, %0, c9, c12, 0" :: "r"
+		     (OPTEE_BENCH_DIVIDER_OPTS));
+	/* Disable counter overflow interrupts */
+	asm volatile("mcr p15, 0, %0, c9, c12, 1" :: "r"(OPTEE_BENCH_DEF_OVER));
+#endif
+}
+
+static void optee_pmu_disable(void *data)
+{
+#ifdef __aarch64__
+	/* Disable EL0 access */
+	asm volatile("msr pmuserenr_el0, %0" :: "r"((u64)0));
+	/* Disable PMU counters */
+	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~OPTEE_BENCH_ARMV8_PMCR_E);
+#else
+	/* Disable all PMU counters */
+	asm volatile("mcr p15, 0, %0, c9, c12, 0" :: "r"(0));
+	/* Enable counter overflow interrupts */
+	asm volatile("mcr p15, 0, %0, c9, c12, 2" :: "r"(OPTEE_BENCH_DEF_OVER));
+	/* Disable EL0 access to PMU counters. */
+	asm volatile("mcr p15, 0, %0, c9, c14, 0" :: "r"(0));
+#endif
+}
+
+void optee_bm_enable(void)
+{
+	on_each_cpu(optee_pmu_setup, NULL, 1);
+}
+
+void optee_bm_disable(void)
+{
+	on_each_cpu(optee_pmu_disable, NULL, 1);
+}
+
+void optee_bm_timestamp(void)
+{
+	struct optee_ts_cpu_buf *cpu_buf;
+	struct optee_time_st ts_data;
+	uint64_t ts_i;
+	void *ret_addr;
+	int cur_cpu = 0;
+
+	down_read(&optee_bench_ts_rwsem);
+
+	if (!optee_bench_ts_global) {
+		up_read(&optee_bench_ts_rwsem);
+		return;
+	}
+
+	cur_cpu = get_cpu();
+
+	if (cur_cpu >= optee_bench_ts_global->cores) {
+		put_cpu();
+		up_read(&optee_bench_ts_rwsem);
+		return;
+	}
+
+	ret_addr = __builtin_return_address(0);
+
+	cpu_buf = &optee_bench_ts_global->cpu_buf[cur_cpu];
+	ts_i = __sync_fetch_and_add(&cpu_buf->head, 1);
+	ts_data.cnt = read_ccounter();
+	ts_data.addr = (uintptr_t)ret_addr;
+	ts_data.src = OPTEE_BENCH_KMOD;
+	cpu_buf->stamps[ts_i & OPTEE_BENCH_MAX_MASK] = ts_data;
+
+	up_read(&optee_bench_ts_rwsem);
+
+	put_cpu();
+}
+#endif /* CONFIG_OPTEE_BENCHMARK */
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index f7b7b404c990bf..0d8605dde0eb0a 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -15,12 +15,14 @@
 #include <linux/arm-smccc.h>
 #include <linux/err.h>
 #include <linux/errno.h>
+#include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/tee_drv.h>
 #include <linux/types.h>
 #include <linux/uaccess.h>
 #include "optee_private.h"
 #include "optee_smc.h"
+#include "optee_bench.h"
 
 struct optee_call_waiter {
 	struct list_head list_node;
@@ -135,6 +137,7 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
 	struct optee *optee = tee_get_drvdata(ctx->teedev);
 	struct optee_call_waiter w;
 	struct optee_rpc_param param = { };
+	struct optee_call_ctx call_ctx = { };
 	u32 ret;
 
 	param.a0 = OPTEE_SMC_CALL_WITH_ARG;
@@ -144,10 +147,14 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
 	while (true) {
 		struct arm_smccc_res res;
 
+		optee_bm_timestamp();
+
 		optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
 				 param.a4, param.a5, param.a6, param.a7,
 				 &res);
 
+		optee_bm_timestamp();
+
 		if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
 			/*
 			 * Out of threads in secure world, wait for a thread
@@ -159,13 +166,14 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
 			param.a1 = res.a1;
 			param.a2 = res.a2;
 			param.a3 = res.a3;
-			optee_handle_rpc(ctx, &param);
+			optee_handle_rpc(ctx, &param, &call_ctx);
 		} else {
 			ret = res.a0;
 			break;
 		}
 	}
 
+	optee_rpc_finalize_call(&call_ctx);
 	/*
 	 * We're done with our thread in secure world, if there's any
 	 * thread waiters wake up one.
@@ -442,3 +450,218 @@ void optee_disable_shm_cache(struct optee *optee)
 	}
 	optee_cq_wait_final(&optee->call_queue, &w);
 }
+
+#define PAGELIST_ENTRIES_PER_PAGE				\
+	((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
+
+/**
+ * optee_fill_pages_list() - write list of user pages to given shared
+ * buffer.
+ *
+ * @dst: page-aligned buffer where list of pages will be stored
+ * @pages: array of pages that represents shared buffer
+ * @num_pages: number of entries in @pages
+ * @page_offset: offset of user buffer from page start
+ *
+ * @dst should be big enough to hold the list of user page addresses and
+ * links to the next pages of the buffer.
+ */
+void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
+			   size_t page_offset)
+{
+	int n = 0;
+	phys_addr_t optee_page;
+	/*
+	 * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
+	 * for details.
+	 */
+	struct {
+		u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
+		u64 next_page_data;
+	} *pages_data;
+
+	/*
+	 * Currently OP-TEE uses 4k page size and it does not look
+	 * like this will change in the future. On the other hand, there
+	 * are no known ARM architectures with page size < 4k.
+	 * Thus the next build assert looks redundant. But the following
+	 * code heavily relies on this assumption, so it is better to be
+	 * safe than sorry.
+	 */
+	BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+	pages_data = (void *)dst;
+	/*
+	 * If a linux page is bigger than 4k, and the user buffer offset
+	 * is larger than 4k/8k/12k/etc, this will skip the first 4k
+	 * chunks, because they bear no valid data for OP-TEE.
+	 */
+	optee_page = page_to_phys(*pages) +
+		round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+	while (true) {
+		pages_data->pages_list[n++] = optee_page;
+
+		if (n == PAGELIST_ENTRIES_PER_PAGE) {
+			pages_data->next_page_data =
+				virt_to_phys(pages_data + 1);
+			pages_data++;
+			n = 0;
+		}
+
+		optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+		if (!(optee_page & ~PAGE_MASK)) {
+			if (!--num_pages)
+				break;
+			pages++;
+			optee_page = page_to_phys(*pages);
+		}
+	}
+}
+
+/*
+ * The final entry in each pagelist page is a pointer to the next
+ * pagelist page.
+ */
+static size_t get_pages_list_size(size_t num_entries)
+{
+	int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);
+
+	return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+}
+
+u64 *optee_allocate_pages_list(size_t num_entries)
+{
+	return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
+}
+
+void optee_free_pages_list(void *list, size_t num_entries)
+{
+	free_pages_exact(list, get_pages_list_size(num_entries));
+}
+
+static bool is_normal_memory(pgprot_t p)
+{
+#if defined(CONFIG_ARM)
+	return (pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC;
+#elif defined(CONFIG_ARM64)
+	return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
+#else
+#error "Unsupported architecture"
+#endif
+}
+
+static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
+{
+	while (vma && is_normal_memory(vma->vm_page_prot)) {
+		if (vma->vm_end >= end)
+			return 0;
+		vma = vma->vm_next;
+	}
+
+	return -EINVAL;
+}
+
+static int check_mem_type(unsigned long start, size_t num_pages)
+{
+	struct mm_struct *mm = current->mm;
+	int rc;
+
+	down_read(&mm->mmap_sem);
+	rc = __check_mem_type(find_vma(mm, start),
+			      start + num_pages * PAGE_SIZE);
+	up_read(&mm->mmap_sem);
+
+	return rc;
+}
+
+int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+		       struct page **pages, size_t num_pages,
+		       unsigned long start)
+{
+	struct tee_shm *shm_arg = NULL;
+	struct optee_msg_arg *msg_arg;
+	u64 *pages_list;
+	phys_addr_t msg_parg;
+	int rc;
+
+	if (!num_pages)
+		return -EINVAL;
+
+	rc = check_mem_type(start, num_pages);
+	if (rc)
+		return rc;
+
+	pages_list = optee_allocate_pages_list(num_pages);
+	if (!pages_list)
+		return -ENOMEM;
+
+	shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
+	if (IS_ERR(shm_arg)) {
+		rc = PTR_ERR(shm_arg);
+		goto out;
+	}
+
+	optee_fill_pages_list(pages_list, pages, num_pages,
+			      tee_shm_get_page_offset(shm));
+
+	msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
+	msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+				OPTEE_MSG_ATTR_NONCONTIG;
+	msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
+	msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
+	/*
+	 * In the least bits of msg_arg->params->u.tmem.buf_ptr we store
+	 * the buffer offset from the 4k page, as described in the OP-TEE
+	 * ABI.
+	 */
+	msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
+		(tee_shm_get_page_offset(shm) &
+		 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
+
+	if (optee_do_call_with_arg(ctx, msg_parg) ||
+	    msg_arg->ret != TEEC_SUCCESS)
+		rc = -EINVAL;
+
+	tee_shm_free(shm_arg);
+out:
+	optee_free_pages_list(pages_list, num_pages);
+	return rc;
+}
+
+int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
+{
+	struct tee_shm *shm_arg;
+	struct optee_msg_arg *msg_arg;
+	phys_addr_t msg_parg;
+	int rc = 0;
+
+	shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
+	if (IS_ERR(shm_arg))
+		return PTR_ERR(shm_arg);
+
+	msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
+
+	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+	msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
+
+	if (optee_do_call_with_arg(ctx, msg_parg) ||
+	    msg_arg->ret != TEEC_SUCCESS)
+		rc = -EINVAL;
+	tee_shm_free(shm_arg);
+	return rc;
+}
+
+int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
+			    struct page **pages, size_t num_pages,
+			    unsigned long start)
+{
+	/*
+	 * We don't want to register supplicant memory in OP-TEE.
+	 * Instead information about it will be passed in RPC code.
+	 */
+	return check_mem_type(start, num_pages);
+}
+
+int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm)
+{
+	return 0;
+}
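A quick worked example of the sizing that optee_allocate_pages_list() performs above: each 4k pagelist page holds 4096/8 - 1 = 511 page addresses plus one link entry. The standalone sketch below (illustrative values, not part of the patch) reproduces that arithmetic:

/* Illustrative only: pagelist sizing math from call.c above. */
#include <stdio.h>

#define NONCONTIG_PAGE_SIZE 4096
#define ENTRIES_PER_PAGE (NONCONTIG_PAGE_SIZE / sizeof(unsigned long long) - 1)

int main(void)
{
	size_t num_pages = 1024;	/* e.g. a 4 MiB user buffer */
	size_t list_pages =
		(num_pages + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;

	/* 511 entries per page -> 3 pagelist pages (12 KiB) for 1024 pages */
	printf("%zu entries/page, %zu pagelist pages\n",
	       (size_t)ENTRIES_PER_PAGE, list_pages);
	return 0;
}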
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 58169e519422d3..8d3be6b1bb96af 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -26,12 +26,14 @@
 #include <linux/tee_drv.h>
 #include <linux/types.h>
 #include <linux/uaccess.h>
+#include "optee_bench.h"
 #include "optee_private.h"
 #include "optee_smc.h"
+#include "shm_pool.h"
 
 #define DRIVER_NAME "optee"
 
-#define OPTEE_SHM_NUM_PRIV_PAGES	1
+#define OPTEE_SHM_NUM_PRIV_PAGES	CONFIG_OPTEE_SHM_NUM_PRIV_PAGES
 
 /**
  * optee_from_msg_param() - convert from OPTEE_MSG parameters to
@@ -97,6 +99,25 @@ int optee_from_msg_param(struct tee_param *params, size_t num_params,
 				return rc;
 			}
 			break;
+		case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
+		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
+		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
+			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+				  attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+			p->u.memref.size = mp->u.rmem.size;
+			shm = (struct tee_shm *)(unsigned long)
+				mp->u.rmem.shm_ref;
+
+			if (!shm) {
+				p->u.memref.shm_offs = 0;
+				p->u.memref.shm = NULL;
+				break;
+			}
+			p->u.memref.shm_offs = mp->u.rmem.offs;
+			p->u.memref.shm = shm;
+
+			break;
 		default:
 			return -EINVAL;
 		}
@@ -104,6 +125,46 @@ int optee_from_msg_param(struct tee_param *params, size_t num_params,
 	return 0;
 }
 
+static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
+				const struct tee_param *p)
+{
+	int rc;
+	phys_addr_t pa;
+
+	mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
+		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+	mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
+	mp->u.tmem.size = p->u.memref.size;
+
+	if (!p->u.memref.shm) {
+		mp->u.tmem.buf_ptr = 0;
+		return 0;
+	}
+
+	rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
+	if (rc)
+		return rc;
+
+	mp->u.tmem.buf_ptr = pa;
+	mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
+		    OPTEE_MSG_ATTR_CACHE_SHIFT;
+
+	return 0;
+}
+
+static int to_msg_param_reg_mem(struct optee_msg_param *mp,
+				const struct tee_param *p)
+{
+	mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
+		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+	mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
+	mp->u.rmem.size = p->u.memref.size;
+	mp->u.rmem.offs = p->u.memref.shm_offs;
+	return 0;
+}
+
 /**
  * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters
  * @msg_params:	OPTEE_MSG parameters
@@ -116,7 +177,6 @@ int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
 {
 	int rc;
 	size_t n;
-	phys_addr_t pa;
 
 	for (n = 0; n < num_params; n++) {
 		const struct tee_param *p = params + n;
@@ -139,22 +199,12 @@ int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
-			mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT +
-				   p->attr -
-				   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
-			mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
-			mp->u.tmem.size = p->u.memref.size;
-			if (!p->u.memref.shm) {
-				mp->u.tmem.buf_ptr = 0;
-				break;
-			}
-			rc = tee_shm_get_pa(p->u.memref.shm,
-					    p->u.memref.shm_offs, &pa);
+			if (tee_shm_is_registered(p->u.memref.shm))
+				rc = to_msg_param_reg_mem(mp, p);
+			else
+				rc = to_msg_param_tmp_mem(mp, p);
 			if (rc)
 				return rc;
-			mp->u.tmem.buf_ptr = pa;
-			mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
-					OPTEE_MSG_ATTR_CACHE_SHIFT;
 			break;
 		default:
 			return -EINVAL;
@@ -171,6 +221,10 @@ static void optee_get_version(struct tee_device *teedev,
 		.impl_caps = TEE_OPTEE_CAP_TZ,
 		.gen_caps = TEE_GEN_CAP_GP,
 	};
+	struct optee *optee = tee_get_drvdata(teedev);
+
+	if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+		v.gen_caps |= TEE_GEN_CAP_REG_MEM;
 	*vers = v;
 }
 
@@ -187,12 +241,12 @@ static int optee_open(struct tee_context *ctx)
 	if (teedev == optee->supp_teedev) {
 		bool busy = true;
 
-		mutex_lock(&optee->supp.ctx_mutex);
+		mutex_lock(&optee->supp.mutex);
 		if (!optee->supp.ctx) {
 			busy = false;
 			optee->supp.ctx = ctx;
 		}
-		mutex_unlock(&optee->supp.ctx_mutex);
+		mutex_unlock(&optee->supp.mutex);
 		if (busy) {
 			kfree(ctxdata);
 			return -EBUSY;
@@ -224,13 +278,14 @@ static void optee_release(struct tee_context *ctx)
 	if (!IS_ERR(shm)) {
 		arg = tee_shm_get_va(shm, 0);
 		/*
-		 * If va2pa fails for some reason, we can't call
-		 * optee_close_session(), only free the memory. Secure OS
-		 * will leak sessions and finally refuse more sessions, but
-		 * we will at least let normal world reclaim its memory.
+		 * If va2pa fails for some reason, we can't call into
+		 * secure world, only free the memory. Secure OS will leak
+		 * sessions and finally refuse more sessions, but we will
+		 * at least let normal world reclaim its memory.
 		 */
 		if (!IS_ERR(arg))
-			tee_shm_va2pa(shm, arg, &parg);
+			if (tee_shm_va2pa(shm, arg, &parg))
+				arg = NULL; /* prevent usage of parg below */
 	}
 
 	list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list,
@@ -251,14 +306,11 @@ static void optee_release(struct tee_context *ctx)
 
 	ctx->data = NULL;
 
-	if (teedev == optee->supp_teedev) {
-		mutex_lock(&optee->supp.ctx_mutex);
-		optee->supp.ctx = NULL;
-		mutex_unlock(&optee->supp.ctx_mutex);
-	}
+	if (teedev == optee->supp_teedev)
+		optee_supp_release(&optee->supp);
 }
 
-static struct tee_driver_ops optee_ops = {
+static const struct tee_driver_ops optee_ops = {
 	.get_version = optee_get_version,
 	.open = optee_open,
 	.release = optee_release,
@@ -266,23 +318,27 @@ static struct tee_driver_ops optee_ops = {
 	.close_session = optee_close_session,
 	.invoke_func = optee_invoke_func,
 	.cancel_req = optee_cancel_req,
+	.shm_register = optee_shm_register,
+	.shm_unregister = optee_shm_unregister,
 };
 
-static struct tee_desc optee_desc = {
+static const struct tee_desc optee_desc = {
 	.name = DRIVER_NAME "-clnt",
 	.ops = &optee_ops,
 	.owner = THIS_MODULE,
 };
 
-static struct tee_driver_ops optee_supp_ops = {
+static const struct tee_driver_ops optee_supp_ops = {
 	.get_version = optee_get_version,
 	.open = optee_open,
 	.release = optee_release,
 	.supp_recv = optee_supp_recv,
 	.supp_send = optee_supp_send,
+	.shm_register = optee_shm_register_supp,
+	.shm_unregister = optee_shm_unregister_supp,
 };
 
-static struct tee_desc optee_supp_desc = {
+static const struct tee_desc optee_supp_desc = {
 	.name = DRIVER_NAME "-supp",
 	.ops = &optee_supp_ops,
 	.owner = THIS_MODULE,
@@ -301,6 +357,27 @@ static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
 	return false;
 }
 
+static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
+{
+	union {
+		struct arm_smccc_res smccc;
+		struct optee_smc_call_get_os_revision_result result;
+	} res = {
+		.result = {
+			.build_id = 0
+		}
+	};
+
+	invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
+		  &res.smccc);
+
+	if (res.result.build_id)
+		pr_info("revision %lu.%lu (%08lx)", res.result.major,
+			res.result.minor, res.result.build_id);
+	else
+		pr_info("revision %lu.%lu", res.result.major,
+			res.result.minor);
+}
+
 static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
 {
 	union {
@@ -344,21 +421,22 @@ static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
 }
 
 static struct tee_shm_pool *
-optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
+optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm,
+			  u32 sec_caps)
 {
 	union {
 		struct arm_smccc_res smccc;
 		struct optee_smc_get_shm_config_result result;
 	} res;
-	struct tee_shm_pool *pool;
 	unsigned long vaddr;
 	phys_addr_t paddr;
 	size_t size;
 	phys_addr_t begin;
 	phys_addr_t end;
 	void *va;
-	struct tee_shm_pool_mem_info priv_info;
-	struct tee_shm_pool_mem_info dmabuf_info;
+	struct tee_shm_pool_mgr *priv_mgr;
+	struct tee_shm_pool_mgr *dmabuf_mgr;
+	void *rc;
 
 	invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
 	if (res.result.status != OPTEE_SMC_RETURN_OK) {
@@ -388,22 +466,49 @@ optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
 	}
 	vaddr = (unsigned long)va;
 
-	priv_info.vaddr = vaddr;
-	priv_info.paddr = paddr;
-	priv_info.size = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
-	dmabuf_info.vaddr = vaddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
-	dmabuf_info.paddr = paddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
-	dmabuf_info.size = size - OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
-
-	pool = tee_shm_pool_alloc_res_mem(&priv_info, &dmabuf_info);
-	if (IS_ERR(pool)) {
-		memunmap(va);
-		goto out;
+	/*
+	 * If OP-TEE can work with unregistered SHM, we will use our own
+	 * pool for private shm.
+	 */
+	if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) {
+		rc = optee_shm_pool_alloc_pages();
+		if (IS_ERR(rc))
+			goto err_memunmap;
+		priv_mgr = rc;
+	} else {
+		const size_t sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
+
+		rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
+						    3 /* 8 bytes aligned */);
+		if (IS_ERR(rc))
+			goto err_memunmap;
+		priv_mgr = rc;
+
+		vaddr += sz;
+		paddr += sz;
+		size -= sz;
 	}
 
+	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
+	if (IS_ERR(rc))
+		goto err_free_priv_mgr;
+	dmabuf_mgr = rc;
+
+	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+	if (IS_ERR(rc))
+		goto err_free_dmabuf_mgr;
+
 	*memremaped_shm = va;
-out:
-	return pool;
+
+	return rc;
+
+err_free_dmabuf_mgr:
+	tee_shm_pool_mgr_destroy(dmabuf_mgr);
+err_free_priv_mgr:
+	tee_shm_pool_mgr_destroy(priv_mgr);
+err_memunmap:
+	memunmap(va);
+	return rc;
 }
 
 /* Simple wrapper functions to be able to use a function pointer */
@@ -464,6 +569,8 @@ static struct optee *optee_probe(struct device_node *np)
 		return ERR_PTR(-EINVAL);
 	}
 
+	optee_msg_get_os_revision(invoke_fn);
+
 	if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
 		pr_warn("api revision mismatch\n");
 		return ERR_PTR(-EINVAL);
@@ -481,7 +588,7 @@ static struct optee *optee_probe(struct device_node *np)
 	if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
 		return ERR_PTR(-EINVAL);
 
-	pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
+	pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm, sec_caps);
 	if (IS_ERR(pool))
 		return (void *)pool;
 
@@ -492,6 +599,7 @@ static struct optee *optee_probe(struct device_node *np)
 	}
 
 	optee->invoke_fn = invoke_fn;
+	optee->sec_caps = sec_caps;
 
 	teedev = tee_device_alloc(&optee_desc, NULL, pool, optee);
 	if (IS_ERR(teedev)) {
@@ -524,6 +632,9 @@ static struct optee *optee_probe(struct device_node *np)
 
 	optee_enable_shm_cache(optee);
 
+	if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+		pr_info("dynamic shared memory is enabled\n");
+
 	pr_info("initialized driver\n");
 	return optee;
 err:
@@ -589,7 +700,6 @@ static int __init optee_driver_init(void)
 		return -ENODEV;
 
 	np = of_find_matching_node(fw_np, optee_match);
-	of_node_put(fw_np);
 	if (!np)
 		return -ENODEV;
 
@@ -601,6 +711,8 @@ static int __init optee_driver_init(void)
 
 	optee_svc = optee;
 
+	optee_bm_enable();
+
 	return 0;
 }
 module_init(optee_driver_init);
@@ -612,6 +724,8 @@ static void __exit optee_driver_exit(void)
 	optee_svc = NULL;
 	if (optee)
 		optee_remove(optee);
+
+	optee_bm_disable();
 }
 module_exit(optee_driver_exit);
diff --git a/drivers/tee/optee/optee_bench.h b/drivers/tee/optee/optee_bench.h
new file mode 100644
index 00000000000000..985e6a011f58f7
--- /dev/null
+++ b/drivers/tee/optee/optee_bench.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _OPTEE_BENCH_H
+#define _OPTEE_BENCH_H
+
+#include <linux/rwsem.h>
+
+/*
+ * Cycle count divider is enabled (in PMCR),
+ * CCNT value is incremented every 64th clock cycle
+ */
+#define OPTEE_BENCH_DIVIDER		64
+
+/* max amount of timestamps */
+#define OPTEE_BENCH_MAX_STAMPS		32
+#define OPTEE_BENCH_MAX_MASK		(OPTEE_BENCH_MAX_STAMPS - 1)
+
+/* OP-TEE subsystem ids */
+#define OPTEE_BENCH_KMOD		0x20000000
+
+#define OPTEE_MSG_RPC_CMD_BENCH_REG_NEW	0
+#define OPTEE_MSG_RPC_CMD_BENCH_REG_DEL	1
+
+/* storing timestamp */
+struct optee_time_st {
+	uint64_t cnt;	/* stores value from CNTPCT register */
+	uint64_t addr;	/* stores value from program counter register */
+	uint64_t src;	/* OP-TEE subsystem id */
+};
+
+/* per-cpu circular buffer for timestamps */
+struct optee_ts_cpu_buf {
+	uint64_t head;
+	uint64_t tail;
+	struct optee_time_st stamps[OPTEE_BENCH_MAX_STAMPS];
+};
+
+/* memory layout for shared memory, where timestamps will be stored */
+struct optee_ts_global {
+	uint64_t cores;
+	struct optee_ts_cpu_buf cpu_buf[];
+};
+
+extern struct optee_ts_global *optee_bench_ts_global;
+extern struct rw_semaphore optee_bench_ts_rwsem;
+
+#ifdef CONFIG_OPTEE_BENCHMARK
+void optee_bm_enable(void);
+void optee_bm_disable(void);
+void optee_bm_timestamp(void);
+#else
+static inline void optee_bm_enable(void) {}
+static inline void optee_bm_disable(void) {}
+static inline void optee_bm_timestamp(void) {}
+#endif /* CONFIG_OPTEE_BENCHMARK */
+#endif /* _OPTEE_BENCH_H */
diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
index dd7a06ee04629e..8f9669a557c20a 100644
--- a/drivers/tee/optee/optee_msg.h
+++ b/drivers/tee/optee/optee_msg.h
@@ -67,11 +67,32 @@
 #define OPTEE_MSG_ATTR_META		BIT(8)
 
 /*
- * The temporary shared memory object is not physically contigous and this
- * temp memref is followed by another fragment until the last temp memref
- * that doesn't have this bit set.
+ * Pointer to a list of pages used to register a user-defined SHM buffer.
+ * Used with OPTEE_MSG_ATTR_TYPE_TMEM_*.
+ * buf_ptr should point to the beginning of the buffer. The buffer will
+ * contain a list of page addresses. OP-TEE core can reconstruct a
+ * contiguous buffer from that page address list. Page addresses are
+ * stored as 64 bit values.
+ * The last entry on a page should point to the next page of the buffer.
+ * Every entry in the buffer should point to a 4k page beginning (the 12
+ * least significant bits must be equal to zero).
+ *
+ * The 12 least significant bits of optee_msg_param.u.tmem.buf_ptr should
+ * hold the page offset of the user buffer.
+ *
+ * So, entries should be placed like members of this structure:
+ *
+ * struct page_data {
+ *	uint64_t pages_array[OPTEE_MSG_NONCONTIG_PAGE_SIZE/sizeof(uint64_t) - 1];
+ *	uint64_t next_page_data;
+ * };
+ *
+ * The structure is designed to exactly fit into the page size
+ * OPTEE_MSG_NONCONTIG_PAGE_SIZE, which is a standard 4KB page.
+ *
+ * The size of 4KB is chosen because this is the smallest page size for ARM
+ * architectures. If REE uses larger pages, it should divide them into 4KB
+ * ones.
 */
-#define OPTEE_MSG_ATTR_FRAGMENT	BIT(9)
+#define OPTEE_MSG_ATTR_NONCONTIG	BIT(9)
 
 /*
  * Memory attributes for caching passed with temp memrefs. The actual value
@@ -94,6 +115,11 @@
 #define OPTEE_MSG_LOGIN_APPLICATION_USER	0x00000005
 #define OPTEE_MSG_LOGIN_APPLICATION_GROUP	0x00000006
 
+/*
+ * Page size used in non-contiguous buffer entries
+ */
+#define OPTEE_MSG_NONCONTIG_PAGE_SIZE	4096
+
 /**
  * struct optee_msg_param_tmem - temporary memory reference parameter
  * @buf_ptr:	Address of the buffer
@@ -145,8 +171,8 @@ struct optee_msg_param_value {
  *
 * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in
 * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value,
- * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates tmem and
- * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates rmem.
+ * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and
+ * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates @rmem,
 * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used.
 */
 struct optee_msg_param {
@@ -415,4 +441,12 @@ struct optee_msg_arg {
 */
 #define OPTEE_MSG_RPC_CMD_SHM_FREE	7
 
+/*
+ * Register timestamp buffer
+ *
+ * [in] param[0].u.value.a	Subcommand (register buffer, unregister buffer)
+ * [in] param[0].u.value.b	Physical address of timestamp buffer
+ * [in] param[0].u.value.c	Size of buffer
+ */
+#define OPTEE_MSG_RPC_CMD_BENCH_REG	20
 #endif /* _OPTEE_MSG_H */
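To make the OPTEE_MSG_ATTR_NONCONTIG layout concrete, here is a hedged helper sketch (not part of the patch; the function name and the single-pagelist-page assumption are illustrative) that encodes a small buffer the way optee_fill_pages_list() and the buf_ptr composition above describe:

/*
 * Illustrative only: encode one small registered buffer for
 * OPTEE_MSG_ATTR_NONCONTIG. 'list' maps a single pagelist page whose
 * physical address is 'list_pa'; 'page_pa[]' holds the 4k-aligned
 * physical address of each data page and 'offs' is the byte offset of
 * the data within the first page. The return value is what goes into
 * u.tmem.buf_ptr.
 */
#include <linux/types.h>

static u64 encode_noncontig(u64 *list, const u64 *page_pa, int num_pages,
			    u64 list_pa, size_t offs)
{
	int i;

	/* entries 0..510 hold 4k page addresses ... */
	for (i = 0; i < num_pages; i++)
		list[i] = page_pa[i];
	/* ... entry 511 would chain to the next pagelist page if needed */

	/* buf_ptr: pagelist address | offset into the first 4k page */
	return list_pa | (offs & 4095);
}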
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index c374cd5943141f..35e79386c556bc 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -53,36 +53,24 @@ struct optee_wait_queue {
 * @ctx			the context of current connected supplicant.
 *			if !NULL the supplicant device is available for use,
 *			else busy
- * @ctx_mutex:		held while accessing @ctx
- * @func:		supplicant function id to call
- * @ret:		call return value
- * @num_params:		number of elements in @param
- * @param:		parameters for @func
- * @req_posted:		if true, a request has been posted to the supplicant
- * @supp_next_send:	if true, next step is for supplicant to send response
- * @thrd_mutex:		held by the thread doing a request to supplicant
- * @supp_mutex:		held by supplicant while operating on this struct
- * @data_to_supp:	supplicant is waiting on this for next request
- * @data_from_supp:	requesting thread is waiting on this to get the result
+ * @mutex:		held while accessing content of this struct
+ * @req_id:		current request id if supplicant is doing synchronous
+ *			communication, else -1
+ * @reqs:		queued requests not yet retrieved by supplicant
+ * @idr:		IDR holding all requests currently being processed
+ *			by supplicant
+ * @reqs_c:		completion used by supplicant when waiting for a
+ *			request to be queued.
 */
 struct optee_supp {
+	/* Serializes access to this struct */
+	struct mutex mutex;
 	struct tee_context *ctx;
-	/* Serializes access of ctx */
-	struct mutex ctx_mutex;
-
-	u32 func;
-	u32 ret;
-	size_t num_params;
-	struct tee_param *param;
-
-	bool req_posted;
-	bool supp_next_send;
-	/* Serializes access to this struct for requesting thread */
-	struct mutex thrd_mutex;
-	/* Serializes access to this struct for supplicant threads */
-	struct mutex supp_mutex;
-	struct completion data_to_supp;
-	struct completion data_from_supp;
+
+	int req_id;
+	struct list_head reqs;
+	struct idr idr;
+	struct completion reqs_c;
 };
 
 /**
@@ -96,6 +84,8 @@ struct optee_supp {
 * @supp:		supplicant synchronization struct for RPC to supplicant
 * @pool:		shared memory pool
 * @memremaped_shm	virtual address of memory in shared memory pool
+ * @sec_caps:		secure world capabilities defined by
+ *			OPTEE_SMC_SEC_CAP_* in optee_smc.h
 */
 struct optee {
 	struct tee_device *supp_teedev;
@@ -106,6 +96,7 @@ struct optee {
 	struct optee_supp supp;
 	struct tee_shm_pool *pool;
 	void *memremaped_shm;
+	u32 sec_caps;
 };
 
 struct optee_session {
@@ -130,7 +121,16 @@ struct optee_rpc_param {
 	u32 a7;
 };
 
-void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param);
+/* Holds context that is preserved during one STD call */
+struct optee_call_ctx {
+	/* information about pages list used in last allocation */
+	void *pages_list;
+	size_t num_entries;
+};
+
+void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
+		      struct optee_call_ctx *call_ctx);
+void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx);
 
 void optee_wait_queue_init(struct optee_wait_queue *wq);
 void optee_wait_queue_exit(struct optee_wait_queue *wq);
@@ -142,6 +142,7 @@ int optee_supp_read(struct tee_context *ctx, void __user *buf, size_t len);
 int optee_supp_write(struct tee_context *ctx, void __user *buf, size_t len);
 void optee_supp_init(struct optee_supp *supp);
 void optee_supp_uninit(struct optee_supp *supp);
+void optee_supp_release(struct optee_supp *supp);
 
 int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
 		    struct tee_param *param);
@@ -160,11 +161,26 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
 void optee_enable_shm_cache(struct optee *optee);
 void optee_disable_shm_cache(struct optee *optee);
 
+int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+		       struct page **pages, size_t num_pages,
+		       unsigned long start);
+int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm);
+
+int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
+			    struct page **pages, size_t num_pages,
+			    unsigned long start);
+int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm);
+
 int optee_from_msg_param(struct tee_param *params, size_t num_params,
 			 const struct optee_msg_param *msg_params);
 int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
 		       const struct tee_param *params);
 
+u64 *optee_allocate_pages_list(size_t num_entries);
+void optee_free_pages_list(void *array, size_t num_entries);
+void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
+			   size_t page_offset);
+
 /*
 * Small helpers
 */
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
index 13b7c98cdf2537..bbf0cf028c1620 100644
--- a/drivers/tee/optee/optee_smc.h
+++ b/drivers/tee/optee/optee_smc.h
@@ -112,12 +112,20 @@ struct optee_smc_calls_revision_result {
 * Trusted OS, not of the API.
 *
 * Returns revision in a0-1 in the same way as OPTEE_SMC_CALLS_REVISION
- * described above.
+ * described above. May optionally return a 32-bit build identifier in a2,
+ * with zero meaning unspecified.
 */
 #define OPTEE_SMC_FUNCID_GET_OS_REVISION OPTEE_MSG_FUNCID_GET_OS_REVISION
 #define OPTEE_SMC_CALL_GET_OS_REVISION \
 	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_REVISION)
 
+struct optee_smc_call_get_os_revision_result {
+	unsigned long major;
+	unsigned long minor;
+	unsigned long build_id;
+	unsigned long reserved1;
+};
+
 /*
 * Call with struct optee_msg_arg as argument
 *
@@ -222,6 +230,13 @@ struct optee_smc_get_shm_config_result {
 #define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM	BIT(0)
 /* Secure world can communicate via previously unregistered shared memory */
 #define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM	BIT(1)
+
+/*
+ * Secure world supports commands "register/unregister shared memory";
+ * secure world accepts command buffers located in any part of non-secure RAM
+ */
+#define OPTEE_SMC_SEC_CAP_DYNAMIC_SHM		BIT(2)
+
 #define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES	9
 #define OPTEE_SMC_EXCHANGE_CAPABILITIES \
 	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES)
@@ -298,7 +313,7 @@ struct optee_smc_disable_shm_cache_result {
 	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE)
 
 /*
- * Resume from RPC (for example after processing an IRQ)
+ * Resume from RPC (for example after processing a foreign interrupt)
 *
 * Call register usage:
 * a0	SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC
@@ -383,19 +398,19 @@ struct optee_smc_disable_shm_cache_result {
 	OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FREE)
 
 /*
- * Deliver an IRQ in normal world.
+ * Deliver a foreign interrupt to normal world.
 *
 * "Call" register usage:
- * a0	OPTEE_SMC_RETURN_RPC_IRQ
+ * a0	OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
 * a1-7	Resume information, must be preserved
 *
 * "Return" register usage:
 * a0	SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
 * a1-7	Preserved
 */
-#define OPTEE_SMC_RPC_FUNC_IRQ		4
-#define OPTEE_SMC_RETURN_RPC_IRQ \
-	OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_IRQ)
+#define OPTEE_SMC_RPC_FUNC_FOREIGN_INTR	4
+#define OPTEE_SMC_RETURN_RPC_FOREIGN_INTR \
+	OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FOREIGN_INTR)
 
 /*
 * Do an RPC request. The supplied struct optee_msg_arg tells which
diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
index 8814eca060216a..932b2c2ef2752a 100644
--- a/drivers/tee/optee/rpc.c
+++ b/drivers/tee/optee/rpc.c
@@ -16,8 +16,10 @@
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/tee_drv.h>
+#include "optee_bench.h"
 #include "optee_private.h"
 #include "optee_smc.h"
 
@@ -140,11 +142,8 @@ static void handle_rpc_func_cmd_wait(struct optee_msg_arg *arg)
 
 	msec_to_wait = arg->params[0].u.value.a;
 
-	/* set task's state to interruptible sleep */
-	set_current_state(TASK_INTERRUPTIBLE);
-
-	/* take a nap */
-	msleep(msec_to_wait);
+	/* Go to interruptible sleep */
+	msleep_interruptible(msec_to_wait);
 
 	arg->ret = TEEC_SUCCESS;
 	return;
@@ -195,15 +194,16 @@ static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
 	if (ret)
 		return ERR_PTR(-ENOMEM);
 
-	mutex_lock(&optee->supp.ctx_mutex);
+	mutex_lock(&optee->supp.mutex);
 	/* Increases count as secure world doesn't have a reference */
 	shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c);
-	mutex_unlock(&optee->supp.ctx_mutex);
+	mutex_unlock(&optee->supp.mutex);
 	return shm;
 }
 
 static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
-					  struct optee_msg_arg *arg)
+					  struct optee_msg_arg *arg,
+					  struct optee_call_ctx *call_ctx)
 {
 	phys_addr_t pa;
 	struct tee_shm *shm;
@@ -248,10 +248,49 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
 		goto bad;
 	}
 
-	arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
-	arg->params[0].u.tmem.buf_ptr = pa;
-	arg->params[0].u.tmem.size = sz;
-	arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+	sz = tee_shm_get_size(shm);
+
+	if (tee_shm_is_registered(shm)) {
+		struct page **pages;
+		u64 *pages_list;
+		size_t page_num;
+
+		pages = tee_shm_get_pages(shm, &page_num);
+		if (!pages || !page_num) {
+			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+			goto bad;
+		}
+
+		pages_list = optee_allocate_pages_list(page_num);
+		if (!pages_list) {
+			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+			goto bad;
+		}
+
+		call_ctx->pages_list = pages_list;
+		call_ctx->num_entries = page_num;
+
+		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+				      OPTEE_MSG_ATTR_NONCONTIG;
+		/*
+		 * In the least bits of u.tmem.buf_ptr we store the buffer
+		 * offset from the 4k page, as described in the OP-TEE ABI.
+		 */
+		arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
+			(tee_shm_get_page_offset(shm) &
+			 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
+		arg->params[0].u.tmem.size = tee_shm_get_size(shm);
+		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+
+		optee_fill_pages_list(pages_list, pages, page_num,
+				      tee_shm_get_page_offset(shm));
+	} else {
+		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
+		arg->params[0].u.tmem.buf_ptr = pa;
+		arg->params[0].u.tmem.size = sz;
+		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+	}
+
 	arg->ret = TEEC_SUCCESS;
 	return;
 bad:
@@ -310,8 +349,68 @@ static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
 	arg->ret = TEEC_SUCCESS;
 }
 
+static void free_pages_list(struct optee_call_ctx *call_ctx)
+{
+	if (call_ctx->pages_list) {
+		optee_free_pages_list(call_ctx->pages_list,
+				      call_ctx->num_entries);
+		call_ctx->pages_list = NULL;
+		call_ctx->num_entries = 0;
+	}
+}
+
+void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
+{
+	free_pages_list(call_ctx);
+}
+
+static void handle_rpc_func_cmd_bm_reg(struct optee_msg_arg *arg)
+{
+	u64 size;
+	u64 type;
+	u64 paddr;
+
+	if (arg->num_params != 1)
+		goto bad;
+
+	if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
+	    OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
+		goto bad;
+
+	type = arg->params[0].u.value.a;
+	switch (type) {
+	case OPTEE_MSG_RPC_CMD_BENCH_REG_NEW:
+		size = arg->params[0].u.value.c;
+		paddr = arg->params[0].u.value.b;
+		down_write(&optee_bench_ts_rwsem);
+		optee_bench_ts_global =
+			memremap(paddr, size, MEMREMAP_WB);
+		if (!optee_bench_ts_global) {
+			up_write(&optee_bench_ts_rwsem);
+			goto bad;
+		}
+		up_write(&optee_bench_ts_rwsem);
+		break;
+	case OPTEE_MSG_RPC_CMD_BENCH_REG_DEL:
+		down_write(&optee_bench_ts_rwsem);
+		if (optee_bench_ts_global)
+			memunmap(optee_bench_ts_global);
+		optee_bench_ts_global = NULL;
+		up_write(&optee_bench_ts_rwsem);
+		break;
+	default:
+		goto bad;
+	}
+
+	arg->ret = TEEC_SUCCESS;
+	return;
+bad:
+	arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
+
 static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
-				struct tee_shm *shm)
+				struct tee_shm *shm,
+				struct optee_call_ctx *call_ctx)
 {
 	struct optee_msg_arg *arg;
 
@@ -332,11 +431,15 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
 		handle_rpc_func_cmd_wait(arg);
 		break;
 	case OPTEE_MSG_RPC_CMD_SHM_ALLOC:
-		handle_rpc_func_cmd_shm_alloc(ctx, arg);
+		free_pages_list(call_ctx);
+		handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
 		break;
 	case OPTEE_MSG_RPC_CMD_SHM_FREE:
 		handle_rpc_func_cmd_shm_free(ctx, arg);
 		break;
+	case OPTEE_MSG_RPC_CMD_BENCH_REG:
+		handle_rpc_func_cmd_bm_reg(arg);
+		break;
 	default:
 		handle_rpc_supp_cmd(ctx, arg);
 	}
@@ -346,10 +449,12 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
 * optee_handle_rpc() - handle RPC from secure world
 * @ctx:	context doing the RPC
 * @param:	value of registers for the RPC
+ * @call_ctx:	call context. Preserved during one OP-TEE invocation
 *
 * Result of RPC is written back into @param.
*/ -void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param) +void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param, + struct optee_call_ctx *call_ctx) { struct tee_device *teedev = ctx->teedev; struct optee *optee = tee_get_drvdata(teedev); @@ -374,17 +479,17 @@ void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param) shm = reg_pair_to_ptr(param->a1, param->a2); tee_shm_free(shm); break; - case OPTEE_SMC_RPC_FUNC_IRQ: + case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR: /* - * An IRQ was raised while secure world was executing, - * since all IRQs are handled in Linux a dummy RPC is - * performed to let Linux take the IRQ through the normal + * A foreign interrupt was raised while secure world was + * executing, since they are handled in Linux a dummy RPC is + * performed to let Linux take the interrupt through the normal * vector. */ break; case OPTEE_SMC_RPC_FUNC_CMD: shm = reg_pair_to_ptr(param->a1, param->a2); - handle_rpc_func_cmd(ctx, optee, shm); + handle_rpc_func_cmd(ctx, optee, shm, call_ctx); break; default: pr_warn("Unknown RPC func 0x%x\n", diff --git a/drivers/tee/optee/shm_pool.c b/drivers/tee/optee/shm_pool.c new file mode 100644 index 00000000000000..49397813fff1e5 --- /dev/null +++ b/drivers/tee/optee/shm_pool.c @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2015, Linaro Limited + * Copyright (c) 2017, EPAM Systems + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include "optee_private.h" +#include "optee_smc.h" +#include "shm_pool.h" + +static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, + struct tee_shm *shm, size_t size) +{ + unsigned int order = get_order(size); + struct page *page; + + page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); + if (!page) + return -ENOMEM; + + shm->kaddr = page_address(page); + shm->paddr = page_to_phys(page); + shm->size = PAGE_SIZE << order; + + return 0; +} + +static void pool_op_free(struct tee_shm_pool_mgr *poolm, + struct tee_shm *shm) +{ + free_pages((unsigned long)shm->kaddr, get_order(shm->size)); + shm->kaddr = NULL; +} + +static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm) +{ + kfree(poolm); +} + +static const struct tee_shm_pool_mgr_ops pool_ops = { + .alloc = pool_op_alloc, + .free = pool_op_free, + .destroy_poolmgr = pool_op_destroy_poolmgr, +}; + +/** + * optee_shm_pool_alloc_pages() - create page-based allocator pool + * + * This pool is used when OP-TEE supports dymanic SHM. In this case + * command buffers and such are allocated from kernel's own memory. 
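[Editor note: a hedged sketch of how a probe path would pair two of these managers into a pool once dynamic SHM is negotiated; the function name is ours and the real wiring lives in core.c, not shown here.]

        static struct tee_shm_pool *sketch_dyn_shm_pool(void)
        {
                struct tee_shm_pool_mgr *priv_mgr, *dmabuf_mgr;
                struct tee_shm_pool *pool;

                priv_mgr = optee_shm_pool_alloc_pages();
                if (IS_ERR(priv_mgr))
                        return ERR_CAST(priv_mgr);

                dmabuf_mgr = optee_shm_pool_alloc_pages();
                if (IS_ERR(dmabuf_mgr)) {
                        tee_shm_pool_mgr_destroy(priv_mgr);
                        return ERR_CAST(dmabuf_mgr);
                }

                pool = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
                if (IS_ERR(pool)) {
                        tee_shm_pool_mgr_destroy(dmabuf_mgr);
                        tee_shm_pool_mgr_destroy(priv_mgr);
                }
                return pool;
        }
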
+ */ +struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void) +{ + struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); + + if (!mgr) + return ERR_PTR(-ENOMEM); + + mgr->ops = &pool_ops; + + return mgr; +} diff --git a/drivers/tee/optee/shm_pool.h b/drivers/tee/optee/shm_pool.h new file mode 100644 index 00000000000000..4e753c3bf7ec29 --- /dev/null +++ b/drivers/tee/optee/shm_pool.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2015, Linaro Limited + * Copyright (c) 2016, EPAM Systems + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef SHM_POOL_H +#define SHM_POOL_H + +#include + +struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void); + +#endif diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c index b4ea0678a43627..df35fc01fd3e5e 100644 --- a/drivers/tee/optee/supp.c +++ b/drivers/tee/optee/supp.c @@ -16,21 +16,61 @@ #include #include "optee_private.h" +struct optee_supp_req { + struct list_head link; + + bool busy; + u32 func; + u32 ret; + size_t num_params; + struct tee_param *param; + + struct completion c; +}; + void optee_supp_init(struct optee_supp *supp) { memset(supp, 0, sizeof(*supp)); - mutex_init(&supp->ctx_mutex); - mutex_init(&supp->thrd_mutex); - mutex_init(&supp->supp_mutex); - init_completion(&supp->data_to_supp); - init_completion(&supp->data_from_supp); + mutex_init(&supp->mutex); + init_completion(&supp->reqs_c); + idr_init(&supp->idr); + INIT_LIST_HEAD(&supp->reqs); + supp->req_id = -1; } void optee_supp_uninit(struct optee_supp *supp) { - mutex_destroy(&supp->ctx_mutex); - mutex_destroy(&supp->thrd_mutex); - mutex_destroy(&supp->supp_mutex); + mutex_destroy(&supp->mutex); + idr_destroy(&supp->idr); +} + +void optee_supp_release(struct optee_supp *supp) +{ + int id; + struct optee_supp_req *req; + struct optee_supp_req *req_tmp; + + mutex_lock(&supp->mutex); + + /* Abort all request retrieved by supplicant */ + idr_for_each_entry(&supp->idr, req, id) { + req->busy = false; + idr_remove(&supp->idr, id); + req->ret = TEEC_ERROR_COMMUNICATION; + complete(&req->c); + } + + /* Abort all queued requests */ + list_for_each_entry_safe(req, req_tmp, &supp->reqs, link) { + list_del(&req->link); + req->ret = TEEC_ERROR_COMMUNICATION; + complete(&req->c); + } + + supp->ctx = NULL; + supp->req_id = -1; + + mutex_unlock(&supp->mutex); } /** @@ -44,53 +84,42 @@ void optee_supp_uninit(struct optee_supp *supp) */ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, struct tee_param *param) + { - bool interruptable; struct optee *optee = tee_get_drvdata(ctx->teedev); struct optee_supp *supp = &optee->supp; + struct optee_supp_req *req = kzalloc(sizeof(*req), GFP_KERNEL); + bool interruptable; u32 ret; - /* - * Other threads blocks here until we've copied our answer from - * supplicant. - */ - while (mutex_lock_interruptible(&supp->thrd_mutex)) { - /* See comment below on when the RPC can be interrupted. 
*/ - mutex_lock(&supp->ctx_mutex); - interruptable = !supp->ctx; - mutex_unlock(&supp->ctx_mutex); - if (interruptable) - return TEEC_ERROR_COMMUNICATION; - } + if (!req) + return TEEC_ERROR_OUT_OF_MEMORY; - /* - * We have exclusive access now since the supplicant at this - * point is either doing a - * wait_for_completion_interruptible(&supp->data_to_supp) or is in - * userspace still about to do the ioctl() to enter - * optee_supp_recv() below. - */ + init_completion(&req->c); + req->func = func; + req->num_params = num_params; + req->param = param; - supp->func = func; - supp->num_params = num_params; - supp->param = param; - supp->req_posted = true; + /* Insert the request in the request list */ + mutex_lock(&supp->mutex); + list_add_tail(&req->link, &supp->reqs); + mutex_unlock(&supp->mutex); - /* Let supplicant get the data */ - complete(&supp->data_to_supp); + /* Tell an eventual waiter there's a new request */ + complete(&supp->reqs_c); /* * Wait for supplicant to process and return result, once we've - * returned from wait_for_completion(data_from_supp) we have + * returned from wait_for_completion(&req->c) successfully we have * exclusive access again. */ - while (wait_for_completion_interruptible(&supp->data_from_supp)) { - mutex_lock(&supp->ctx_mutex); + while (wait_for_completion_interruptible(&req->c)) { + mutex_lock(&supp->mutex); interruptable = !supp->ctx; if (interruptable) { /* * There's no supplicant available and since the - * supp->ctx_mutex currently is held none can + * supp->mutex currently is held none can * become available until the mutex released * again. * @@ -101,24 +130,91 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, * will serve all requests in a timely manner and * interrupting then wouldn't make sense. */ - supp->ret = TEEC_ERROR_COMMUNICATION; - init_completion(&supp->data_to_supp); + interruptable = !req->busy; + if (!req->busy) + list_del(&req->link); } - mutex_unlock(&supp->ctx_mutex); - if (interruptable) + mutex_unlock(&supp->mutex); + + if (interruptable) { + req->ret = TEEC_ERROR_COMMUNICATION; break; + } } - ret = supp->ret; - supp->param = NULL; - supp->req_posted = false; - - /* We're done, let someone else talk to the supplicant now. */ - mutex_unlock(&supp->thrd_mutex); + ret = req->ret; + kfree(req); return ret; } +static struct optee_supp_req *supp_pop_entry(struct optee_supp *supp, + int num_params, int *id) +{ + struct optee_supp_req *req; + + if (supp->req_id != -1) { + /* + * Supplicant should not mix synchronous and asnynchronous + * requests. + */ + return ERR_PTR(-EINVAL); + } + + if (list_empty(&supp->reqs)) + return NULL; + + req = list_first_entry(&supp->reqs, struct optee_supp_req, link); + + if (num_params < req->num_params) { + /* Not enough room for parameters */ + return ERR_PTR(-EINVAL); + } + + *id = idr_alloc(&supp->idr, req, 1, 0, GFP_KERNEL); + if (*id < 0) + return ERR_PTR(-ENOMEM); + + list_del(&req->link); + req->busy = true; + + return req; +} + +static int supp_check_recv_params(size_t num_params, struct tee_param *params, + size_t *num_meta) +{ + size_t n; + + if (!num_params) + return -EINVAL; + + /* + * If there's memrefs we need to decrease those as they where + * increased earlier and we'll even refuse to accept any below. 
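[Editor note: with requests now queued and identified through the IDR, a supplicant can pull several requests before answering any of them. A hedged userspace sketch of one receive/answer turn; the buffer reuse works because the recv and send headers share one layout, the function name is ours, and error handling is trimmed.]

        #include <stdint.h>
        #include <sys/ioctl.h>
        #include <linux/tee.h>

        /* One supplicant worker turn: fetch a request, then answer it. */
        static int supp_serve_once(int fd)
        {
                __u8 buf[TEE_MAX_ARG_SIZE] = { 0 };
                struct tee_iocl_supp_recv_arg *recv = (void *)buf;
                struct tee_iocl_supp_send_arg *send = (void *)buf;
                struct tee_ioctl_param *params = (void *)(buf + sizeof(*recv));
                struct tee_ioctl_buf_data data = {
                        .buf_ptr = (uintptr_t)buf,
                        .buf_len = sizeof(buf),
                };

                /* One extra zeroed slot the kernel may turn into a meta */
                recv->num_params = 1 + 3;
                if (ioctl(fd, TEE_IOC_SUPPL_RECV, &data))
                        return -1;

                if (params[0].attr == (TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
                                       TEE_IOCTL_PARAM_ATTR_META)) {
                        /*
                         * Asynchronous mode: params[0].u.value.a carries the
                         * request id and must go back unchanged in the reply.
                         */
                }

                /* ... serve recv->func using params[1..] here ... */

                send->ret = 0;  /* TEEC_SUCCESS */
                return ioctl(fd, TEE_IOC_SUPPL_SEND, &data);
        }
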
+ */ + for (n = 0; n < num_params; n++) + if (tee_param_is_memref(params + n) && params[n].u.memref.shm) + tee_shm_put(params[n].u.memref.shm); + + /* + * We only expect parameters as TEE_IOCTL_PARAM_ATTR_TYPE_NONE with + * or without the TEE_IOCTL_PARAM_ATTR_META bit set. + */ + for (n = 0; n < num_params; n++) + if (params[n].attr && + params[n].attr != TEE_IOCTL_PARAM_ATTR_META) + return -EINVAL; + + /* At most we'll need one meta parameter so no need to check for more */ + if (params->attr == TEE_IOCTL_PARAM_ATTR_META) + *num_meta = 1; + else + *num_meta = 0; + + return 0; +} + /** * optee_supp_recv() - receive request for supplicant * @ctx: context receiving the request @@ -135,65 +231,99 @@ int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params, struct tee_device *teedev = ctx->teedev; struct optee *optee = tee_get_drvdata(teedev); struct optee_supp *supp = &optee->supp; + struct optee_supp_req *req = NULL; + int id; + size_t num_meta; int rc; - /* - * In case two threads in one supplicant is calling this function - * simultaneously we need to protect the data with a mutex which - * we'll release before returning. - */ - mutex_lock(&supp->supp_mutex); + rc = supp_check_recv_params(*num_params, param, &num_meta); + if (rc) + return rc; + + while (true) { + mutex_lock(&supp->mutex); + req = supp_pop_entry(supp, *num_params - num_meta, &id); + mutex_unlock(&supp->mutex); + + if (req) { + if (IS_ERR(req)) + return PTR_ERR(req); + break; + } - if (supp->supp_next_send) { /* - * optee_supp_recv() has been called again without - * a optee_supp_send() in between. Supplicant has - * probably been restarted before it was able to - * write back last result. Abort last request and - * wait for a new. + * If we didn't get a request we'll block in + * wait_for_completion() to avoid needless spinning. + * + * This is where supplicant will be hanging most of + * the time, let's make this interruptable so we + * can easily restart supplicant if needed. */ - if (supp->req_posted) { - supp->ret = TEEC_ERROR_COMMUNICATION; - supp->supp_next_send = false; - complete(&supp->data_from_supp); - } + if (wait_for_completion_interruptible(&supp->reqs_c)) + return -ERESTARTSYS; } - /* - * This is where supplicant will be hanging most of the - * time, let's make this interruptable so we can easily - * restart supplicant if needed. - */ - if (wait_for_completion_interruptible(&supp->data_to_supp)) { - rc = -ERESTARTSYS; - goto out; + if (num_meta) { + /* + * tee-supplicant support meta parameters -> requsts can be + * processed asynchronously. + */ + param->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT | + TEE_IOCTL_PARAM_ATTR_META; + param->u.value.a = id; + param->u.value.b = 0; + param->u.value.c = 0; + } else { + mutex_lock(&supp->mutex); + supp->req_id = id; + mutex_unlock(&supp->mutex); } - /* We have exlusive access to the data */ + *func = req->func; + *num_params = req->num_params + num_meta; + memcpy(param + num_meta, req->param, + sizeof(struct tee_param) * req->num_params); - if (*num_params < supp->num_params) { - /* - * Not enough room for parameters, tell supplicant - * it failed and abort last request. 
- */ - supp->ret = TEEC_ERROR_COMMUNICATION; - rc = -EINVAL; - complete(&supp->data_from_supp); - goto out; + return 0; +} + +static struct optee_supp_req *supp_pop_req(struct optee_supp *supp, + size_t num_params, + struct tee_param *param, + size_t *num_meta) +{ + struct optee_supp_req *req; + int id; + size_t nm; + const u32 attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT | + TEE_IOCTL_PARAM_ATTR_META; + + if (!num_params) + return ERR_PTR(-EINVAL); + + if (supp->req_id == -1) { + if (param->attr != attr) + return ERR_PTR(-EINVAL); + id = param->u.value.a; + nm = 1; + } else { + id = supp->req_id; + nm = 0; } - *func = supp->func; - *num_params = supp->num_params; - memcpy(param, supp->param, - sizeof(struct tee_param) * supp->num_params); + req = idr_find(&supp->idr, id); + if (!req) + return ERR_PTR(-ENOENT); - /* Allow optee_supp_send() below to do its work */ - supp->supp_next_send = true; + if ((num_params - nm) != req->num_params) + return ERR_PTR(-EINVAL); - rc = 0; -out: - mutex_unlock(&supp->supp_mutex); - return rc; + req->busy = false; + idr_remove(&supp->idr, id); + supp->req_id = -1; + *num_meta = nm; + + return req; } /** @@ -211,63 +341,42 @@ int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params, struct tee_device *teedev = ctx->teedev; struct optee *optee = tee_get_drvdata(teedev); struct optee_supp *supp = &optee->supp; + struct optee_supp_req *req; size_t n; - int rc = 0; + size_t num_meta; - /* - * We still have exclusive access to the data since that's how we - * left it when returning from optee_supp_read(). - */ - - /* See comment on mutex in optee_supp_read() above */ - mutex_lock(&supp->supp_mutex); - - if (!supp->supp_next_send) { - /* - * Something strange is going on, supplicant shouldn't - * enter optee_supp_send() in this state - */ - rc = -ENOENT; - goto out; - } + mutex_lock(&supp->mutex); + req = supp_pop_req(supp, num_params, param, &num_meta); + mutex_unlock(&supp->mutex); - if (num_params != supp->num_params) { - /* - * Something is wrong, let supplicant restart. Next call to - * optee_supp_recv() will give an error to the requesting - * thread and release it. - */ - rc = -EINVAL; - goto out; + if (IS_ERR(req)) { + /* Something is wrong, let supplicant restart. 
*/ + return PTR_ERR(req); } /* Update out and in/out parameters */ - for (n = 0; n < num_params; n++) { - struct tee_param *p = supp->param + n; + for (n = 0; n < req->num_params; n++) { + struct tee_param *p = req->param + n; - switch (p->attr) { + switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: - p->u.value.a = param[n].u.value.a; - p->u.value.b = param[n].u.value.b; - p->u.value.c = param[n].u.value.c; + p->u.value.a = param[n + num_meta].u.value.a; + p->u.value.b = param[n + num_meta].u.value.b; + p->u.value.c = param[n + num_meta].u.value.c; break; case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: - p->u.memref.size = param[n].u.memref.size; + p->u.memref.size = param[n + num_meta].u.memref.size; break; default: break; } } - supp->ret = ret; - - /* Allow optee_supp_recv() above to do its work */ - supp->supp_next_send = false; + req->ret = ret; /* Let the requesting thread continue */ - complete(&supp->data_from_supp); -out: - mutex_unlock(&supp->supp_mutex); - return rc; + complete(&req->c); + + return 0; } diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index 5c60bf4423e68d..9729c07a43a24c 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -38,15 +38,13 @@ static DEFINE_SPINLOCK(driver_lock); static struct class *tee_class; static dev_t tee_devt; -static int tee_open(struct inode *inode, struct file *filp) +static struct tee_context *teedev_open(struct tee_device *teedev) { int rc; - struct tee_device *teedev; struct tee_context *ctx; - teedev = container_of(inode->i_cdev, struct tee_device, cdev); if (!tee_device_get(teedev)) - return -EINVAL; + return ERR_PTR(-EINVAL); ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) { @@ -54,33 +52,67 @@ static int tee_open(struct inode *inode, struct file *filp) goto err; } + kref_init(&ctx->refcount); ctx->teedev = teedev; INIT_LIST_HEAD(&ctx->list_shm); - filp->private_data = ctx; rc = teedev->desc->ops->open(ctx); if (rc) goto err; - return 0; + return ctx; err: kfree(ctx); tee_device_put(teedev); - return rc; + return ERR_PTR(rc); + } -static int tee_release(struct inode *inode, struct file *filp) +void teedev_ctx_get(struct tee_context *ctx) { - struct tee_context *ctx = filp->private_data; - struct tee_device *teedev = ctx->teedev; - struct tee_shm *shm; + if (ctx->releasing) + return; + kref_get(&ctx->refcount); +} + +static void teedev_ctx_release(struct kref *ref) +{ + struct tee_context *ctx = container_of(ref, struct tee_context, + refcount); + ctx->releasing = true; ctx->teedev->desc->ops->release(ctx); - mutex_lock(&ctx->teedev->mutex); - list_for_each_entry(shm, &ctx->list_shm, link) - shm->ctx = NULL; - mutex_unlock(&ctx->teedev->mutex); kfree(ctx); - tee_device_put(teedev); +} + +void teedev_ctx_put(struct tee_context *ctx) +{ + if (ctx->releasing) + return; + + kref_put(&ctx->refcount, teedev_ctx_release); +} + +static void teedev_close_context(struct tee_context *ctx) +{ + tee_device_put(ctx->teedev); + teedev_ctx_put(ctx); +} + +static int tee_open(struct inode *inode, struct file *filp) +{ + struct tee_context *ctx; + + ctx = teedev_open(container_of(inode->i_cdev, struct tee_device, cdev)); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + filp->private_data = ctx; + return 0; +} + +static int tee_release(struct inode *inode, struct file *filp) +{ + teedev_close_context(filp->private_data); return 0; } @@ -90,8 +122,13 @@ static int tee_ioctl_version(struct 
tee_context *ctx, struct tee_ioctl_version_data vers; ctx->teedev->desc->ops->get_version(ctx->teedev, &vers); + + if (ctx->teedev->desc->flags & TEE_DESC_PRIVILEGED) + vers.gen_caps |= TEE_GEN_CAP_PRIVILEGED; + if (copy_to_user(uvers, &vers, sizeof(vers))) return -EFAULT; + return 0; } @@ -109,8 +146,6 @@ static int tee_ioctl_shm_alloc(struct tee_context *ctx, if (data.flags) return -EINVAL; - data.id = -1; - shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); if (IS_ERR(shm)) return PTR_ERR(shm); @@ -133,6 +168,79 @@ static int tee_ioctl_shm_alloc(struct tee_context *ctx, return ret; } +static int +tee_ioctl_shm_register(struct tee_context *ctx, + struct tee_ioctl_shm_register_data __user *udata) +{ + long ret; + struct tee_ioctl_shm_register_data data; + struct tee_shm *shm; + + if (copy_from_user(&data, udata, sizeof(data))) + return -EFAULT; + + /* Currently no input flags are supported */ + if (data.flags) + return -EINVAL; + + shm = tee_shm_register(ctx, data.addr, data.length, + TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + data.id = shm->id; + data.flags = shm->flags; + data.length = shm->size; + + if (copy_to_user(udata, &data, sizeof(data))) + ret = -EFAULT; + else + ret = tee_shm_get_fd(shm); + /* + * When user space closes the file descriptor the shared memory + * should be freed or if tee_shm_get_fd() failed then it will + * be freed immediately. + */ + tee_shm_put(shm); + return ret; +} + +static int tee_ioctl_shm_register_fd(struct tee_context *ctx, + struct tee_ioctl_shm_register_fd_data __user *udata) +{ + struct tee_ioctl_shm_register_fd_data data; + struct tee_shm *shm; + long ret; + + if (copy_from_user(&data, udata, sizeof(data))) + return -EFAULT; + + /* Currently no input flags are supported */ + if (data.flags) + return -EINVAL; + + shm = tee_shm_register_fd(ctx, data.fd); + if (IS_ERR_OR_NULL(shm)) + return -EINVAL; + + data.id = shm->id; + data.flags = shm->flags; + data.size = shm->size; + + if (copy_to_user(udata, &data, sizeof(data))) + ret = -EFAULT; + else + ret = tee_shm_get_fd(shm); + + /* + * When user space closes the file descriptor the shared memory + * should be freed or if tee_shm_get_fd() failed then it will + * be freed immediately. + */ + tee_shm_put(shm); + return ret; +} + static int params_from_user(struct tee_context *ctx, struct tee_param *params, size_t num_params, struct tee_ioctl_param __user *uparams) @@ -147,11 +255,11 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params, return -EFAULT; /* All unused attribute bits has to be zero */ - if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK) + if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK) return -EINVAL; params[n].attr = ip.attr; - switch (ip.attr) { + switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { case TEE_IOCTL_PARAM_ATTR_TYPE_NONE: case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: break; @@ -176,6 +284,17 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params, if (IS_ERR(shm)) return PTR_ERR(shm); + /* + * Ensure offset + size does not overflow offset + * and does not overflow the size of the referred + * shared memory object. 
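[Editor note: the same test as a standalone predicate, hedged and ours, with a worked case: offs = 16 and size = U64_MAX - 8 wraps to end = 7, so end < offs rejects it even though end <= shm_size alone would pass.]

        static bool memref_in_bounds(u64 offs, u64 size, u64 shm_size)
        {
                u64 end = offs + size;  /* may wrap around */

                return end >= offs && end <= shm_size;
        }
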
+ */ + if ((ip.a + ip.b) < ip.a || + (ip.a + ip.b) > shm->size) { + tee_shm_put(shm); + return -EINVAL; + } + params[n].u.memref.shm_offs = ip.a; params[n].u.memref.size = ip.b; params[n].u.memref.shm = shm; @@ -216,18 +335,6 @@ static int params_to_user(struct tee_ioctl_param __user *uparams, return 0; } -static bool param_is_memref(struct tee_param *param) -{ - switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { - case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: - case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: - case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: - return true; - default: - return false; - } -} - static int tee_ioctl_open_session(struct tee_context *ctx, struct tee_ioctl_buf_data __user *ubuf) { @@ -291,7 +398,7 @@ static int tee_ioctl_open_session(struct tee_context *ctx, if (params) { /* Decrease ref count for all valid shared memory pointers */ for (n = 0; n < arg.num_params; n++) - if (param_is_memref(params + n) && + if (tee_param_is_memref(params + n) && params[n].u.memref.shm) tee_shm_put(params[n].u.memref.shm); kfree(params); @@ -353,7 +460,7 @@ static int tee_ioctl_invoke(struct tee_context *ctx, if (params) { /* Decrease ref count for all valid shared memory pointers */ for (n = 0; n < arg.num_params; n++) - if (param_is_memref(params + n) && + if (tee_param_is_memref(params + n) && params[n].u.memref.shm) tee_shm_put(params[n].u.memref.shm); kfree(params); @@ -401,8 +508,8 @@ static int params_to_supp(struct tee_context *ctx, struct tee_ioctl_param ip; struct tee_param *p = params + n; - ip.attr = p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK; - switch (p->attr) { + ip.attr = p->attr; + switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: ip.a = p->u.value.a; @@ -466,6 +573,10 @@ static int tee_ioctl_supp_recv(struct tee_context *ctx, if (!params) return -ENOMEM; + rc = params_from_user(ctx, params, num_params, uarg->params); + if (rc) + goto out; + rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params); if (rc) goto out; @@ -495,11 +606,11 @@ static int params_from_supp(struct tee_param *params, size_t num_params, return -EFAULT; /* All unused attribute bits has to be zero */ - if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK) + if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK) return -EINVAL; p->attr = ip.attr; - switch (ip.attr) { + switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: /* Only out and in/out values can be updated */ @@ -581,6 +692,10 @@ static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return tee_ioctl_version(ctx, uarg); case TEE_IOC_SHM_ALLOC: return tee_ioctl_shm_alloc(ctx, uarg); + case TEE_IOC_SHM_REGISTER: + return tee_ioctl_shm_register(ctx, uarg); + case TEE_IOC_SHM_REGISTER_FD: + return tee_ioctl_shm_register_fd(ctx, uarg); case TEE_IOC_OPEN_SESSION: return tee_ioctl_open_session(ctx, uarg); case TEE_IOC_INVOKE: @@ -637,7 +752,7 @@ struct tee_device *tee_device_alloc(const struct tee_desc *teedesc, { struct tee_device *teedev; void *ret; - int rc; + int rc, max_id; int offs = 0; if (!teedesc || !teedesc->name || !teedesc->ops || @@ -651,16 +766,20 @@ struct tee_device *tee_device_alloc(const struct tee_desc *teedesc, goto err; } - if (teedesc->flags & TEE_DESC_PRIVILEGED) + max_id = TEE_NUM_DEVICES / 2; + + if (teedesc->flags & TEE_DESC_PRIVILEGED) { offs = TEE_NUM_DEVICES / 2; + max_id = TEE_NUM_DEVICES; + } spin_lock(&driver_lock); - 
teedev->id = find_next_zero_bit(dev_mask, TEE_NUM_DEVICES, offs); - if (teedev->id < TEE_NUM_DEVICES) + teedev->id = find_next_zero_bit(dev_mask, max_id, offs); + if (teedev->id < max_id) set_bit(teedev->id, dev_mask); spin_unlock(&driver_lock); - if (teedev->id >= TEE_NUM_DEVICES) { + if (teedev->id >= max_id) { ret = ERR_PTR(-ENOMEM); goto err; } @@ -857,6 +976,95 @@ void *tee_get_drvdata(struct tee_device *teedev) } EXPORT_SYMBOL_GPL(tee_get_drvdata); +struct match_dev_data { + struct tee_ioctl_version_data *vers; + const void *data; + int (*match)(struct tee_ioctl_version_data *, const void *); +}; + +static int match_dev(struct device *dev, const void *data) +{ + const struct match_dev_data *match_data = data; + struct tee_device *teedev = container_of(dev, struct tee_device, dev); + + teedev->desc->ops->get_version(teedev, match_data->vers); + return match_data->match(match_data->vers, match_data->data); +} + +struct tee_context * +tee_client_open_context(struct tee_context *start, + int (*match)(struct tee_ioctl_version_data *, + const void *), + const void *data, struct tee_ioctl_version_data *vers) +{ + struct device *dev = NULL; + struct device *put_dev = NULL; + struct tee_context *ctx = NULL; + struct tee_ioctl_version_data v; + struct match_dev_data match_data = { vers ? vers : &v, data, match }; + + if (start) + dev = &start->teedev->dev; + + do { + dev = class_find_device(tee_class, dev, &match_data, match_dev); + if (!dev) { + ctx = ERR_PTR(-ENOENT); + break; + } + + put_device(put_dev); + put_dev = dev; + + ctx = teedev_open(container_of(dev, struct tee_device, dev)); + } while (IS_ERR(ctx) && PTR_ERR(ctx) != -ENOMEM); + + put_device(put_dev); + return ctx; +} +EXPORT_SYMBOL_GPL(tee_client_open_context); + +void tee_client_close_context(struct tee_context *ctx) +{ + teedev_close_context(ctx); +} +EXPORT_SYMBOL_GPL(tee_client_close_context); + +void tee_client_get_version(struct tee_context *ctx, + struct tee_ioctl_version_data *vers) +{ + ctx->teedev->desc->ops->get_version(ctx->teedev, vers); +} +EXPORT_SYMBOL_GPL(tee_client_get_version); + +int tee_client_open_session(struct tee_context *ctx, + struct tee_ioctl_open_session_arg *arg, + struct tee_param *param) +{ + if (!ctx->teedev->desc->ops->open_session) + return -EINVAL; + return ctx->teedev->desc->ops->open_session(ctx, arg, param); +} +EXPORT_SYMBOL_GPL(tee_client_open_session); + +int tee_client_close_session(struct tee_context *ctx, u32 session) +{ + if (!ctx->teedev->desc->ops->close_session) + return -EINVAL; + return ctx->teedev->desc->ops->close_session(ctx, session); +} +EXPORT_SYMBOL_GPL(tee_client_close_session); + +int tee_client_invoke_func(struct tee_context *ctx, + struct tee_ioctl_invoke_arg *arg, + struct tee_param *param) +{ + if (!ctx->teedev->desc->ops->invoke_func) + return -EINVAL; + return ctx->teedev->desc->ops->invoke_func(ctx, arg, param); +} +EXPORT_SYMBOL_GPL(tee_client_invoke_func); + static int __init tee_init(void) { int rc; diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h index 21cb6be8bce94d..85d99d621603d7 100644 --- a/drivers/tee/tee_private.h +++ b/drivers/tee/tee_private.h @@ -21,68 +21,15 @@ #include #include -struct tee_device; - -/** - * struct tee_shm - shared memory object - * @teedev: device used to allocate the object - * @ctx: context using the object, if NULL the context is gone - * @link link element - * @paddr: physical address of the shared memory - * @kaddr: virtual address of the shared memory - * @size: size of shared memory - * @dmabuf: dmabuf 
used to for exporting to user space - * @flags: defined by TEE_SHM_* in tee_drv.h - * @id: unique id of a shared memory object on this device - */ -struct tee_shm { - struct tee_device *teedev; - struct tee_context *ctx; - struct list_head link; - phys_addr_t paddr; - void *kaddr; - size_t size; - struct dma_buf *dmabuf; - u32 flags; - int id; -}; - -struct tee_shm_pool_mgr; - -/** - * struct tee_shm_pool_mgr_ops - shared memory pool manager operations - * @alloc: called when allocating shared memory - * @free: called when freeing shared memory - */ -struct tee_shm_pool_mgr_ops { - int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm, - size_t size); - void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm); -}; - -/** - * struct tee_shm_pool_mgr - shared memory manager - * @ops: operations - * @private_data: private data for the shared memory manager - */ -struct tee_shm_pool_mgr { - const struct tee_shm_pool_mgr_ops *ops; - void *private_data; -}; - /** * struct tee_shm_pool - shared memory pool * @private_mgr: pool manager for shared memory only between kernel * and secure world * @dma_buf_mgr: pool manager for shared memory exported to user space - * @destroy: called when destroying the pool - * @private_data: private data for the pool */ struct tee_shm_pool { - struct tee_shm_pool_mgr private_mgr; - struct tee_shm_pool_mgr dma_buf_mgr; - void (*destroy)(struct tee_shm_pool *pool); - void *private_data; + struct tee_shm_pool_mgr *private_mgr; + struct tee_shm_pool_mgr *dma_buf_mgr; }; #define TEE_DEVICE_FLAG_REGISTERED 0x1 @@ -126,4 +73,7 @@ int tee_shm_get_fd(struct tee_shm *shm); bool tee_device_get(struct tee_device *teedev); void tee_device_put(struct tee_device *teedev); +void teedev_ctx_get(struct tee_context *ctx); +void teedev_ctx_put(struct tee_context *ctx); + #endif /*TEE_PRIVATE_H*/ diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index d356d7f025eb4c..0d920b12c03342 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -20,10 +20,17 @@ #include #include "tee_private.h" +/* extra references appended to shm object for registered shared memory */ +struct tee_shm_dmabuf_ref { + struct tee_shm shm; + struct dma_buf *dmabuf; + struct dma_buf_attachment *attach; + struct sg_table *sgt; +}; + static void tee_shm_release(struct tee_shm *shm) { struct tee_device *teedev = shm->teedev; - struct tee_shm_pool_mgr *poolm; mutex_lock(&teedev->mutex); idr_remove(&teedev->idr, shm->id); @@ -31,14 +38,41 @@ static void tee_shm_release(struct tee_shm *shm) list_del(&shm->link); mutex_unlock(&teedev->mutex); - if (shm->flags & TEE_SHM_DMA_BUF) - poolm = &teedev->pool->dma_buf_mgr; - else - poolm = &teedev->pool->private_mgr; + if (shm->flags & TEE_SHM_EXT_DMA_BUF) { + struct tee_shm_dmabuf_ref *ref; - poolm->ops->free(poolm, shm); - kfree(shm); + ref = container_of(shm, struct tee_shm_dmabuf_ref, shm); + dma_buf_unmap_attachment(ref->attach, ref->sgt, + DMA_BIDIRECTIONAL); + dma_buf_detach(shm->dmabuf, ref->attach); + dma_buf_put(ref->dmabuf); + } else if (shm->flags & TEE_SHM_POOL) { + struct tee_shm_pool_mgr *poolm; + + if (shm->flags & TEE_SHM_DMA_BUF) + poolm = teedev->pool->dma_buf_mgr; + else + poolm = teedev->pool->private_mgr; + + poolm->ops->free(poolm, shm); + } else if (shm->flags & TEE_SHM_REGISTER) { + size_t n; + int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm); + + if (rc) + dev_err(teedev->dev.parent, + "unregister shm %p failed: %d", shm, rc); + + for (n = 0; n < shm->num_pages; n++) + put_page(shm->pages[n]); + 
kfree(shm->pages); + } + + if (shm->ctx) + teedev_ctx_put(shm->ctx); + + kfree(shm); tee_device_put(teedev); } @@ -76,11 +110,15 @@ static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) struct tee_shm *shm = dmabuf->priv; size_t size = vma->vm_end - vma->vm_start; + /* Refuse sharing shared memory provided by application */ + if (shm->flags & TEE_SHM_REGISTER) + return -EINVAL; + return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT, size, vma->vm_page_prot); } -static struct dma_buf_ops tee_shm_dma_buf_ops = { +static const struct dma_buf_ops tee_shm_dma_buf_ops = { .map_dma_buf = tee_shm_op_map_dma_buf, .unmap_dma_buf = tee_shm_op_unmap_dma_buf, .release = tee_shm_op_release, @@ -89,26 +127,20 @@ static struct dma_buf_ops tee_shm_dma_buf_ops = { .mmap = tee_shm_op_mmap, }; -/** - * tee_shm_alloc() - Allocate shared memory - * @ctx: Context that allocates the shared memory - * @size: Requested size of shared memory - * @flags: Flags setting properties for the requested shared memory. - * - * Memory allocated as global shared memory is automatically freed when the - * TEE file pointer is closed. The @flags field uses the bits defined by - * TEE_SHM_* in . TEE_SHM_MAPPED must currently always be - * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and - * associated with a dma-buf handle, else driver private memory. - */ -struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) +static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx, + struct tee_device *teedev, + size_t size, u32 flags) { - struct tee_device *teedev = ctx->teedev; struct tee_shm_pool_mgr *poolm = NULL; struct tee_shm *shm; void *ret; int rc; + if (ctx && ctx->teedev != teedev) { + dev_err(teedev->dev.parent, "ctx and teedev mismatch\n"); + return ERR_PTR(-EINVAL); + } + if (!(flags & TEE_SHM_MAPPED)) { dev_err(teedev->dev.parent, "only mapped allocations supported\n"); @@ -135,13 +167,13 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) goto err_dev_put; } - shm->flags = flags; + shm->flags = flags | TEE_SHM_POOL; shm->teedev = teedev; shm->ctx = ctx; if (flags & TEE_SHM_DMA_BUF) - poolm = &teedev->pool->dma_buf_mgr; + poolm = teedev->pool->dma_buf_mgr; else - poolm = &teedev->pool->private_mgr; + poolm = teedev->pool->private_mgr; rc = poolm->ops->alloc(poolm, shm, size); if (rc) { @@ -171,9 +203,13 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) goto err_rem; } } - mutex_lock(&teedev->mutex); - list_add_tail(&shm->link, &ctx->list_shm); - mutex_unlock(&teedev->mutex); + + if (ctx) { + teedev_ctx_get(ctx); + mutex_lock(&teedev->mutex); + list_add_tail(&shm->link, &ctx->list_shm); + mutex_unlock(&teedev->mutex); + } return shm; err_rem: @@ -188,8 +224,242 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) tee_device_put(teedev); return ret; } + +/** + * tee_shm_alloc() - Allocate shared memory + * @ctx: Context that allocates the shared memory + * @size: Requested size of shared memory + * @flags: Flags setting properties for the requested shared memory. + * + * Memory allocated as global shared memory is automatically freed when the + * TEE file pointer is closed. The @flags field uses the bits defined by + * TEE_SHM_* in . TEE_SHM_MAPPED must currently always be + * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and + * associated with a dma-buf handle, else driver private memory. 
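[Editor note: a hedged sketch of an in-kernel user of tee_shm_alloc() together with the client API added further down; the command id and the 4-parameter convention are assumptions about the target TA.]

        static int sketch_send_buf(struct tee_context *ctx, u32 session,
                                   const void *data, size_t len)
        {
                struct tee_ioctl_invoke_arg inv = { };
                struct tee_param param[4] = { };
                struct tee_shm *shm;
                void *va;
                int rc;

                shm = tee_shm_alloc(ctx, len, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
                if (IS_ERR(shm))
                        return PTR_ERR(shm);

                va = tee_shm_get_va(shm, 0);
                if (IS_ERR(va)) {
                        rc = PTR_ERR(va);
                        goto out;
                }
                memcpy(va, data, len);

                inv.func = 0;           /* TA-specific command id, assumed */
                inv.session = session;
                inv.num_params = 4;
                param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
                param[0].u.memref.shm = shm;
                param[0].u.memref.size = len;

                rc = tee_client_invoke_func(ctx, &inv, param);
        out:
                tee_shm_free(shm);
                return rc;
        }
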
+ */ +struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) +{ + return __tee_shm_alloc(ctx, ctx->teedev, size, flags); +} EXPORT_SYMBOL_GPL(tee_shm_alloc); +struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size) +{ + return __tee_shm_alloc(NULL, teedev, size, TEE_SHM_MAPPED); +} +EXPORT_SYMBOL_GPL(tee_shm_priv_alloc); + +struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, + size_t length, u32 flags) +{ + struct tee_device *teedev = ctx->teedev; + const u32 req_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED; + struct tee_shm *shm; + void *ret; + int rc; + int num_pages; + unsigned long start; + + if (flags != req_flags) + return ERR_PTR(-ENOTSUPP); + + if (!tee_device_get(teedev)) + return ERR_PTR(-EINVAL); + + if (!teedev->desc->ops->shm_register || + !teedev->desc->ops->shm_unregister) { + tee_device_put(teedev); + return ERR_PTR(-ENOTSUPP); + } + + teedev_ctx_get(ctx); + + shm = kzalloc(sizeof(*shm), GFP_KERNEL); + if (!shm) { + ret = ERR_PTR(-ENOMEM); + goto err; + } + + shm->flags = flags | TEE_SHM_REGISTER; + shm->teedev = teedev; + shm->ctx = ctx; + shm->id = -1; + start = rounddown(addr, PAGE_SIZE); + shm->offset = addr - start; + shm->size = length; + num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE; + shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL); + if (!shm->pages) { + ret = ERR_PTR(-ENOMEM); + goto err; + } + + rc = get_user_pages_fast(start, num_pages, 1, shm->pages); + if (rc > 0) + shm->num_pages = rc; + if (rc != num_pages) { + if (rc >= 0) + rc = -ENOMEM; + ret = ERR_PTR(rc); + goto err; + } + + mutex_lock(&teedev->mutex); + shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL); + mutex_unlock(&teedev->mutex); + + if (shm->id < 0) { + ret = ERR_PTR(shm->id); + goto err; + } + + rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages, + shm->num_pages, start); + if (rc) { + ret = ERR_PTR(rc); + goto err; + } + + if (flags & TEE_SHM_DMA_BUF) { + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + exp_info.ops = &tee_shm_dma_buf_ops; + exp_info.size = shm->size; + exp_info.flags = O_RDWR; + exp_info.priv = shm; + + shm->dmabuf = dma_buf_export(&exp_info); + if (IS_ERR(shm->dmabuf)) { + ret = ERR_CAST(shm->dmabuf); + teedev->desc->ops->shm_unregister(ctx, shm); + goto err; + } + } + + mutex_lock(&teedev->mutex); + list_add_tail(&shm->link, &ctx->list_shm); + mutex_unlock(&teedev->mutex); + + return shm; +err: + if (shm) { + size_t n; + + if (shm->id >= 0) { + mutex_lock(&teedev->mutex); + idr_remove(&teedev->idr, shm->id); + mutex_unlock(&teedev->mutex); + } + if (shm->pages) { + for (n = 0; n < shm->num_pages; n++) + put_page(shm->pages[n]); + kfree(shm->pages); + } + } + kfree(shm); + teedev_ctx_put(ctx); + tee_device_put(teedev); + return ret; +} +EXPORT_SYMBOL_GPL(tee_shm_register); + +struct tee_shm *tee_shm_register_fd(struct tee_context *ctx, int fd) +{ + struct tee_shm_dmabuf_ref *ref; + void *rc; + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + if (!tee_device_get(ctx->teedev)) + return ERR_PTR(-EINVAL); + + teedev_ctx_get(ctx); + + ref = kzalloc(sizeof(*ref), GFP_KERNEL); + if (!ref) { + rc = ERR_PTR(-ENOMEM); + goto err; + } + + ref->shm.ctx = ctx; + ref->shm.teedev = ctx->teedev; + ref->shm.id = -1; + + ref->dmabuf = dma_buf_get(fd); + if (!ref->dmabuf) { + rc = ERR_PTR(-EINVAL); + goto err; + } + + ref->attach = dma_buf_attach(ref->dmabuf, &ref->shm.teedev->dev); + if (IS_ERR_OR_NULL(ref->attach)) { + rc = ERR_PTR(-EINVAL); + goto err; + } + + ref->sgt = 
dma_buf_map_attachment(ref->attach, DMA_BIDIRECTIONAL); + if (IS_ERR_OR_NULL(ref->sgt)) { + rc = ERR_PTR(-EINVAL); + goto err; + } + + if (sg_nents(ref->sgt->sgl) != 1) { + rc = ERR_PTR(-EINVAL); + goto err; + } + + ref->shm.paddr = sg_dma_address(ref->sgt->sgl); + ref->shm.size = sg_dma_len(ref->sgt->sgl); + ref->shm.flags = TEE_SHM_DMA_BUF | TEE_SHM_EXT_DMA_BUF; + + mutex_lock(&ref->shm.teedev->mutex); + ref->shm.id = idr_alloc(&ref->shm.teedev->idr, &ref->shm, + 1, 0, GFP_KERNEL); + mutex_unlock(&ref->shm.teedev->mutex); + if (ref->shm.id < 0) { + rc = ERR_PTR(ref->shm.id); + goto err; + } + + /* export a dmabuf to later get a userland ref */ + exp_info.ops = &tee_shm_dma_buf_ops; + exp_info.size = ref->shm.size; + exp_info.flags = O_RDWR; + exp_info.priv = &ref->shm; + + ref->shm.dmabuf = dma_buf_export(&exp_info); + if (IS_ERR(ref->shm.dmabuf)) { + rc = ERR_PTR(-EINVAL); + goto err; + } + + mutex_lock(&ref->shm.teedev->mutex); + list_add_tail(&ref->shm.link, &ctx->list_shm); + mutex_unlock(&ref->shm.teedev->mutex); + + return &ref->shm; + +err: + if (ref) { + if (ref->shm.id >= 0) { + mutex_lock(&ctx->teedev->mutex); + idr_remove(&ctx->teedev->idr, ref->shm.id); + mutex_unlock(&ctx->teedev->mutex); + } + if (ref->sgt) + dma_buf_unmap_attachment(ref->attach, ref->sgt, + DMA_BIDIRECTIONAL); + if (ref->attach) + dma_buf_detach(ref->dmabuf, ref->attach); + if (ref->dmabuf) + dma_buf_put(ref->dmabuf); + } + kfree(ref); + teedev_ctx_put(ctx); + tee_device_put(ctx->teedev); + return rc; +} +EXPORT_SYMBOL_GPL(tee_shm_register_fd); + /** * tee_shm_get_fd() - Increase reference count and return file descriptor * @shm: Shared memory handle @@ -197,10 +467,9 @@ EXPORT_SYMBOL_GPL(tee_shm_alloc); */ int tee_shm_get_fd(struct tee_shm *shm) { - u32 req_flags = TEE_SHM_MAPPED | TEE_SHM_DMA_BUF; int fd; - if ((shm->flags & req_flags) != req_flags) + if (!(shm->flags & TEE_SHM_DMA_BUF)) return -EINVAL; fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC); @@ -238,6 +507,8 @@ EXPORT_SYMBOL_GPL(tee_shm_free); */ int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa) { + if (!(shm->flags & TEE_SHM_MAPPED)) + return -EINVAL; /* Check that we're in the range of the shm */ if ((char *)va < (char *)shm->kaddr) return -EINVAL; @@ -258,6 +529,8 @@ EXPORT_SYMBOL_GPL(tee_shm_va2pa); */ int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va) { + if (!(shm->flags & TEE_SHM_MAPPED)) + return -EINVAL; /* Check that we're in the range of the shm */ if (pa < shm->paddr) return -EINVAL; @@ -284,6 +557,8 @@ EXPORT_SYMBOL_GPL(tee_shm_pa2va); */ void *tee_shm_get_va(struct tee_shm *shm, size_t offs) { + if (!(shm->flags & TEE_SHM_MAPPED)) + return ERR_PTR(-EINVAL); if (offs >= shm->size) return ERR_PTR(-EINVAL); return (char *)shm->kaddr + offs; @@ -335,17 +610,6 @@ struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id) } EXPORT_SYMBOL_GPL(tee_shm_get_from_id); -/** - * tee_shm_get_id() - Get id of a shared memory object - * @shm: Shared memory handle - * @returns id - */ -int tee_shm_get_id(struct tee_shm *shm) -{ - return shm->id; -} -EXPORT_SYMBOL_GPL(tee_shm_get_id); - /** * tee_shm_put() - Decrease reference count on a shared memory handle * @shm: Shared memory handle diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c index fb4f8522a5269c..e6d4b9e4a86494 100644 --- a/drivers/tee/tee_shm_pool.c +++ b/drivers/tee/tee_shm_pool.c @@ -44,49 +44,18 @@ static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm, shm->kaddr = NULL; } +static void pool_op_gen_destroy_poolmgr(struct 
tee_shm_pool_mgr *poolm) +{ + gen_pool_destroy(poolm->private_data); + kfree(poolm); +} + static const struct tee_shm_pool_mgr_ops pool_ops_generic = { .alloc = pool_op_gen_alloc, .free = pool_op_gen_free, + .destroy_poolmgr = pool_op_gen_destroy_poolmgr, }; -static void pool_res_mem_destroy(struct tee_shm_pool *pool) -{ - gen_pool_destroy(pool->private_mgr.private_data); - gen_pool_destroy(pool->dma_buf_mgr.private_data); -} - -static int pool_res_mem_mgr_init(struct tee_shm_pool_mgr *mgr, - struct tee_shm_pool_mem_info *info, - int min_alloc_order) -{ - size_t page_mask = PAGE_SIZE - 1; - struct gen_pool *genpool = NULL; - int rc; - - /* - * Start and end must be page aligned - */ - if ((info->vaddr & page_mask) || (info->paddr & page_mask) || - (info->size & page_mask)) - return -EINVAL; - - genpool = gen_pool_create(min_alloc_order, -1); - if (!genpool) - return -ENOMEM; - - gen_pool_set_algo(genpool, gen_pool_best_fit, NULL); - rc = gen_pool_add_virt(genpool, info->vaddr, info->paddr, info->size, - -1); - if (rc) { - gen_pool_destroy(genpool); - return rc; - } - - mgr->private_data = genpool; - mgr->ops = &pool_ops_generic; - return 0; -} - /** * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved * memory range @@ -104,42 +73,109 @@ struct tee_shm_pool * tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info, struct tee_shm_pool_mem_info *dmabuf_info) { - struct tee_shm_pool *pool = NULL; - int ret; - - pool = kzalloc(sizeof(*pool), GFP_KERNEL); - if (!pool) { - ret = -ENOMEM; - goto err; - } + struct tee_shm_pool_mgr *priv_mgr; + struct tee_shm_pool_mgr *dmabuf_mgr; + void *rc; /* * Create the pool for driver private shared memory */ - ret = pool_res_mem_mgr_init(&pool->private_mgr, priv_info, - 3 /* 8 byte aligned */); - if (ret) - goto err; + rc = tee_shm_pool_mgr_alloc_res_mem(priv_info->vaddr, priv_info->paddr, + priv_info->size, + 3 /* 8 byte aligned */); + if (IS_ERR(rc)) + return rc; + priv_mgr = rc; /* * Create the pool for dma_buf shared memory */ - ret = pool_res_mem_mgr_init(&pool->dma_buf_mgr, dmabuf_info, - PAGE_SHIFT); - if (ret) + rc = tee_shm_pool_mgr_alloc_res_mem(dmabuf_info->vaddr, + dmabuf_info->paddr, + dmabuf_info->size, PAGE_SHIFT); + if (IS_ERR(rc)) + goto err_free_priv_mgr; + dmabuf_mgr = rc; + + rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr); + if (IS_ERR(rc)) + goto err_free_dmabuf_mgr; + + return rc; + +err_free_dmabuf_mgr: + tee_shm_pool_mgr_destroy(dmabuf_mgr); +err_free_priv_mgr: + tee_shm_pool_mgr_destroy(priv_mgr); + + return rc; +} +EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem); + +struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr, + phys_addr_t paddr, + size_t size, + int min_alloc_order) +{ + const size_t page_mask = PAGE_SIZE - 1; + struct tee_shm_pool_mgr *mgr; + int rc; + + /* Start and end must be page aligned */ + if (vaddr & page_mask || paddr & page_mask || size & page_mask) + return ERR_PTR(-EINVAL); + + mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); + if (!mgr) + return ERR_PTR(-ENOMEM); + + mgr->private_data = gen_pool_create(min_alloc_order, -1); + if (!mgr->private_data) { + rc = -ENOMEM; goto err; + } - pool->destroy = pool_res_mem_destroy; - return pool; + gen_pool_set_algo(mgr->private_data, gen_pool_best_fit, NULL); + rc = gen_pool_add_virt(mgr->private_data, vaddr, paddr, size, -1); + if (rc) { + gen_pool_destroy(mgr->private_data); + goto err; + } + + mgr->ops = &pool_ops_generic; + + return mgr; err: - if (ret == -ENOMEM) - pr_err("%s: can't allocate memory for res_mem shared 
memory pool\n", __func__); - if (pool && pool->private_mgr.private_data) - gen_pool_destroy(pool->private_mgr.private_data); - kfree(pool); - return ERR_PTR(ret); + kfree(mgr); + + return ERR_PTR(rc); } -EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem); +EXPORT_SYMBOL_GPL(tee_shm_pool_mgr_alloc_res_mem); + +static bool check_mgr_ops(struct tee_shm_pool_mgr *mgr) +{ + return mgr && mgr->ops && mgr->ops->alloc && mgr->ops->free && + mgr->ops->destroy_poolmgr; +} + +struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr, + struct tee_shm_pool_mgr *dmabuf_mgr) +{ + struct tee_shm_pool *pool; + + if (!check_mgr_ops(priv_mgr) || !check_mgr_ops(dmabuf_mgr)) + return ERR_PTR(-EINVAL); + + pool = kzalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) + return ERR_PTR(-ENOMEM); + + pool->private_mgr = priv_mgr; + pool->dma_buf_mgr = dmabuf_mgr; + + return pool; +} +EXPORT_SYMBOL_GPL(tee_shm_pool_alloc); /** * tee_shm_pool_free() - Free a shared memory pool @@ -150,7 +186,10 @@ EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem); */ void tee_shm_pool_free(struct tee_shm_pool *pool) { - pool->destroy(pool); + if (pool->private_mgr) + tee_shm_pool_mgr_destroy(pool->private_mgr); + if (pool->dma_buf_mgr) + tee_shm_pool_mgr_destroy(pool->dma_buf_mgr); kfree(pool); } EXPORT_SYMBOL_GPL(tee_shm_pool_free); diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 4c5bca38c6533e..3f1293963c233f 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -14,14 +14,16 @@ #ifndef __LINUX_ARM_SMCCC_H #define __LINUX_ARM_SMCCC_H +#include + /* * This file provides common defines for ARM SMC Calling Convention as * specified in * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html */ -#define ARM_SMCCC_STD_CALL 0 -#define ARM_SMCCC_FAST_CALL 1 +#define ARM_SMCCC_STD_CALL _AC(0,U) +#define ARM_SMCCC_FAST_CALL _AC(1,U) #define ARM_SMCCC_TYPE_SHIFT 31 #define ARM_SMCCC_SMC_32 0 diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 0f175b8f6456c1..9c4011e4c17f25 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -17,6 +17,7 @@ #include #include +#include #include #include @@ -25,9 +26,14 @@ * specific TEE driver. */ -#define TEE_SHM_MAPPED 0x1 /* Memory mapped by the kernel */ -#define TEE_SHM_DMA_BUF 0x2 /* Memory with dma-buf handle */ +#define TEE_SHM_MAPPED BIT(0) /* Memory mapped by the kernel */ +#define TEE_SHM_DMA_BUF BIT(1) /* Memory with dma-buf handle */ +#define TEE_SHM_EXT_DMA_BUF BIT(2) /* Memory with dma-buf handle */ +#define TEE_SHM_REGISTER BIT(3) /* Memory registered in secure world */ +#define TEE_SHM_USER_MAPPED BIT(4) /* Memory mapped in user space */ +#define TEE_SHM_POOL BIT(5) /* Memory allocated from pool */ +struct device; struct tee_device; struct tee_shm; struct tee_shm_pool; @@ -37,11 +43,17 @@ struct tee_shm_pool; * @teedev: pointer to this drivers struct tee_device * @list_shm: List of shared memory object owned by this context * @data: driver specific context data, managed by the driver + * @refcount: reference counter for this structure + * @releasing: flag that indicates if context is being released right now. + * It is needed to break circular dependency on context during + * shared memory release. 
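[Editor note: a hedged sketch, ours and not from the patch, of the teardown ordering the refcount enables.]

        /*
         * Every tee_shm created for a context takes a context reference, so
         * the final teedev_ctx_put(), and with it kfree(ctx), cannot run
         * while a dma-buf still points at one of the context's shms.
         * ops->release() runs exactly once, guarded by @releasing:
         *
         *   tee_release(filp)
         *     -> teedev_close_context(ctx)
         *          -> tee_device_put(teedev)
         *          -> teedev_ctx_put(ctx)       drops the fd's reference
         *               ...                     shm references may remain
         *   tee_shm_release(shm)                last dma-buf ref dropped
         *     -> teedev_ctx_put(ctx)            refcount reaches zero
         *          -> teedev_ctx_release(ctx)   ops->release(), kfree(ctx)
         */
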
*/ struct tee_context { struct tee_device *teedev; struct list_head list_shm; void *data; + struct kref refcount; + bool releasing; }; struct tee_param_memref { @@ -75,6 +87,8 @@ struct tee_param { * @cancel_req: request cancel of an ongoing invoke or open * @supp_revc: called for supplicant to get a command * @supp_send: called for supplicant to send a response + * @shm_register: register shared memory buffer in TEE + * @shm_unregister: unregister shared memory buffer in TEE */ struct tee_driver_ops { void (*get_version)(struct tee_device *teedev, @@ -93,6 +107,10 @@ struct tee_driver_ops { struct tee_param *param); int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params, struct tee_param *param); + int (*shm_register)(struct tee_context *ctx, struct tee_shm *shm, + struct page **pages, size_t num_pages, + unsigned long start); + int (*shm_unregister)(struct tee_context *ctx, struct tee_shm *shm); }; /** @@ -148,6 +166,97 @@ int tee_device_register(struct tee_device *teedev); */ void tee_device_unregister(struct tee_device *teedev); +/** + * struct tee_shm - shared memory object + * @teedev: device used to allocate the object + * @ctx: context using the object, if NULL the context is gone + * @link link element + * @paddr: physical address of the shared memory + * @kaddr: virtual address of the shared memory + * @size: size of shared memory + * @offset: offset of buffer in user space + * @pages: locked pages from userspace + * @num_pages: number of locked pages + * @dmabuf: dmabuf used to for exporting to user space + * @flags: defined by TEE_SHM_* in tee_drv.h + * @id: unique id of a shared memory object on this device + * + * This pool is only supposed to be accessed directly from the TEE + * subsystem and from drivers that implements their own shm pool manager. + */ +struct tee_shm { + struct tee_device *teedev; + struct tee_context *ctx; + struct list_head link; + phys_addr_t paddr; + void *kaddr; + size_t size; + unsigned int offset; + struct page **pages; + size_t num_pages; + struct dma_buf *dmabuf; + u32 flags; + int id; +}; + +/** + * struct tee_shm_pool_mgr - shared memory manager + * @ops: operations + * @private_data: private data for the shared memory manager + */ +struct tee_shm_pool_mgr { + const struct tee_shm_pool_mgr_ops *ops; + void *private_data; +}; + +/** + * struct tee_shm_pool_mgr_ops - shared memory pool manager operations + * @alloc: called when allocating shared memory + * @free: called when freeing shared memory + * @destroy_poolmgr: called when destroying the pool manager + */ +struct tee_shm_pool_mgr_ops { + int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm, + size_t size); + void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm); + void (*destroy_poolmgr)(struct tee_shm_pool_mgr *poolmgr); +}; + +/** + * tee_shm_pool_alloc() - Create a shared memory pool from shm managers + * @priv_mgr: manager for driver private shared memory allocations + * @dmabuf_mgr: manager for dma-buf shared memory allocations + * + * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied + * in @dmabuf, others will use the range provided by @priv. + * + * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. 
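[Editor note: since tee_shm_pool_alloc() takes two managers and check_mgr_ops() rejects one lacking any of the three callbacks, an out-of-tree manager must supply alloc, free and destroy_poolmgr. A hedged, kmalloc-backed illustration only; real managers use the page or genpool allocators shown earlier.]

        static int demo_pool_alloc(struct tee_shm_pool_mgr *poolm,
                                   struct tee_shm *shm, size_t size)
        {
                shm->kaddr = kmalloc(size, GFP_KERNEL);
                if (!shm->kaddr)
                        return -ENOMEM;
                shm->paddr = virt_to_phys(shm->kaddr);
                shm->size = size;
                return 0;
        }

        static void demo_pool_free(struct tee_shm_pool_mgr *poolm,
                                   struct tee_shm *shm)
        {
                kfree(shm->kaddr);
                shm->kaddr = NULL;
        }

        static void demo_pool_destroy(struct tee_shm_pool_mgr *poolm)
        {
                kfree(poolm);
        }

        static const struct tee_shm_pool_mgr_ops demo_pool_ops = {
                .alloc = demo_pool_alloc,
                .free = demo_pool_free,
                .destroy_poolmgr = demo_pool_destroy,
        };
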
+ */ +struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr, + struct tee_shm_pool_mgr *dmabuf_mgr); + +/* + * tee_shm_pool_mgr_alloc_res_mem() - Create a shm manager for reserved + * memory + * @vaddr: Virtual address of start of pool + * @paddr: Physical address of start of pool + * @size: Size in bytes of the pool + * + * @returns pointer to a 'struct tee_shm_pool_mgr' or an ERR_PTR on failure. + */ +struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr, + phys_addr_t paddr, + size_t size, + int min_alloc_order); + +/** + * tee_shm_pool_mgr_destroy() - Free a shared memory manager + */ +static inline void tee_shm_pool_mgr_destroy(struct tee_shm_pool_mgr *poolm) +{ + poolm->ops->destroy_poolmgr(poolm); +} + /** * struct tee_shm_pool_mem_info - holds information needed to create a shared * memory pool @@ -209,6 +318,50 @@ void *tee_get_drvdata(struct tee_device *teedev); */ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags); +/** + * tee_shm_priv_alloc() - Allocate shared memory privately + * @dev: Device that allocates the shared memory + * @size: Requested size of shared memory + * + * Allocates shared memory buffer that is not associated with any client + * context. Such buffers are owned by TEE driver and used for internal calls. + * + * @returns a pointer to 'struct tee_shm' + */ +struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size); + +/** + * tee_shm_register() - Register shared memory buffer + * @ctx: Context that registers the shared memory + * @addr: Address is userspace of the shared buffer + * @length: Length of the shared buffer + * @flags: Flags setting properties for the requested shared memory. + * + * @returns a pointer to 'struct tee_shm' + */ +struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, + size_t length, u32 flags); + +/** + * tee_shm_register_fd() - Register shared memory from file descriptor + * + * @ctx: Context that allocates the shared memory + * @fd: shared memory file descriptor reference. 
+ * + * @returns a pointer to 'struct tee_shm' + */ +struct tee_shm *tee_shm_register_fd(struct tee_context *ctx, int fd); + +/** + * tee_shm_is_registered() - Check if shared memory object in registered in TEE + * @shm: Shared memory handle + * @returns true if object is registered in TEE + */ +static inline bool tee_shm_is_registered(struct tee_shm *shm) +{ + return shm && (shm->flags & TEE_SHM_REGISTER); +} + /** * tee_shm_free() - Free shared memory * @shm: Handle to shared memory to free @@ -258,12 +411,48 @@ void *tee_shm_get_va(struct tee_shm *shm, size_t offs); */ int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa); +/** + * tee_shm_get_size() - Get size of shared memory buffer + * @shm: Shared memory handle + * @returns size of shared memory + */ +static inline size_t tee_shm_get_size(struct tee_shm *shm) +{ + return shm->size; +} + +/** + * tee_shm_get_pages() - Get list of pages that hold shared buffer + * @shm: Shared memory handle + * @num_pages: Number of pages will be stored there + * @returns pointer to pages array + */ +static inline struct page **tee_shm_get_pages(struct tee_shm *shm, + size_t *num_pages) +{ + *num_pages = shm->num_pages; + return shm->pages; +} + +/** + * tee_shm_get_page_offset() - Get shared buffer offset from page start + * @shm: Shared memory handle + * @returns page offset of shared buffer + */ +static inline size_t tee_shm_get_page_offset(struct tee_shm *shm) +{ + return shm->offset; +} + /** * tee_shm_get_id() - Get id of a shared memory object * @shm: Shared memory handle * @returns id */ -int tee_shm_get_id(struct tee_shm *shm); +static inline int tee_shm_get_id(struct tee_shm *shm) +{ + return shm->id; +} /** * tee_shm_get_from_id() - Find shared memory object and increase reference @@ -274,4 +463,89 @@ int tee_shm_get_id(struct tee_shm *shm); */ struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id); +/** + * tee_client_open_context() - Open a TEE context + * @start: if not NULL, continue search after this context + * @match: function to check TEE device + * @data: data for match function + * @vers: if not NULL, version data of TEE device of the context returned + * + * This function does an operation similar to open("/dev/teeX") in user space. + * A returned context must be released with tee_client_close_context(). + * + * Returns a TEE context of the first TEE device matched by the match() + * callback or an ERR_PTR. + */ +struct tee_context * +tee_client_open_context(struct tee_context *start, + int (*match)(struct tee_ioctl_version_data *, + const void *), + const void *data, struct tee_ioctl_version_data *vers); + +/** + * tee_client_close_context() - Close a TEE context + * @ctx: TEE context to close + * + * Note that all sessions previously opened with this context will be + * closed when this function is called. + */ +void tee_client_close_context(struct tee_context *ctx); + +/** + * tee_client_get_version() - Query version of TEE + * @ctx: TEE context to TEE to query + * @vers: Pointer to version data + */ +void tee_client_get_version(struct tee_context *ctx, + struct tee_ioctl_version_data *vers); + +/** + * tee_client_open_session() - Open a session to a Trusted Application + * @ctx: TEE context + * @arg: Open session arguments, see description of + * struct tee_ioctl_open_session_arg + * @param: Parameters passed to the Trusted Application + * + * Returns < 0 on error else see @arg->ret for result. If @arg->ret + * is TEEC_SUCCESS the session identifier is available in @arg->session. 
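[Editor note: putting the client API together, a hedged end-to-end sketch; the match callback keys on the implementation id, and the UUID and command id are placeholders.]

        static int demo_match(struct tee_ioctl_version_data *ver,
                              const void *data)
        {
                return ver->impl_id == TEE_IMPL_ID_OPTEE;
        }

        static int demo_call_ta(void)
        {
                struct tee_ioctl_open_session_arg sess = { };
                struct tee_ioctl_invoke_arg inv = { };
                struct tee_param param[4] = { };
                struct tee_context *ctx;
                int rc;

                ctx = tee_client_open_context(NULL, demo_match, NULL, NULL);
                if (IS_ERR(ctx))
                        return PTR_ERR(ctx);

                /* sess.uuid: the 16-byte TA UUID, left as a placeholder */
                sess.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
                sess.num_params = 4;
                rc = tee_client_open_session(ctx, &sess, param);
                if (!rc && sess.ret)
                        rc = -EINVAL;   /* TEE-level error, mapping assumed */
                if (rc)
                        goto out_ctx;

                inv.func = 1;           /* TA command id, placeholder */
                inv.session = sess.session;
                inv.num_params = 4;
                rc = tee_client_invoke_func(ctx, &inv, param);

                tee_client_close_session(ctx, sess.session);
        out_ctx:
                tee_client_close_context(ctx);
                return rc;
        }
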
+
+/**
+ * tee_client_close_session() - Close a session to a Trusted Application
+ * @ctx:	TEE context
+ * @session:	Session id
+ *
+ * Returns < 0 on error else 0. Regardless of the return value, the
+ * session is no longer valid after this function has returned.
+ */
+int tee_client_close_session(struct tee_context *ctx, u32 session);
+
+/**
+ * tee_client_invoke_func() - Invoke a function in a Trusted Application
+ * @ctx:	TEE context
+ * @arg:	Invoke arguments, see description of
+ *		struct tee_ioctl_invoke_arg
+ * @param:	Parameters passed to the Trusted Application
+ *
+ * Returns < 0 on error else see @arg->ret for result.
+ */
+int tee_client_invoke_func(struct tee_context *ctx,
+			   struct tee_ioctl_invoke_arg *arg,
+			   struct tee_param *param);
+
+/**
+ * tee_param_is_memref() - Check if a parameter is a memory reference
+ * @param:	Parameter to check
+ * @returns true if the parameter attribute is one of the MEMREF types
+ */
+static inline bool tee_param_is_memref(struct tee_param *param)
+{
+	switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
+	case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+	case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+	case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+		return true;
+	default:
+		return false;
+	}
+}
+
 #endif /*__TEE_DRV_H*/
diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h
index 370d8845ab2173..08fb98f5fa3ce5 100644
--- a/include/uapi/linux/tee.h
+++ b/include/uapi/linux/tee.h
@@ -49,6 +49,8 @@
 #define TEE_MAX_ARG_SIZE	1024
 
 #define TEE_GEN_CAP_GP		(1 << 0)/* GlobalPlatform compliant TEE */
+#define TEE_GEN_CAP_PRIVILEGED	(1 << 1)/* Privileged device (for supplicant) */
+#define TEE_GEN_CAP_REG_MEM	(1 << 2)/* Supports registering shared memory */
 
 /*
  * TEE Implementation ID
@@ -115,6 +117,35 @@ struct tee_ioctl_shm_alloc_data {
 #define TEE_IOC_SHM_ALLOC	_IOWR(TEE_IOC_MAGIC, TEE_IOC_BASE + 1, \
 				      struct tee_ioctl_shm_alloc_data)
 
+/**
+ * struct tee_ioctl_shm_register_fd_data - Shared memory registering argument
+ * @fd:		[in] File descriptor identifying the shared memory
+ * @size:	[out] Size of the registered shared memory
+ * @flags:	[in/out] Flags to/from registration.
+ * @id:		[out] Identifier of the shared memory
+ *
+ * The flags field should currently be zero as input. Updated by the call
+ * with actual flags as defined by TEE_IOCTL_SHM_* above.
+ * This structure is used as argument for TEE_IOC_SHM_REGISTER_FD below.
+ */
+struct tee_ioctl_shm_register_fd_data {
+	__s64 fd;
+	__u64 size;
+	__u32 flags;
+	__s32 id;
+} __aligned(8);
+
+/**
+ * TEE_IOC_SHM_REGISTER_FD - register shared memory from a file descriptor
+ *
+ * Returns a file descriptor on success or < 0 on failure
+ *
+ * The returned file descriptor refers to the shared memory object in the
+ * kernel. The shared memory is freed when the descriptor is closed.
+ */
+#define TEE_IOC_SHM_REGISTER_FD	_IOWR(TEE_IOC_MAGIC, TEE_IOC_BASE + 8, \
+				      struct tee_ioctl_shm_register_fd_data)
+
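From user space, TEE_IOC_SHM_REGISTER_FD might be driven as sketched below; the already-open TEE device fd and the dma-buf fd are assumed to exist, and the comment about data.id reflects how memref parameters reference shared memory by id.

#include <sys/ioctl.h>
#include <linux/tee.h>

/* Sketch: register an existing dma-buf fd with the TEE */
static int sketch_register_dmabuf(int tee_fd, int dmabuf_fd)
{
	struct tee_ioctl_shm_register_fd_data data = {
		.fd = dmabuf_fd,	/* flags and id are zero on input */
	};

	/*
	 * On success the returned fd refers to the registered shm object
	 * and data.id identifies it in memref parameters; closing the fd
	 * ends the registration.
	 */
	return ioctl(tee_fd, TEE_IOC_SHM_REGISTER_FD, &data);
}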
 /**
  * struct tee_ioctl_buf_data - Variable sized buffer
  * @buf_ptr:	[in] A __user pointer to a buffer
@@ -153,6 +184,13 @@ struct tee_ioctl_buf_data {
  */
 #define TEE_IOCTL_PARAM_ATTR_TYPE_MASK		0xff
 
+/* Meta parameter carrying extra information about the message. */
+#define TEE_IOCTL_PARAM_ATTR_META	0x100
+
+/* Mask of all known attr bits */
+#define TEE_IOCTL_PARAM_ATTR_MASK \
+	(TEE_IOCTL_PARAM_ATTR_TYPE_MASK | TEE_IOCTL_PARAM_ATTR_META)
+
 /*
  * Matches TEEC_LOGIN_* in GP TEE Client API
  * Are only defined for GP compliant TEEs
@@ -331,6 +369,35 @@ struct tee_iocl_supp_send_arg {
 #define TEE_IOC_SUPPL_SEND	_IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 7, \
 				     struct tee_ioctl_buf_data)
 
+/**
+ * struct tee_ioctl_shm_register_data - Shared memory register argument
+ * @addr:	[in] Start address of shared memory to register
+ * @length:	[in/out] Length of shared memory to register
+ * @flags:	[in/out] Flags to/from registration.
+ * @id:		[out] Identifier of the shared memory
+ *
+ * The flags field should currently be zero as input. Updated by the call
+ * with actual flags as defined by TEE_IOCTL_SHM_* above.
+ * This structure is used as argument for TEE_IOC_SHM_REGISTER below.
+ */
+struct tee_ioctl_shm_register_data {
+	__u64 addr;
+	__u64 length;
+	__u32 flags;
+	__s32 id;
+};
+
+/**
+ * TEE_IOC_SHM_REGISTER - Register shared memory with the TEE
+ *
+ * Registers shared memory between the user space process and the secure OS.
+ *
+ * Returns a file descriptor on success or < 0 on failure
+ *
+ * The shared memory is unregistered when the descriptor is closed.
+ */
+#define TEE_IOC_SHM_REGISTER	_IOWR(TEE_IOC_MAGIC, TEE_IOC_BASE + 9, \
+				      struct tee_ioctl_shm_register_data)
+
 /*
  * Five syscalls are used when communicating with the TEE driver.
  * open(): opens the device associated with the driver
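A corresponding user-space sketch for TEE_IOC_SHM_REGISTER, assuming an open TEE device fd; the page-aligned allocation and missing cleanup on failure are simplifications of this example.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/tee.h>

/* Sketch: register len bytes of anonymous user memory with the TEE */
static int sketch_register_user_mem(int tee_fd, size_t len)
{
	struct tee_ioctl_shm_register_data data;
	void *buf = NULL;

	if (posix_memalign(&buf, sysconf(_SC_PAGESIZE), len))
		return -1;

	memset(&data, 0, sizeof(data));	/* flags must be zero on input */
	data.addr = (uintptr_t)buf;
	data.length = len;

	/*
	 * Returns a file descriptor; buf stays registered (and must stay
	 * valid) until that descriptor is closed.
	 */
	return ioctl(tee_fd, TEE_IOC_SHM_REGISTER, &data);
}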
diff --git a/upstream-tee-subsys-patches.txt b/upstream-tee-subsys-patches.txt
new file mode 100644
index 00000000000000..aaf3cdf10e10cc
--- /dev/null
+++ b/upstream-tee-subsys-patches.txt
@@ -0,0 +1,48 @@
+Patches relating to the TEE subsystem
+=====================================
+
+This is a list of all the patches that relate to the TEE subsystem. The
+text inside the brackets is the kernel version in which the patch was
+introduced, followed by the sha1 hash in the upstream kernel tree.
+
+
+[v4.16] ded4c39e93f3 arm/arm64: smccc: Make function identifiers an unsigned quantity
+[v4.16] 2490cdf6435b tee: shm: Potential NULL dereference calling tee_shm_register()
+[v4.16] c94f31b526fe tee: shm: don't put_page on null shm->pages
+[v4.16] 80ec6f5de60b tee: shm: make function __tee_shm_alloc static
+[v4.16] cdbcf83d29c1 tee: optee: check type of registered shared memory
+[v4.16] 95ffe4ca4387 tee: add start argument to shm_register callback
+[v4.16] f681e08f671a tee: optee: fix header dependencies
+[v4.16] ef8e08d24ca8 tee: shm: inline tee_shm_get_id()
+[v4.16] 217e0250cccb tee: use reference counting for tee_context
+[v4.16] f58e236c9d66 tee: optee: enable dynamic SHM support
+[v4.16] abd135ba215c tee: optee: add optee-specific shared pool implementation
+[v4.16] d885cc5e0759 tee: optee: store OP-TEE capabilities in private data
+[v4.16] 53a107c812de tee: optee: add registered buffers handling into RPC calls
+[v4.16] 64cf9d8a672e tee: optee: add registered shared parameters handling
+[v4.16] 06ca79179c4e tee: optee: add shared buffer registration functions
+[v4.16] 3bb48ba5cd60 tee: optee: add page list manipulation functions
+[v4.16] de5c6dfc43da tee: optee: Update protocol definitions
+[v4.16] e0c69ae8bfb5 tee: shm: add page accessor functions
+[v4.16] b25946ad951c tee: shm: add accessors for buffer size and page offset
+[v4.16] 033ddf12bcf5 tee: add register user memory
+[v4.16] e2aca5d8928a tee: flexible shared memory pool creation
+[v4.16] 1647a5ac1754 optee: support asynchronous supplicant requests
+[v4.16] f2aa97240c84 tee: add TEE_IOCTL_PARAM_ATTR_META
+[v4.16] 84debcc53533 tee: add tee_param_is_memref() for driver use
+[v4.15] f044113113dd optee: fix invalid of_node_put() in optee_driver_init()
+[v4.14] 39e6519a3f13 tee: optee: sync with new naming of interrupts
+[v4.14] 059cf566e123 tee: indicate privileged dev in gen_caps
+[v4.14] a9980e947ec9 tee: optee: interruptible RPC sleep
+[v4.14] 96e72ddeec45 tee: optee: add const to tee_driver_ops and tee_desc structures
+[v4.14] 53e3ca5cee24 tee: tee_shm: Constify dma_buf_ops structures.
+[v4.14] 999616b8536c tee: add forward declaration for struct device
+[v4.14] efb14036bd7f tee: optee: fix uninitialized symbol 'parg'
+[v4.12] e84188852a72 tee: add ARM_SMCCC dependency
+[v4.12] 4fb0a5eb364d tee: add OP-TEE driver
+[v4.12] 967c9cca2cc5 tee: generic TEE subsystem
+[v4.5] 14457459f9ca ARM: 8480/2: arm64: add implementation for arm-smccc
+[v4.5] b329f95d70f3 ARM: 8479/2: add implementation for arm-smccc
+[v4.5] 98dd64f34f47 ARM: 8478/2: arm/arm64: add arm-smccc