summary | shortlog | log | commit | commitdiff | tree
raw | patch | inline | side by side (parent: 450c1b1)
author | Jens Wiklander <jens.wiklander@linaro.org> | |
Mon, 4 Jan 2021 07:34:49 +0000 (08:34 +0100) | ||
committer | Jérôme Forissier <jerome@forissier.org> | |
Thu, 7 Jan 2021 14:49:29 +0000 (15:49 +0100) |
Adds core_mmu_remove_mapping() which removes mappings earlier added with
core_mmu_add_mapping().

Reviewed-by: Etienne Carriere <etienne.carriere@linaro.org>
Acked-by: Jerome Forissier <jerome@forissier.org>
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
core/arch/arm/include/mm/core_mmu.h | patch | blob | history | |
core/arch/arm/mm/core_mmu.c | patch | blob | history |
index b55a406c93d1978d143807dce0b6154c8ce1573f..33ee2ebd7324c8dbbdd0628c4e269141164d2e42 100644 (file)
(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT);
}
+/*
+ * core_mmu_remove_mapping() - remove a mapping added with
+ * core_mmu_add_mapping()
+ * @type: memory type of the mapping to remove
+ * @addr: virtual address within the mapping to remove
+ * @len: length of the mapping to remove
+ *
+ * Returns TEE_SUCCESS on success or TEE_ERROR_GENERIC if the mapping
+ * cannot be found or cannot be removed.
+ */
+TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr,
+ size_t len);
bool core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len);
/* various invalidate secure TLB */
index 59e2e681d876eda108ffea881449dcc59b066c4f..4f27b24995e4392aaddabe79647ccb287fc4c633 100644 (file)
idx, pa, attr);
}
+/*
+ * clear_region() - zero the translation table entries covering @region
+ * @tbl_info: describes the translation table level holding the entries
+ * @region: VA/PA/size of the block-aligned range to unmap
+ *
+ * Counterpart of set_region(): every entry spanned by the region is set
+ * to 0/0, leaving the range unmapped in this table.
+ */
+static void clear_region(struct core_mmu_table_info *tbl_info,
+ struct tee_mmap_region *region)
+{
+ unsigned int end = 0;
+ unsigned int idx = 0;
+
+ /* va, len and pa should be block aligned */
+ assert(!core_mmu_get_block_offset(tbl_info, region->va));
+ assert(!core_mmu_get_block_offset(tbl_info, region->size));
+ assert(!core_mmu_get_block_offset(tbl_info, region->pa));
+
+ /* Translate the VA range into an index range at this table level */
+ idx = core_mmu_va2idx(tbl_info, region->va);
+ end = core_mmu_va2idx(tbl_info, region->va + region->size);
+
+ /* Write pa = 0, attr = 0 for each entry, i.e. an invalid descriptor */
+ while (idx < end) {
+ core_mmu_set_entry(tbl_info, idx, 0, 0);
+ idx++;
+ }
+}
+
static void set_region(struct core_mmu_table_info *tbl_info,
struct tee_mmap_region *region)
{
set_pg_region(dir_info, r, &pgt, &pg_info);
}
+/*
+ * core_mmu_remove_mapping() - undo a mapping added with
+ * core_mmu_add_mapping()
+ * @type: memory type the mapping was registered with
+ * @addr: virtual address within the mapping
+ * @len: length of the mapping
+ *
+ * The mapping must match an entry in static_memory_map exactly (same
+ * granule-rounded PA and size), otherwise TEE_ERROR_GENERIC is returned
+ * and nothing is changed. On success the translation table entries are
+ * cleared, TLBs are invalidated and the static_memory_map entry is
+ * removed; when the freed VA range is adjacent to MEM_AREA_RES_VASPACE
+ * it is returned to that pool.
+ */
+TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr,
+ size_t len)
+{
+ struct core_mmu_table_info tbl_info = { };
+ struct tee_mmap_region *res_map = NULL;
+ struct tee_mmap_region *map = NULL;
+ paddr_t pa = virt_to_phys(addr);
+ size_t granule = 0;
+ ptrdiff_t i = 0;
+ paddr_t p = 0;
+ size_t l = 0;
+
+ /* Locate the registered mapping covering this physical address */
+ map = find_map_by_type_and_pa(type, pa);
+ if (!map)
+ return TEE_ERROR_GENERIC;
+
+ /*
+ * Dynamic mappings live in the reserved VA space; use its table to
+ * derive the mapping granule (block size) at that table level.
+ */
+ res_map = find_map_by_type(MEM_AREA_RES_VASPACE);
+ if (!res_map)
+ return TEE_ERROR_GENERIC;
+ if (!core_mmu_find_table(NULL, res_map->va, UINT_MAX, &tbl_info))
+ return TEE_ERROR_GENERIC;
+ granule = BIT(tbl_info.shift);
+
+ /* Only entries inside static_memory_map can be removed */
+ if (map < static_memory_map ||
+ map >= static_memory_map + ARRAY_SIZE(static_memory_map))
+ return TEE_ERROR_GENERIC;
+ i = map - static_memory_map;
+
+ /* Check that we have a full match */
+ p = ROUNDDOWN(pa, granule);
+ l = ROUNDUP(len + pa - p, granule);
+ if (map->pa != p || map->size != l)
+ return TEE_ERROR_GENERIC;
+
+ /* Invalidate the table entries, then flush stale TLB entries */
+ clear_region(&tbl_info, map);
+ tlbi_all();
+
+ /* If possible remove the va range from res_map */
+ if (res_map->va - map->size == map->va) {
+ res_map->va -= map->size;
+ res_map->size += map->size;
+ }
+
+ /* Remove the entry. */
+ memmove(map, map + 1,
+ (ARRAY_SIZE(static_memory_map) - i - 1) * sizeof(*map));
+
+ /* Clear the last new entry in case it was used */
+ memset(static_memory_map + ARRAY_SIZE(static_memory_map) - 1,
+ 0, sizeof(*map));
+
+ return TEE_SUCCESS;
+}
+
bool core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len)
{
struct core_mmu_table_info tbl_info;