
Static memory management in OP-TEE
All of the OP-TEE OS core's static memory regions are kept in the static_memory_map array. With #define MAX_MMAP_REGIONS 13, the array below has 14 slots in total; the extra (+1) slot holds the MEM_AREA_NOTYPE entry that terminates the list, so at most 13 real regions can be described.
static struct tee_mmap_region static_memory_map[MAX_MMAP_REGIONS + 1] __early_bss;
Entries are added to this array from core_init_mmu_map():
    if (!mem_map_inited)
        init_mem_map(static_memory_map, ARRAY_SIZE(static_memory_map));
If the map has not been initialized yet, init_mem_map() is called to build it:
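Each entry is a struct tee_mmap_region, which ties a physical region to the virtual address and attributes it will be mapped with. For reference, its layout in core_mmu.h of this OP-TEE generation is roughly the following (the field comments are mine):

struct tee_mmap_region {
    unsigned int type;        /* MEM_AREA_xxx memory type */
    unsigned int region_size; /* mapping granularity: pgdir or small page */
    paddr_t pa;               /* physical base address */
    vaddr_t va;               /* virtual base address */
    size_t size;              /* region size in bytes */
    uint32_t attr;            /* attributes from core_mmu_type_to_attr() */
};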
static void init_mem_map(struct tee_mmap_region *memory_map, size_t num_elems)
{
    const struct core_mmu_phys_mem *mem;
    struct tee_mmap_region *map;
    size_t last = 0;
    size_t __maybe_unused count = 0;
    vaddr_t va;
    vaddr_t __maybe_unused end;
    bool __maybe_unused va_is_secure = true; /* any init value fits */
// This for loop walks the physical-memory entries registered between
// __start_phys_mem_map_section and __end_phys_mem_map_section and adds
// each of them to static_memory_map via add_phys_mem()
    for (mem = &__start_phys_mem_map_section;
         mem < &__end_phys_mem_map_section; mem++) {
        struct core_mmu_phys_mem m = *mem;

        if (!m.size)
            continue;

        /* Only unmapped virtual range may have a null phys addr */
        assert(m.addr || !core_mmu_type_to_attr(m.type));
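        /*
         * Device (IO) regions below are expanded to full translation-table
         * granules. E.g., assuming CORE_MMU_PGDIR_SIZE is 1 MiB (non-LPAE
         * short descriptors), a UART at 0x1c090000 with size 0x1000
         * becomes addr = 0x1c000000 and
         * size = ROUNDUP(0x1000 + 0x90000, 0x100000) = 0x100000,
         * i.e. the whole 1 MiB block containing the device gets mapped.
         * (Hypothetical address, for illustration only.)
         */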

        if (m.type == MEM_AREA_IO_NSEC || m.type == MEM_AREA_IO_SEC) {
            m.addr = ROUNDDOWN(m.addr, CORE_MMU_PGDIR_SIZE);
            m.size = ROUNDUP(m.size + (mem->addr - m.addr),
                     CORE_MMU_PGDIR_SIZE);
        }
        add_phys_mem(memory_map, num_elems, &m, &last);
    }
// Check and add SDP (Secure Data Path) memory areas
    verify_sdp_mem_areas(memory_map, num_elems);
// Reserve a virtual address range of type MEM_AREA_RES_VASPACE
    add_va_space(memory_map, num_elems, MEM_AREA_RES_VASPACE,
             RES_VASPACE_SIZE, &last);
// Reserve a virtual address range of type MEM_AREA_SHM_VASPACE
    add_va_space(memory_map, num_elems, MEM_AREA_SHM_VASPACE,
             RES_VASPACE_SIZE, &last);

    memory_map[last].type = MEM_AREA_NOTYPE;

    /*
     * Assign region sizes, note that MEM_AREA_TEE_RAM always uses
     * SMALL_PAGE_SIZE if paging is enabled.
     */
// Assign map->region_size based on the alignment of pa and size
    for (map = memory_map; map->type != MEM_AREA_NOTYPE; map++) {
        paddr_t mask = map->pa | map->size;
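        /*
         * E.g., with a 1 MiB pgdir: pa = 0x40100000, size = 0x100000
         * gives mask = 0x40100000, whose low 20 bits are all zero, so
         * the region can be mapped with pgdir entries. pa = 0x40001000,
         * size = 0x2000 gives mask = 0x40003000, which is only 4 KiB
         * aligned, so small pages are used. (Illustrative values.)
         */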

        if (!(mask & CORE_MMU_PGDIR_MASK))
            map->region_size = CORE_MMU_PGDIR_SIZE;
        else if (!(mask & SMALL_PAGE_MASK))
            map->region_size = SMALL_PAGE_SIZE;
        else
            panic("Impossible memory alignment");

#ifdef CFG_WITH_PAGER
        if (map_is_tee_ram(map))
            map->region_size = SMALL_PAGE_SIZE;
#endif
    }

    /*
     * To ease mapping and lower use of xlat tables, sort mapping
     * description moving small-page regions after the pgdir regions.
     */
// Sort the entries by region_size, largest first, so that pgdir-mapped
// regions come before small-page regions
    qsort(memory_map, last, sizeof(struct tee_mmap_region),
        cmp_mmap_by_bigger_region_size);

#if !defined(CFG_WITH_LPAE)
    /*
     * 32bit MMU descriptors cannot mix secure and non-secure mapping in
     * the same level2 table. Hence sort secure mapping from non-secure
     * mapping.
     */
    for (count = 0, map = memory_map; map_is_pgdir(map); count++, map++)
        ;
// Second sort: group the remaining small-page regions by their
// secure/non-secure attribute
    qsort(memory_map + count, last - count, sizeof(struct tee_mmap_region),
        cmp_mmap_by_secure_attr);
#endif

    /*
     * Map flat mapped addresses first.
     * 'va' will store the lower address of the flat-mapped areas to later
     * setup the virtual mapping of the non flat-mapped areas.
     */
    va = (vaddr_t)~0UL;
    end = 0;
    for (map = memory_map; map->type != MEM_AREA_NOTYPE; map++) {
        if (!map_is_flat_mapped(map))
            continue;

        map->attr = core_mmu_type_to_attr(map->type);
        map->va = map->pa;
        va = MIN(va, ROUNDDOWN(map->va, map->region_size));
        end = MAX(end, ROUNDUP(map->va + map->size, map->region_size));
    }
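    /*
     * All flat-mapped (identity-mapped, va == pa) regions must fall
     * inside the virtual window reserved for TEE RAM, which the asserts
     * below check.
     */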
    assert(va >= CFG_TEE_RAM_START);
    assert(end <= CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE);

    if (core_mmu_place_tee_ram_at_top(va)) {
        /* Map non-flat mapped addresses below flat mapped addresses */
        for (map = memory_map; map->type != MEM_AREA_NOTYPE; map++) {
            if (map_is_flat_mapped(map))
                continue;

#if !defined(CFG_WITH_LPAE)
            if (va_is_secure != map_is_secure(map)) {
                va_is_secure = !va_is_secure;
                va = ROUNDDOWN(va, CORE_MMU_PGDIR_SIZE);
            }
#endif
            map->attr = core_mmu_type_to_attr(map->type);
            va -= map->size;
            va = ROUNDDOWN(va, map->region_size);
#if !defined(CFG_WITH_LPAE)
            /* Mapping does not yet support sharing L2 tables */
            va = ROUNDDOWN(va, CORE_MMU_PGDIR_SIZE);
#endif
            map->va = va;
        }
    } else {
        /* Map non-flat mapped addresses above flat mapped addresses */
        va = ROUNDUP(va + CFG_TEE_RAM_VA_SIZE, CORE_MMU_PGDIR_SIZE);
        for (map = memory_map; map->type != MEM_AREA_NOTYPE; map++) {
            if (map_is_flat_mapped(map))
                continue;

#if !defined(CFG_WITH_LPAE)
            if (va_is_secure != map_is_secure(map)) {
                va_is_secure = !va_is_secure;
                va = ROUNDUP(va, CORE_MMU_PGDIR_SIZE);
            }
#endif
            map->attr = core_mmu_type_to_attr(map->type);
            va = ROUNDUP(va, map->region_size);
#if !defined(CFG_WITH_LPAE)
            /* Mapping does not yet support sharing L2 tables */
            va = ROUNDUP(va, CORE_MMU_PGDIR_SIZE);
#endif
            map->va = va;
            va += map->size;
        }
    }
// Final sort: order all entries by ascending virtual address
    qsort(memory_map, last, sizeof(struct tee_mmap_region),
        cmp_mmap_by_lower_va);
// Finally, dump_mmap_table() prints the table, which by now has been
// sorted two or three times (the secure/non-secure pass only runs
// without CFG_WITH_LPAE)
    dump_mmap_table(memory_map);
}
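For context on where the first loop's input comes from: platform code registers its physical memory at build time with the register_phys_mem() macro, which places a struct core_mmu_phys_mem entry into the phys_mem_map_section walked above. A minimal sketch of typical platform registrations follows; the CFG_* values and CONSOLE_UART_BASE are platform-specific assumptions, not fixed addresses:

register_phys_mem(MEM_AREA_TEE_RAM, CFG_TEE_RAM_START, CFG_TEE_RAM_PH_SIZE);
register_phys_mem(MEM_AREA_TA_RAM, CFG_TA_RAM_START, CFG_TA_RAM_SIZE);
register_phys_mem(MEM_AREA_NSEC_SHM, CFG_SHMEM_START, CFG_SHMEM_SIZE);
register_phys_mem(MEM_AREA_IO_SEC, CONSOLE_UART_BASE, CORE_MMU_PGDIR_SIZE);

Entries registered with a size of 0 are simply skipped by the loop (the `if (!m.size) continue;` check).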