/*
* Assign region sizes, note that MEM_AREA_TEE_RAM always uses
* SMALL_PAGE_SIZE if paging is enabled.
*/
/*
 * Pick the mapping granularity for each region: if both the physical
 * base and the size are pgdir-aligned, map with pgdir-sized (level-1)
 * entries, otherwise fall back to small (4K) pages. Anything not even
 * small-page aligned is a configuration error.
 */
for (map = memory_map; map->type != MEM_AREA_NOTYPE; map++) {
/* OR-ing pa and size exposes the lowest set alignment bit of either */
paddr_t mask = map->pa | map->size;
if (!(mask & CORE_MMU_PGDIR_MASK))
map->region_size = CORE_MMU_PGDIR_SIZE;
else if (!(mask & SMALL_PAGE_MASK))
map->region_size = SMALL_PAGE_SIZE;
else
panic("Impossible memory alignment");
#ifdef CFG_WITH_PAGER
/* With the pager, TEE RAM must be paged at small-page granularity */
if (map_is_tee_ram(map))
map->region_size = SMALL_PAGE_SIZE;
#endif
}
/*
* To ease mapping and lower use of xlat tables, sort mapping
* description moving small-page regions after the pgdir regions.
*/
/*
 * Sort by descending region_size: pgdir-sized regions first, then
 * small-page regions (see cmp_mmap_by_bigger_region_size).
 */
qsort(memory_map, last, sizeof(struct tee_mmap_region),
cmp_mmap_by_bigger_region_size);
#if !defined(CFG_WITH_LPAE)
/*
* 32bit MMU descriptors cannot mix secure and non-secure mapping in
* the same level2 table. Hence sort secure mapping from non-secure
* mapping.
*/
/* Count the leading pgdir-sized regions; they need no re-sorting */
for (count = 0, map = memory_map; map_is_pgdir(map); count++, map++)
;
/* Sort only the trailing small-page regions by their secure attribute */
qsort(memory_map + count, last - count, sizeof(struct tee_mmap_region),
cmp_mmap_by_secure_attr);
#endif
/*
* Map flat mapped addresses first.
* 'va' will store the lower address of the flat-mapped areas to later
* setup the virtual mapping of the non flat-mapped areas.
*/
/* Start with an empty range: va = max address, end = 0 */
va = (vaddr_t)~0UL;
end = 0;
for (map = memory_map; map->type != MEM_AREA_NOTYPE; map++) {
if (!map_is_flat_mapped(map))
continue;