diff --git a/include/lib/xlat_tables/xlat_tables_v2.h b/include/lib/xlat_tables/xlat_tables_v2.h
index 4bd0bb2..8c0a567 100644
--- a/include/lib/xlat_tables/xlat_tables_v2.h
+++ b/include/lib/xlat_tables/xlat_tables_v2.h
@@ -34,6 +34,12 @@
         MAP_REGION(_adr, _adr, _sz, _attr)
 
 /*
+ * Helper macro to define entries for mmap_region_t. It sets 'pa' as given and
+ * 'va' to 0 for each region. To be used with mmap_add_alloc_va().
+ */
+#define MAP_REGION_ALLOC_VA(pa, sz, attr)	MAP_REGION(pa, 0, sz, attr)
+
+/*
  * Helper macro to define an mmap_region_t to map with the desired granularity
  * of translation tables.
  *
@@ -219,6 +225,21 @@
 void mmap_add(const mmap_region_t *mm);
 void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm);
 
+/*
+ * Add a region with defined base PA. Returns base VA calculated using the
+ * highest existing region in the mmap array even if it fails to allocate the
+ * region.
+ */
+void mmap_add_region_alloc_va(unsigned long long base_pa, uintptr_t *base_va,
+                              size_t size, unsigned int attr);
+void mmap_add_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);
+
+/*
+ * Add an array of static regions with defined base PA, and fill the base VA
+ * field in the array of structs. This function can only be used before
+ * initializing the translation tables. The regions cannot be removed afterwards.
+ */
+void mmap_add_alloc_va(mmap_region_t *mm);
 
 #if PLAT_XLAT_TABLES_DYNAMIC
 /*
@@ -237,6 +258,21 @@
 int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);
 
 /*
+ * Add a dynamic region with defined base PA. Returns base VA calculated using
+ * the highest existing region in the mmap array even if it fails to allocate
+ * the region.
+ *
+ * mmap_add_dynamic_region_alloc_va() returns the allocated VA in 'base_va'.
+ * mmap_add_dynamic_region_alloc_va_ctx() returns it in 'mm->base_va'.
+ *
+ * It returns the same error values as mmap_add_dynamic_region().
+ */
+int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa,
+                                     uintptr_t *base_va,
+                                     size_t size, unsigned int attr);
+int mmap_add_dynamic_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);
+
+/*
  * Remove a region with the specified base VA and size. Only dynamic regions can
  * be removed, and they can be removed even if the translation tables are
  * initialized.
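Usage note (not part of the patch): the sketch below shows how a platform might consume the new static API declared above. The plat_* names, physical addresses, sizes and chosen MT_* attributes are invented for illustration, and the include path is written for the current tree layout and may differ in older trees; only MAP_REGION_ALLOC_VA(), mmap_add_alloc_va(), mmap_add_region_alloc_va() and init_xlat_tables() come from the library.

/* Illustrative platform code, not part of the patch. */
#include <lib/xlat_tables/xlat_tables_v2.h>

#define DEV0_BASE_PA    0x1c090000ULL           /* assumed device PA */
#define SRAM_BASE_PA    0x04000000ULL           /* assumed memory PA */
#define SRAM_SIZE       (2U * 1024U * 1024U)    /* assumed size, 2 MiB */

/* 'va' is left as 0 by MAP_REGION_ALLOC_VA(); it is filled in later. */
static mmap_region_t plat_regions[] = {
        MAP_REGION_ALLOC_VA(DEV0_BASE_PA, PAGE_SIZE,
                            MT_DEVICE | MT_RW | MT_SECURE),
        MAP_REGION_ALLOC_VA(SRAM_BASE_PA, SRAM_SIZE,
                            MT_MEMORY | MT_RW | MT_SECURE),
        {0} /* terminator: granularity == 0 ends mmap_add_alloc_va() */
};

void plat_setup_xlat(void)
{
        uintptr_t dev1_va;

        /* Allocate VAs for the whole array; base_va is written in place. */
        mmap_add_alloc_va(plat_regions);

        /* A single static region can be added the same way. */
        mmap_add_region_alloc_va(DEV0_BASE_PA + PAGE_SIZE, &dev1_va,
                                 PAGE_SIZE, MT_DEVICE | MT_RW | MT_SECURE);

        /* plat_regions[i].base_va and dev1_va now hold the allocated VAs. */
        init_xlat_tables();
}

The zeroed terminator entry matters because mmap_add_alloc_va() stops at the first element whose granularity is 0, as seen in the implementation below.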
diff --git a/lib/xlat_tables_v2/xlat_tables_context.c b/lib/xlat_tables_v2/xlat_tables_context.c
index f180774..b887427 100644
--- a/lib/xlat_tables_v2/xlat_tables_context.c
+++ b/lib/xlat_tables_v2/xlat_tables_context.c
@@ -38,6 +38,25 @@
         mmap_add_ctx(&tf_xlat_ctx, mm);
 }
 
+void mmap_add_region_alloc_va(unsigned long long base_pa, uintptr_t *base_va,
+                              size_t size, unsigned int attr)
+{
+        mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);
+
+        mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, &mm);
+
+        *base_va = mm.base_va;
+}
+
+void mmap_add_alloc_va(mmap_region_t *mm)
+{
+        while (mm->granularity != 0U) {
+                assert(mm->base_va == 0U);
+                mmap_add_region_alloc_va_ctx(&tf_xlat_ctx, mm);
+                mm++;
+        }
+}
+
 #if PLAT_XLAT_TABLES_DYNAMIC
 
 int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
@@ -48,6 +67,20 @@
         return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
 }
 
+int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa,
+                                     uintptr_t *base_va, size_t size,
+                                     unsigned int attr)
+{
+        mmap_region_t mm = MAP_REGION_ALLOC_VA(base_pa, size, attr);
+
+        int rc = mmap_add_dynamic_region_alloc_va_ctx(&tf_xlat_ctx, &mm);
+
+        *base_va = mm.base_va;
+
+        return rc;
+}
+
+
 int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
 {
         return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
diff --git a/lib/xlat_tables_v2/xlat_tables_core.c b/lib/xlat_tables_v2/xlat_tables_core.c
index 8ced76e..185473a 100644
--- a/lib/xlat_tables_v2/xlat_tables_core.c
+++ b/lib/xlat_tables_v2/xlat_tables_core.c
@@ -811,6 +811,80 @@
         ctx->max_va = end_va;
 }
 
+/*
+ * Determine the table level closest to the initial lookup level that
+ * can describe this translation. Then, align the base VA to the next block
+ * at the determined level.
+ */
+static void mmap_alloc_va_align_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
+{
+        /*
+         * By OR'ing the size and the base PA, the resulting alignment is the
+         * one corresponding to the smaller boundary of the two.
+         *
+         * There are three different cases. For example (for 4 KiB page size):
+         *
+         * +--------------+------------------++--------------+
+         * | PA alignment | Size multiple of || VA alignment |
+         * +--------------+------------------++--------------+
+         * |     2 MiB    |       2 MiB      ||     2 MiB    | (1)
+         * |     2 MiB    |       4 KiB      ||     4 KiB    | (2)
+         * |     4 KiB    |       2 MiB      ||     4 KiB    | (3)
+         * +--------------+------------------++--------------+
+         *
+         * - In (1), it is possible to take advantage of the alignment of the PA
+         *   and the size of the region to use a level 2 translation table
+         *   instead of a level 3 one.
+         *
+         * - In (2), the size is smaller than a level 2 block entry, so a
+         *   level 3 table is needed to describe the region; otherwise the
+         *   library would map more memory than desired.
+         *
+         * - In (3), even though the region has the size of one level 2 block
+         *   entry, it isn't possible to describe the translation with a level 2
+         *   block entry because of the alignment of the base PA.
+         *
+         *   Only bits 47:21 of a level 2 block descriptor are used by the MMU;
+         *   bits 20:0 of the resulting address are 0 in this case. Because of
+         *   this, the PA generated as a result of this translation is aligned to
+         *   2 MiB. The PA that was requested to be mapped is aligned to 4 KiB,
+         *   though, which means that the resulting translation is incorrect.
+         *   The only way to prevent this is by using a finer granularity.
+         */
+        unsigned long long align_check;
+
+        align_check = mm->base_pa | (unsigned long long)mm->size;
+
+        /*
+         * Assume it is always aligned to level 3. There's no need to check that
+         * level because its block size is PAGE_SIZE. The checks to verify that
+         * the addresses and size are aligned to PAGE_SIZE are inside
+         * mmap_add_region.
+         */
+        for (unsigned int level = ctx->base_level; level <= 2U; ++level) {
+
+                if ((align_check & XLAT_BLOCK_MASK(level)) != 0U)
+                        continue;
+
+                mm->base_va = round_up(mm->base_va, XLAT_BLOCK_SIZE(level));
+                return;
+        }
+}
+
+void mmap_add_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
+{
+        mm->base_va = ctx->max_va + 1UL;
+
+        assert(mm->size > 0U);
+
+        mmap_alloc_va_align_ctx(ctx, mm);
+
+        /* Detect overflows. More checks are done in mmap_add_region_check(). */
+        assert(mm->base_va > ctx->max_va);
+
+        mmap_add_region_ctx(ctx, mm);
+}
+
 void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
 {
         const mmap_region_t *mm_cursor = mm;
@@ -931,6 +1005,23 @@
         return 0;
 }
 
+int mmap_add_dynamic_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
+{
+        mm->base_va = ctx->max_va + 1UL;
+
+        if (mm->size == 0U)
+                return 0;
+
+        mmap_alloc_va_align_ctx(ctx, mm);
+
+        /* Detect overflows. More checks are done in mmap_add_region_check(). */
+        if (mm->base_va < ctx->max_va) {
+                return -ENOMEM;
+        }
+
+        return mmap_add_dynamic_region_ctx(ctx, mm);
+}
+
 /*
  * Removes the region with given base Virtual Address and size from the given
  * context.
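A similar sketch for the dynamic variant, again not part of the patch and only meaningful when built with PLAT_XLAT_TABLES_DYNAMIC=1. The map_and_use_device() wrapper and its parameters are invented; the calls, the returned VA and the error behaviour (-ENOMEM on VA overflow, otherwise the same errors as mmap_add_dynamic_region()) follow the code above.

/* Illustrative runtime code, not part of the patch. */
#include <lib/xlat_tables/xlat_tables_v2.h>

int map_and_use_device(unsigned long long dev_pa, size_t dev_size)
{
        uintptr_t dev_va;
        int ret;

        /*
         * The allocated VA starts just above ctx->max_va and is then aligned
         * by mmap_alloc_va_align_ctx(): with both dev_pa and dev_size 2 MiB
         * aligned it lands on a 2 MiB boundary so a level 2 block mapping can
         * be used (case (1) in the table above); otherwise it stays page
         * aligned (cases (2) and (3)).
         */
        ret = mmap_add_dynamic_region_alloc_va(dev_pa, &dev_va, dev_size,
                                               MT_DEVICE | MT_RW | MT_SECURE);
        if (ret != 0) {
                return ret;
        }

        /* ... access the device through dev_va ... */

        /* Dynamic regions are removed using the VA that was handed back. */
        return mmap_remove_dynamic_region(dev_va, dev_size);
}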