diff --git a/libcpu/risc-v/common64/mmu.c b/libcpu/risc-v/common64/mmu.c
index 6cab811eca7..19c7f7c0195 100644
--- a/libcpu/risc-v/common64/mmu.c
+++ b/libcpu/risc-v/common64/mmu.c
@@ -691,11 +691,13 @@ void rt_hw_mem_setup_early(void)
      * identical mapping,
      * PC are still at lower region before relocating to high memory
      */
-    for (size_t i = 0; i < __SIZE(PPN0_BIT); i++)
-    {
-        early_pgtbl[i] = COMBINEPTE(ps, MMU_MAP_EARLY);
-        ps += L1_PAGE_SIZE;
-    }
+    rt_ubase_t pg_idx;
+    /* Round symb_pc down to an L1_PAGE_SIZE boundary: the MMU works with
+     * page-aligned addresses, and this ensures the text region is covered
+     * by the mapping. */
+    ps = (rt_ubase_t)symb_pc & (~(L1_PAGE_SIZE - 1));
+    pg_idx = GET_L1(ps);
+    early_pgtbl[pg_idx] = COMBINEPTE(ps, MMU_MAP_EARLY);
 
     /* relocate text region */
     __asm__ volatile("la %0, _start\n" : "=r"(ps));
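
For context, here is a minimal, standalone sketch of the round-down-and-index arithmetic the patch introduces. It assumes Sv39 translation, where an L1 entry maps a 1 GiB region (`L1_PAGE_SIZE = 1UL << 30`) and the L1 index is VPN[2], bits [38:30] of the address. The `GET_L1` definition and the sample `symb_pc` value below are simplified stand-ins for illustration, not the kernel's actual macros:

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed Sv39 layout: one L1 entry covers 1 GiB. */
#define L1_PAGE_SIZE (1UL << 30)
/* Stand-in for the kernel's GET_L1: extract the 9-bit VPN[2] field. */
#define GET_L1(addr) (((uintptr_t)(addr) >> 30) & 0x1ff)

int main(void)
{
    uintptr_t symb_pc = 0x80200400UL; /* hypothetical early-boot PC */

    /* Round down to the base of the 1 GiB page containing symb_pc,
     * mirroring: ps = (rt_ubase_t)symb_pc & (~(L1_PAGE_SIZE - 1)); */
    uintptr_t ps = symb_pc & ~(L1_PAGE_SIZE - 1);

    printf("ps     = 0x%lx\n", (unsigned long)ps);       /* 0x80000000 */
    printf("pg_idx = %lu\n", (unsigned long)GET_L1(ps)); /* 2 */
    return 0;
}
```

Compared with the removed loop, which filled `__SIZE(PPN0_BIT)` consecutive L1 entries, the new code appears to map only the single gigapage containing `symb_pc`; the round-down guarantees that mapping starts at the page base, so the early text region is reachable as long as it fits inside that one L1 page.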