
Commit 84ade18

dcpleung authored and cfriedt committed
xtensa: mmu: cosmetic changes to page table variable names
In functions which manipulate both L1 and L2 tables, make the variable names obvious by prefixing them with l1_ or l2_. This is mainly done to avoid confusion when reading through those functions.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
1 parent e709cbe commit 84ade18
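
The rename pattern, in brief (an illustrative before/after, not a hunk from the diff itself; the real hunks follow below):

    /* before: which level does `table` point into? */
    uint32_t *table;

    /* after: the table's level is part of its name */
    uint32_t *l2_table;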

File tree: 1 file changed (+43, -42)


arch/xtensa/core/ptables.c

Lines changed: 43 additions & 42 deletions
@@ -62,14 +62,14 @@ BUILD_ASSERT(CONFIG_MMU_PAGE_SIZE == 0x1000,
  * Each memory domain contains its own l1 page table. The kernel l1 page table is
  * located at the index 0.
  */
-static uint32_t l1_page_table[CONFIG_XTENSA_MMU_NUM_L1_TABLES][XTENSA_L1_PAGE_TABLE_ENTRIES]
+static uint32_t l1_page_tables[CONFIG_XTENSA_MMU_NUM_L1_TABLES][XTENSA_L1_PAGE_TABLE_ENTRIES]
                 __aligned(KB(4));
 
 
 /*
  * That is an alias for the page tables set used by the kernel.
  */
-uint32_t *xtensa_kernel_ptables = (uint32_t *)l1_page_table[0];
+uint32_t *xtensa_kernel_ptables = (uint32_t *)l1_page_tables[0];
 
 /*
  * Each table in the level 2 maps a 4Mb memory range. It consists of 1024 entries each one
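
Aside: the 4Mb figure follows directly from the address split. A minimal sketch of the two-level lookup, assuming 4 KiB pages and 1024-entry tables; the DEMO_* macros below are illustrative stand-ins for the real XTENSA_MMU_L1_POS/XTENSA_MMU_L2_POS definitions in the Xtensa MMU headers:

    #include <stdint.h>

    /* The top 10 bits index the L1 table, the next 10 bits index the L2
     * table, and the low 12 bits are the offset within a 4 KiB page.
     * Each L2 table therefore covers 1024 * 4 KiB = 4 MiB of virtual
     * address space.
     */
    #define DEMO_L1_POS(vaddr) ((uint32_t)(vaddr) >> 22)
    #define DEMO_L2_POS(vaddr) (((uint32_t)(vaddr) >> 12) & 0x3FFU)

    /* e.g. vaddr 0x80403000 -> L1 index 0x201, L2 index 0x003 */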
@@ -84,7 +84,7 @@ static uint32_t l2_page_tables[CONFIG_XTENSA_MMU_NUM_L2_TABLES][XTENSA_L2_PAGE_T
  *
  * @note: The first bit is set because it is used for the kernel page tables.
  */
-static ATOMIC_DEFINE(l1_page_table_track, CONFIG_XTENSA_MMU_NUM_L1_TABLES);
+static ATOMIC_DEFINE(l1_page_tables_track, CONFIG_XTENSA_MMU_NUM_L1_TABLES);
 
 /*
  * This additional variable tracks which l2 tables are in use. This is kept separated from
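
For orientation, a minimal sketch of the allocation pattern these tracking bitmaps support, using Zephyr's atomic bitmap API (claim_slot and NUM_SLOTS are made-up names for illustration):

    #include <zephyr/sys/atomic.h>

    #define NUM_SLOTS 8
    static ATOMIC_DEFINE(slots_track, NUM_SLOTS);

    /* atomic_test_and_set_bit() returns the bit's previous value, so a
     * false return means the slot was free and is now claimed -- a simple
     * lock-free allocator, the same idea behind alloc_l1_table() and
     * alloc_l2_table() in this file.
     */
    static int claim_slot(void)
    {
            for (int i = 0; i < NUM_SLOTS; i++) {
                    if (!atomic_test_and_set_bit(slots_track, i)) {
                            return i;
                    }
            }
            return -1; /* all slots in use */
    }

    /* A slot is released with atomic_clear_bit(slots_track, i). */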
@@ -217,7 +217,7 @@ static inline uint32_t *alloc_l2_table(void)
 static void map_memory_range(const uint32_t start, const uint32_t end,
                              const uint32_t attrs, const uint32_t options)
 {
-        uint32_t page, *table;
+        uint32_t page;
         bool shared = !!(attrs & XTENSA_MMU_MAP_SHARED);
         bool do_save_attrs = (options & OPTION_SAVE_ATTRS) == OPTION_SAVE_ATTRS;
         uint32_t ring, sw_attrs, sw_ring, pte_sw;
@@ -228,26 +228,27 @@ static void map_memory_range(const uint32_t start, const uint32_t end,
         pte_sw = XTENSA_MMU_PTE_SW(sw_ring, sw_attrs);
 
         for (page = start; page < end; page += CONFIG_MMU_PAGE_SIZE) {
+                uint32_t *l2_table;
                 uint32_t pte = XTENSA_MMU_PTE(page, ring, pte_sw, attrs);
                 uint32_t l2_pos = XTENSA_MMU_L2_POS(page);
                 uint32_t l1_pos = XTENSA_MMU_L1_POS(page);
 
                 if (is_pte_illegal(xtensa_kernel_ptables[l1_pos])) {
-                        table = alloc_l2_table();
+                        l2_table = alloc_l2_table();
 
-                        __ASSERT(table != NULL, "There is no l2 page table available to "
-                                "map 0x%08x\n", page);
+                        __ASSERT(l2_table != NULL,
+                                 "There is no l2 page table available to map 0x%08x\n", page);
 
-                        init_page_table(table, XTENSA_L2_PAGE_TABLE_ENTRIES,
+                        init_page_table(l2_table, XTENSA_L2_PAGE_TABLE_ENTRIES,
                                         XTENSA_MMU_PTE_L2_ILLEGAL);
 
                         xtensa_kernel_ptables[l1_pos] =
-                                XTENSA_MMU_PTE((uint32_t)table, XTENSA_MMU_KERNEL_RING,
+                                XTENSA_MMU_PTE((uint32_t)l2_table, XTENSA_MMU_KERNEL_RING,
                                                0, XTENSA_MMU_PAGE_TABLE_ATTR);
                 }
 
-                table = (uint32_t *)(xtensa_kernel_ptables[l1_pos] & XTENSA_MMU_PTE_PPN_MASK);
-                table[l2_pos] = pte;
+                l2_table = (uint32_t *)(xtensa_kernel_ptables[l1_pos] & XTENSA_MMU_PTE_PPN_MASK);
+                l2_table[l2_pos] = pte;
         }
 }
 
@@ -289,7 +290,7 @@ static void xtensa_init_page_tables(void)
 
         init_page_table(xtensa_kernel_ptables, XTENSA_L1_PAGE_TABLE_ENTRIES,
                         XTENSA_MMU_PTE_L1_ILLEGAL);
-        atomic_set_bit(l1_page_table_track, 0);
+        atomic_set_bit(l1_page_tables_track, 0);
 
         for (entry = 0; entry < ARRAY_SIZE(mmu_zephyr_ranges); entry++) {
                 const struct xtensa_mmu_range *range = &mmu_zephyr_ranges[entry];
@@ -308,8 +309,8 @@ static void xtensa_init_page_tables(void)
          * must be writable, obviously). They shouldn't be left at
          * the default.
          */
-        map_memory_range((uint32_t) &l1_page_table[0],
-                         (uint32_t) &l1_page_table[CONFIG_XTENSA_MMU_NUM_L1_TABLES],
+        map_memory_range((uint32_t) &l1_page_tables[0],
+                         (uint32_t) &l1_page_tables[CONFIG_XTENSA_MMU_NUM_L1_TABLES],
                          XTENSA_MMU_PAGE_TABLE_ATTR | XTENSA_MMU_PERM_W, OPTION_SAVE_ATTRS);
         map_memory_range((uint32_t) &l2_page_tables[0],
                          (uint32_t) &l2_page_tables[CONFIG_XTENSA_MMU_NUM_L2_TABLES],
@@ -382,33 +383,33 @@ static bool l2_page_table_map(uint32_t *l1_table, void *vaddr, uintptr_t phys,
 {
         uint32_t l1_pos = XTENSA_MMU_L1_POS((uint32_t)vaddr);
         uint32_t l2_pos = XTENSA_MMU_L2_POS((uint32_t)vaddr);
-        uint32_t *table;
+        uint32_t *l2_table;
 
         sys_cache_data_invd_range((void *)&l1_table[l1_pos], sizeof(l1_table[0]));
 
         if (is_pte_illegal(l1_table[l1_pos])) {
-                table = alloc_l2_table();
+                l2_table = alloc_l2_table();
 
-                if (table == NULL) {
+                if (l2_table == NULL) {
                         return false;
                 }
 
-                init_page_table(table, XTENSA_L2_PAGE_TABLE_ENTRIES, XTENSA_MMU_PTE_L2_ILLEGAL);
+                init_page_table(l2_table, XTENSA_L2_PAGE_TABLE_ENTRIES, XTENSA_MMU_PTE_L2_ILLEGAL);
 
-                l1_table[l1_pos] = XTENSA_MMU_PTE((uint32_t)table, XTENSA_MMU_KERNEL_RING,
+                l1_table[l1_pos] = XTENSA_MMU_PTE((uint32_t)l2_table, XTENSA_MMU_KERNEL_RING,
                                                   0, XTENSA_MMU_PAGE_TABLE_ATTR);
 
                 sys_cache_data_flush_range((void *)&l1_table[l1_pos], sizeof(l1_table[0]));
         }
 
-        table = (uint32_t *)(l1_table[l1_pos] & XTENSA_MMU_PTE_PPN_MASK);
-        table[l2_pos] = XTENSA_MMU_PTE(phys, is_user ? XTENSA_MMU_USER_RING :
-                                       XTENSA_MMU_KERNEL_RING,
-                                       XTENSA_MMU_PTE_SW(XTENSA_MMU_KERNEL_RING,
-                                                         XTENSA_MMU_PTE_ATTR_ILLEGAL),
-                                       flags);
+        l2_table = (uint32_t *)(l1_table[l1_pos] & XTENSA_MMU_PTE_PPN_MASK);
+        l2_table[l2_pos] = XTENSA_MMU_PTE(phys, is_user ? XTENSA_MMU_USER_RING :
+                                          XTENSA_MMU_KERNEL_RING,
+                                          XTENSA_MMU_PTE_SW(XTENSA_MMU_KERNEL_RING,
+                                                            XTENSA_MMU_PTE_ATTR_ILLEGAL),
+                                          flags);
 
-        sys_cache_data_flush_range((void *)&table[l2_pos], sizeof(table[0]));
+        sys_cache_data_flush_range((void *)&l2_table[l2_pos], sizeof(l2_table[0]));
         xtensa_tlb_autorefill_invalidate();
 
         return true;
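
Worth noting while reading this hunk: every PTE store is paired with a sys_cache_data_flush_range() of just that entry, and reads are preceded by an invalidate, presumably because the hardware refill and other cores read the tables from memory rather than through this CPU's data cache. A compressed sketch of that discipline; the helpers below are hypothetical, while the Zephyr cache calls are the ones actually used in this file:

    #include <zephyr/cache.h>
    #include <stdint.h>

    /* Hypothetical helper: publish one page table entry. */
    static void pte_publish(uint32_t *entry, uint32_t pte)
    {
            *entry = pte;
            /* Write the entry back so the table walker sees it. */
            sys_cache_data_flush_range((void *)entry, sizeof(*entry));
    }

    /* Hypothetical helper: fetch a fresh copy of one entry. */
    static uint32_t pte_fetch(uint32_t *entry)
    {
            sys_cache_data_invd_range((void *)entry, sizeof(*entry));
            return *entry;
    }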
@@ -551,7 +552,7 @@ static bool l2_page_table_unmap(uint32_t *l1_table, void *vaddr)
         uint32_t l1_pos = XTENSA_MMU_L1_POS((uint32_t)vaddr);
         uint32_t l2_pos = XTENSA_MMU_L2_POS((uint32_t)vaddr);
         uint32_t *l2_table;
-        uint32_t table_pos;
+        uint32_t table_trk_pos;
         bool exec;
 
         sys_cache_data_invd_range((void *)&l1_table[l1_pos], sizeof(l1_table[0]));
@@ -589,8 +590,8 @@ static bool l2_page_table_unmap(uint32_t *l1_table, void *vaddr)
         l1_table[l1_pos] = XTENSA_MMU_PTE_L1_ILLEGAL;
         sys_cache_data_flush_range((void *)&l1_table[l1_pos], sizeof(l1_table[0]));
 
-        table_pos = (l2_table - (uint32_t *)l2_page_tables) / (XTENSA_L2_PAGE_TABLE_ENTRIES);
-        atomic_clear_bit(l2_page_tables_track, table_pos);
+        table_trk_pos = (l2_table - (uint32_t *)l2_page_tables) / (XTENSA_L2_PAGE_TABLE_ENTRIES);
+        atomic_clear_bit(l2_page_tables_track, table_trk_pos);
 
 end:
         /* Need to invalidate L2 page table as it is no longer valid. */
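
The table_trk_pos computation above recovers a table's slot index from a raw pointer into the pool. A self-contained sketch of the same arithmetic (pool, ENTRIES_PER_TABLE, and slot_of are illustrative names):

    #include <stdint.h>

    #define ENTRIES_PER_TABLE 1024
    static uint32_t pool[8][ENTRIES_PER_TABLE];

    /* Subtracting uint32_t pointers yields an element count, so dividing
     * by the per-table entry count gives the table's index in the pool --
     * exactly the bit to clear in the tracking bitmap.
     */
    static uint32_t slot_of(uint32_t *table)
    {
            return (uint32_t)((table - &pool[0][0]) / ENTRIES_PER_TABLE);
    }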
@@ -696,7 +697,7 @@ void xtensa_mmu_tlb_shootdown(void)
         /* We don't have information on which page tables have changed,
          * so we just invalidate the cache for all L1 page tables.
          */
-        sys_cache_data_invd_range((void *)l1_page_table, sizeof(l1_page_table));
+        sys_cache_data_invd_range((void *)l1_page_tables, sizeof(l1_page_tables));
         sys_cache_data_invd_range((void *)l2_page_tables, sizeof(l2_page_tables));
 }
 
@@ -793,8 +794,8 @@ static inline uint32_t *alloc_l1_table(void)
         uint16_t idx;
 
         for (idx = 0; idx < CONFIG_XTENSA_MMU_NUM_L1_TABLES; idx++) {
-                if (!atomic_test_and_set_bit(l1_page_table_track, idx)) {
-                        return (uint32_t *)&l1_page_table[idx];
+                if (!atomic_test_and_set_bit(l1_page_tables_track, idx)) {
+                        return (uint32_t *)&l1_page_tables[idx];
                 }
         }
 
@@ -804,9 +805,9 @@ static inline uint32_t *alloc_l1_table(void)
 static uint32_t *dup_table(void)
 {
         uint16_t i, j;
-        uint32_t *dst_table = alloc_l1_table();
+        uint32_t *l1_table = alloc_l1_table();
 
-        if (!dst_table) {
+        if (!l1_table) {
                 return NULL;
         }
 
@@ -815,7 +816,7 @@ static uint32_t *dup_table(void)
 
                 if (is_pte_illegal(xtensa_kernel_ptables[i]) ||
                     (i == XTENSA_MMU_L1_POS(XTENSA_MMU_PTEVADDR))) {
-                        dst_table[i] = XTENSA_MMU_PTE_L1_ILLEGAL;
+                        l1_table[i] = XTENSA_MMU_PTE_L1_ILLEGAL;
                         continue;
                 }
 
@@ -832,15 +833,15 @@ static uint32_t *dup_table(void)
                 /* The page table is using kernel ASID because we don't
                  * user thread manipulate it.
                  */
-                dst_table[i] = XTENSA_MMU_PTE((uint32_t)l2_table, XTENSA_MMU_KERNEL_RING,
-                                              0, XTENSA_MMU_PAGE_TABLE_ATTR);
+                l1_table[i] = XTENSA_MMU_PTE((uint32_t)l2_table, XTENSA_MMU_KERNEL_RING,
+                                             0, XTENSA_MMU_PAGE_TABLE_ATTR);
 
                 sys_cache_data_flush_range((void *)l2_table, XTENSA_L2_PAGE_TABLE_SIZE);
         }
 
-        sys_cache_data_flush_range((void *)dst_table, XTENSA_L1_PAGE_TABLE_SIZE);
+        sys_cache_data_flush_range((void *)l1_table, XTENSA_L1_PAGE_TABLE_SIZE);
 
-        return dst_table;
+        return l1_table;
 
 err:
         /* TODO: Cleanup failed allocation*/
@@ -894,7 +895,7 @@ int arch_mem_domain_init(struct k_mem_domain *domain)
                 return ret;
         }
 
-static void region_map_update(uint32_t *ptables, uintptr_t start,
+static void region_map_update(uint32_t *l1_table, uintptr_t start,
                               size_t size, uint32_t ring, uint32_t flags, uint32_t option)
 {
         for (size_t offset = 0; offset < size; offset += CONFIG_MMU_PAGE_SIZE) {
@@ -904,9 +905,9 @@ static void region_map_update(uint32_t *ptables, uintptr_t start,
                 uint32_t l1_pos = XTENSA_MMU_L1_POS(page);
                 uint32_t l2_pos = XTENSA_MMU_L2_POS(page);
                 /* Make sure we grab a fresh copy of L1 page table */
-                sys_cache_data_invd_range((void *)&ptables[l1_pos], sizeof(ptables[0]));
+                sys_cache_data_invd_range((void *)&l1_table[l1_pos], sizeof(l1_table[0]));
 
-                l2_table = (uint32_t *)(ptables[l1_pos] & XTENSA_MMU_PTE_PPN_MASK);
+                l2_table = (uint32_t *)(l1_table[l1_pos] & XTENSA_MMU_PTE_PPN_MASK);
 
                 sys_cache_data_invd_range((void *)&l2_table[l2_pos], sizeof(l2_table[0]));