Commit 95d260e

dcpleung authored and cfriedt committed
xtensa: mmu/ptables: rename flags to attrs under arch_mem_map()
arch_mem_map() takes in some flags to describe the to-be-mapped memory regions' permissions and cache status. When the flags are translated, they become attributes in the PTEs. So, for the functions called by arch_mem_map() and beyond, rename flags to attrs to better describe their purpose.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
1 parent 84ade18 commit 95d260e
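
To make the flags-to-attrs translation concrete, here is a minimal, hypothetical caller sketch. The flag names, attribute names, and the arch_mem_map() prototype are taken from the diff below; the wrapper function and the local forward declaration are assumptions for illustration only, since most code would go through the higher-level k_mem_* APIs rather than the arch interface directly.

#include <zephyr/kernel.h>   /* expected to provide K_MEM_PERM_RW, K_MEM_CACHE_WB, KB() */

/* Prototype as it appears in the diff below; in-tree it comes from the
 * arch interface header (exact header path not assumed here).
 */
void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags);

/* Hypothetical helper: map one 4 KiB page as writable and write-back
 * cached, kernel-only (no K_MEM_PERM_USER). Per this commit, the generic
 * K_MEM_* flags passed here are translated inside arch_mem_map() into the
 * Xtensa PTE attribute bits XTENSA_MMU_PERM_W | XTENSA_MMU_CACHED_WB,
 * i.e. the value now carried in the variable renamed from flags to attrs.
 */
static void map_one_page(void *virt, uintptr_t phys)
{
	arch_mem_map(virt, phys, KB(4), K_MEM_PERM_RW | K_MEM_CACHE_WB);
}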

1 file changed: +17 −17 lines changed


arch/xtensa/core/ptables.c

Lines changed: 17 additions & 17 deletions
@@ -379,7 +379,7 @@ __weak void arch_reserved_pages_update(void)
 #endif /* CONFIG_ARCH_HAS_RESERVED_PAGE_FRAMES */
 
 static bool l2_page_table_map(uint32_t *l1_table, void *vaddr, uintptr_t phys,
-			      uint32_t flags, bool is_user)
+			      uint32_t attrs, bool is_user)
 {
 	uint32_t l1_pos = XTENSA_MMU_L1_POS((uint32_t)vaddr);
 	uint32_t l2_pos = XTENSA_MMU_L2_POS((uint32_t)vaddr);
@@ -407,20 +407,20 @@ static bool l2_page_table_map(uint32_t *l1_table, void *vaddr, uintptr_t phys,
 					  XTENSA_MMU_KERNEL_RING,
 					  XTENSA_MMU_PTE_SW(XTENSA_MMU_KERNEL_RING,
							    XTENSA_MMU_PTE_ATTR_ILLEGAL),
-					  flags);
+					  attrs);
 
 	sys_cache_data_flush_range((void *)&l2_table[l2_pos], sizeof(l2_table[0]));
 	xtensa_tlb_autorefill_invalidate();
 
 	return true;
 }
 
-static inline void __arch_mem_map(void *va, uintptr_t pa, uint32_t xtensa_flags, bool is_user)
+static inline void __arch_mem_map(void *va, uintptr_t pa, uint32_t new_attrs, bool is_user)
 {
 	bool ret;
 	void *vaddr, *vaddr_uc;
 	uintptr_t paddr, paddr_uc;
-	uint32_t flags, flags_uc;
+	uint32_t attrs, attrs_uc;
 
 	if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP)) {
 		if (sys_cache_is_ptr_cached(va)) {
@@ -439,21 +439,21 @@ static inline void __arch_mem_map(void *va, uintptr_t pa, uint32_t xtensa_flags,
 			paddr_uc = pa;
 		}
 
-		flags_uc = (xtensa_flags & ~XTENSA_MMU_PTE_ATTR_CACHED_MASK);
-		flags = flags_uc | XTENSA_MMU_CACHED_WB;
+		attrs_uc = (new_attrs & ~XTENSA_MMU_PTE_ATTR_CACHED_MASK);
+		attrs = attrs_uc | XTENSA_MMU_CACHED_WB;
 	} else {
 		vaddr = va;
 		paddr = pa;
-		flags = xtensa_flags;
+		attrs = new_attrs;
 	}
 
 	ret = l2_page_table_map(xtensa_kernel_ptables, (void *)vaddr, paddr,
-				flags, is_user);
+				attrs, is_user);
 	__ASSERT(ret, "Cannot map virtual address (%p)", va);
 
 	if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP) && ret) {
 		ret = l2_page_table_map(xtensa_kernel_ptables, (void *)vaddr_uc, paddr_uc,
-					flags_uc, is_user);
+					attrs_uc, is_user);
 		__ASSERT(ret, "Cannot map virtual address (%p)", vaddr_uc);
 	}
 
@@ -470,14 +470,14 @@ static inline void __arch_mem_map(void *va, uintptr_t pa, uint32_t xtensa_flags,
 		domain = CONTAINER_OF(node, struct arch_mem_domain, node);
 
 		ret = l2_page_table_map(domain->ptables, (void *)vaddr, paddr,
-					flags, is_user);
+					attrs, is_user);
 		__ASSERT(ret, "Cannot map virtual address (%p) for domain %p",
 			 vaddr, domain);
 
 		if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP) && ret) {
 			ret = l2_page_table_map(domain->ptables,
 						(void *)vaddr_uc, paddr_uc,
-						flags_uc, is_user);
+						attrs_uc, is_user);
 			__ASSERT(ret, "Cannot map virtual address (%p) for domain %p",
 				 vaddr_uc, domain);
 		}
@@ -492,7 +492,7 @@ void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
 	uint32_t va = (uint32_t)virt;
 	uint32_t pa = (uint32_t)phys;
 	uint32_t rem_size = (uint32_t)size;
-	uint32_t xtensa_flags = 0;
+	uint32_t attrs = 0;
 	k_spinlock_key_t key;
 	bool is_user;
 
@@ -505,10 +505,10 @@ void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
 	switch (flags & K_MEM_CACHE_MASK) {
 
 	case K_MEM_CACHE_WB:
-		xtensa_flags |= XTENSA_MMU_CACHED_WB;
+		attrs |= XTENSA_MMU_CACHED_WB;
 		break;
 	case K_MEM_CACHE_WT:
-		xtensa_flags |= XTENSA_MMU_CACHED_WT;
+		attrs |= XTENSA_MMU_CACHED_WT;
 		break;
 	case K_MEM_CACHE_NONE:
 		__fallthrough;
@@ -517,18 +517,18 @@ void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
 	}
 
 	if ((flags & K_MEM_PERM_RW) == K_MEM_PERM_RW) {
-		xtensa_flags |= XTENSA_MMU_PERM_W;
+		attrs |= XTENSA_MMU_PERM_W;
 	}
 	if ((flags & K_MEM_PERM_EXEC) == K_MEM_PERM_EXEC) {
-		xtensa_flags |= XTENSA_MMU_PERM_X;
+		attrs |= XTENSA_MMU_PERM_X;
 	}
 
 	is_user = (flags & K_MEM_PERM_USER) == K_MEM_PERM_USER;
 
 	key = k_spin_lock(&xtensa_mmu_lock);
 
 	while (rem_size > 0) {
-		__arch_mem_map((void *)va, pa, xtensa_flags, is_user);
+		__arch_mem_map((void *)va, pa, attrs, is_user);
 
 		rem_size -= (rem_size >= KB(4)) ? KB(4) : rem_size;
 		va += KB(4);
