
Commit c76b338

dcpleung authored and cfriedt committed
xtensa: mmu: properly restore PTE attributes via reset_region()
The software bits inside the PTE are used to store the original PTE
attributes and ring value, so that a PTE can later be restored to its
previous state. This modifies reset_region() to properly restore the
attributes and ring value when resetting memory regions, returning them
to the same state as when the system booted.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
1 parent f2cf818 commit c76b338

File tree

2 files changed: +162 -58 lines changed

arch/xtensa/core/ptables.c

Lines changed: 110 additions & 50 deletions
@@ -22,6 +22,14 @@
  */
 #define OPTION_NO_TLB_IPI BIT(0)
 
+/* Restore the PTE attributes if they have been
+ * stored in the SW bits part in the PTE.
+ */
+#define OPTION_RESTORE_ATTRS BIT(1)
+
+/* Save the PTE attributes and ring in the SW bits part in the PTE. */
+#define OPTION_SAVE_ATTRS BIT(2)
+
 /* Level 1 contains page table entries
  * necessary to map the page table itself.
  */
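The two new option bits complement OPTION_NO_TLB_IPI and drive the save/restore machinery added in this commit. As a rough orientation, the sketch below shows the PTE layout this scheme relies on; the masks, shifts, and the pte_sw_pack() helper are illustrative assumptions, not the actual Zephyr macros.

#include <stdint.h>

/* Assumed Xtensa PTE layout (illustration only):
 *   bits 31..12  physical page number
 *   bits 11..6   software bits, ignored by the hardware walker
 *   bits  5..4   protection ring
 *   bits  3..0   access attributes
 * Six software bits are exactly enough to stash a 2-bit ring plus
 * 4-bit attributes, which is what OPTION_SAVE_ATTRS requests.
 */
#define PTE_ATTR_MASK 0x0000000FU
#define PTE_RING_MASK 0x00000030U
#define PTE_SW_MASK   0x00000FC0U
#define PTE_PPN_MASK  0xFFFFF000U

/* Pack (ring, attrs) into the SW field, in the spirit of the
 * XTENSA_MMU_PTE_SW() macro used throughout this diff. */
static inline uint32_t pte_sw_pack(uint32_t ring, uint32_t attrs)
{
        return (((ring & 0x3U) << 4) | (attrs & PTE_ATTR_MASK)) << 6;
}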
@@ -157,6 +165,8 @@ static const struct xtensa_mmu_range mmu_zephyr_ranges[] = {
         },
 };
 
+static inline uint32_t restore_pte(uint32_t pte);
+
 /**
  * @brief Check if the page table entry is illegal.
  *
@@ -174,18 +184,19 @@ static inline bool is_pte_illegal(uint32_t pte)
         return (attr == 12) || (attr == 14);
 }
 
-/*
- * @brief Initialize all page table entries to be illegal.
+/**
+ * @brief Initialize all page table entries to the same value (@a val).
  *
- * @param[in] Pointer to page table.
- * @param[in] Number of page table entries in the page table.
+ * @param[in] ptable Pointer to page table.
+ * @param[in] num_entries Number of page table entries in the page table.
+ * @param[in] val Initialize all PTEs with this value.
  */
-static void init_page_table(uint32_t *ptable, size_t num_entries)
+static void init_page_table(uint32_t *ptable, size_t num_entries, uint32_t val)
 {
         int i;
 
         for (i = 0; i < num_entries; i++) {
-                ptable[i] = XTENSA_MMU_PTE_ILLEGAL;
+                ptable[i] = val;
         }
 }
 
@@ -203,18 +214,20 @@ static inline uint32_t *alloc_l2_table(void)
 }
 
 static void map_memory_range(const uint32_t start, const uint32_t end,
-                             const uint32_t attrs)
+                             const uint32_t attrs, const uint32_t options)
 {
         uint32_t page, *table;
         bool shared = !!(attrs & XTENSA_MMU_MAP_SHARED);
-        uint32_t sw_attrs = (attrs & XTENSA_MMU_PTE_ATTR_ORIGINAL) == XTENSA_MMU_PTE_ATTR_ORIGINAL ?
-                attrs : 0;
+        bool do_save_attrs = (options & OPTION_SAVE_ATTRS) == OPTION_SAVE_ATTRS;
+        uint32_t ring, sw_attrs, sw_ring, pte_sw;
+
+        ring = shared ? XTENSA_MMU_SHARED_RING : XTENSA_MMU_KERNEL_RING;
+        sw_attrs = do_save_attrs ? attrs : XTENSA_MMU_PTE_ATTR_ILLEGAL;
+        sw_ring = do_save_attrs ? ring : XTENSA_MMU_KERNEL_RING;
+        pte_sw = XTENSA_MMU_PTE_SW(sw_ring, sw_attrs);
 
         for (page = start; page < end; page += CONFIG_MMU_PAGE_SIZE) {
-                uint32_t pte = XTENSA_MMU_PTE(page,
-                                              shared ? XTENSA_MMU_SHARED_RING :
-                                                       XTENSA_MMU_KERNEL_RING,
-                                              sw_attrs, attrs);
+                uint32_t pte = XTENSA_MMU_PTE(page, ring, pte_sw, attrs);
                 uint32_t l2_pos = XTENSA_MMU_L2_POS(page);
                 uint32_t l1_pos = XTENSA_MMU_L1_POS(page);
 
@@ -224,11 +237,12 @@ static void map_memory_range(const uint32_t start, const uint32_t end,
                 __ASSERT(table != NULL, "There is no l2 page table available to "
                          "map 0x%08x\n", page);
 
-                init_page_table(table, XTENSA_L2_PAGE_TABLE_ENTRIES);
+                init_page_table(table, XTENSA_L2_PAGE_TABLE_ENTRIES,
+                                XTENSA_MMU_PTE_L2_ILLEGAL);
 
                 xtensa_kernel_ptables[l1_pos] =
                         XTENSA_MMU_PTE((uint32_t)table, XTENSA_MMU_KERNEL_RING,
-                                       sw_attrs, XTENSA_MMU_PAGE_TABLE_ATTR);
+                                       0, XTENSA_MMU_PAGE_TABLE_ATTR);
         }
 
         table = (uint32_t *)(xtensa_kernel_ptables[l1_pos] & XTENSA_MMU_PTE_PPN_MASK);
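One effect of the rewrite above is that the ring and the packed SW field become loop invariants, computed once instead of per page. Below is a minimal standalone sketch of that structure; pte_compose() and its bit positions are assumptions standing in for the real XTENSA_MMU_PTE() macro.

#include <stdint.h>

#define PAGE_SIZE 4096U /* stand-in for CONFIG_MMU_PAGE_SIZE */

/* Assumed stand-in for XTENSA_MMU_PTE(page, ring, sw, attrs). */
static inline uint32_t pte_compose(uint32_t page, uint32_t ring,
                                   uint32_t sw_field, uint32_t attrs)
{
        return (page & 0xFFFFF000U) | (sw_field & 0x00000FC0U) |
               ((ring & 0x3U) << 4) | (attrs & 0xFU);
}

static void map_range_sketch(uint32_t start, uint32_t end, uint32_t ring,
                             uint32_t pte_sw, uint32_t attrs)
{
        /* ring and pte_sw are computed once by the caller, matching the
         * hoisting done in map_memory_range() above. */
        for (uint32_t page = start; page < end; page += PAGE_SIZE) {
                uint32_t pte = pte_compose(page, ring, pte_sw, attrs);

                (void)pte; /* a real implementation would install the PTE */
        }
}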
@@ -237,26 +251,28 @@ static void map_memory_range(const uint32_t start, const uint32_t end,
 }
 
 static void map_memory(const uint32_t start, const uint32_t end,
-                       const uint32_t attrs)
+                       const uint32_t attrs, const uint32_t options)
 {
 #ifdef CONFIG_XTENSA_MMU_DOUBLE_MAP
         uint32_t uc_attrs = attrs & ~XTENSA_MMU_PTE_ATTR_CACHED_MASK;
         uint32_t c_attrs = attrs | XTENSA_MMU_CACHED_WB;
 
         if (sys_cache_is_ptr_uncached((void *)start)) {
-                map_memory_range(start, end, uc_attrs);
+                map_memory_range(start, end, uc_attrs, options);
 
                 map_memory_range(POINTER_TO_UINT(sys_cache_cached_ptr_get((void *)start)),
-                                 POINTER_TO_UINT(sys_cache_cached_ptr_get((void *)end)), c_attrs);
+                                 POINTER_TO_UINT(sys_cache_cached_ptr_get((void *)end)),
+                                 c_attrs, options);
         } else if (sys_cache_is_ptr_cached((void *)start)) {
-                map_memory_range(start, end, c_attrs);
+                map_memory_range(start, end, c_attrs, options);
 
                 map_memory_range(POINTER_TO_UINT(sys_cache_uncached_ptr_get((void *)start)),
-                                 POINTER_TO_UINT(sys_cache_uncached_ptr_get((void *)end)), uc_attrs);
+                                 POINTER_TO_UINT(sys_cache_uncached_ptr_get((void *)end)),
+                                 uc_attrs, options);
         } else
 #endif
         {
-                map_memory_range(start, end, attrs);
+                map_memory_range(start, end, attrs, options);
         }
 }
 
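Under CONFIG_XTENSA_MMU_DOUBLE_MAP each range is mapped twice, through the cached and the uncached alias, and the new options word simply rides along to both calls. The attribute split works roughly as below; the two attribute constants are assumed values for illustration, not the real XTENSA_MMU_* definitions.

#include <stdint.h>

#define CACHED_WB        0x4U /* assumed write-back cache attribute bit */
#define ATTR_CACHED_MASK 0x6U /* assumed mask covering cacheability bits */

/* Cached alias: force write-back; uncached alias: strip cacheability,
 * mirroring c_attrs/uc_attrs in map_memory() above. */
static inline uint32_t attrs_cached(uint32_t attrs)
{
        return attrs | CACHED_WB;
}

static inline uint32_t attrs_uncached(uint32_t attrs)
{
        return attrs & ~ATTR_CACHED_MASK;
}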
@@ -270,19 +286,20 @@ static void xtensa_init_page_tables(void)
         }
         already_inited = true;
 
-        init_page_table(xtensa_kernel_ptables, XTENSA_L1_PAGE_TABLE_ENTRIES);
+        init_page_table(xtensa_kernel_ptables, XTENSA_L1_PAGE_TABLE_ENTRIES,
+                        XTENSA_MMU_PTE_L1_ILLEGAL);
         atomic_set_bit(l1_page_table_track, 0);
 
         for (entry = 0; entry < ARRAY_SIZE(mmu_zephyr_ranges); entry++) {
                 const struct xtensa_mmu_range *range = &mmu_zephyr_ranges[entry];
 
-                map_memory(range->start, range->end, range->attrs | XTENSA_MMU_PTE_ATTR_ORIGINAL);
+                map_memory(range->start, range->end, range->attrs, OPTION_SAVE_ATTRS);
         }
 
         for (entry = 0; entry < xtensa_soc_mmu_ranges_num; entry++) {
                 const struct xtensa_mmu_range *range = &xtensa_soc_mmu_ranges[entry];
 
-                map_memory(range->start, range->end, range->attrs | XTENSA_MMU_PTE_ATTR_ORIGINAL);
+                map_memory(range->start, range->end, range->attrs, OPTION_SAVE_ATTRS);
         }
 
         /* Finally, the direct-mapped pages used in the page tables
@@ -292,10 +309,10 @@ static void xtensa_init_page_tables(void)
          */
         map_memory_range((uint32_t) &l1_page_table[0],
                          (uint32_t) &l1_page_table[CONFIG_XTENSA_MMU_NUM_L1_TABLES],
-                         XTENSA_MMU_PAGE_TABLE_ATTR | XTENSA_MMU_PERM_W);
+                         XTENSA_MMU_PAGE_TABLE_ATTR | XTENSA_MMU_PERM_W, OPTION_SAVE_ATTRS);
         map_memory_range((uint32_t) &l2_page_tables[0],
                          (uint32_t) &l2_page_tables[CONFIG_XTENSA_MMU_NUM_L2_TABLES],
-                         XTENSA_MMU_PAGE_TABLE_ATTR | XTENSA_MMU_PERM_W);
+                         XTENSA_MMU_PAGE_TABLE_ATTR | XTENSA_MMU_PERM_W, OPTION_SAVE_ATTRS);
 
         sys_cache_data_flush_all();
 }
@@ -375,7 +392,7 @@ static bool l2_page_table_map(uint32_t *l1_table, void *vaddr, uintptr_t phys,
                 return false;
         }
 
-        init_page_table(table, XTENSA_L2_PAGE_TABLE_ENTRIES);
+        init_page_table(table, XTENSA_L2_PAGE_TABLE_ENTRIES, XTENSA_MMU_PTE_L2_ILLEGAL);
 
         l1_table[l1_pos] = XTENSA_MMU_PTE((uint32_t)table, XTENSA_MMU_KERNEL_RING,
                                           0, XTENSA_MMU_PAGE_TABLE_ATTR);
@@ -386,7 +403,9 @@ static bool l2_page_table_map(uint32_t *l1_table, void *vaddr, uintptr_t phys,
         table = (uint32_t *)(l1_table[l1_pos] & XTENSA_MMU_PTE_PPN_MASK);
         table[l2_pos] = XTENSA_MMU_PTE(phys, is_user ? XTENSA_MMU_USER_RING :
                                              XTENSA_MMU_KERNEL_RING,
-                                       0, flags);
+                                       XTENSA_MMU_PTE_SW(XTENSA_MMU_KERNEL_RING,
+                                                         XTENSA_MMU_PTE_ATTR_ILLEGAL),
+                                       flags);
 
         sys_cache_data_flush_range((void *)&table[l2_pos], sizeof(table[0]));
         xtensa_tlb_autorefill_invalidate();
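Note the asymmetry with boot-time mappings: pages mapped dynamically through l2_page_table_map() stash the illegal-attribute marker instead of their current attributes, so a later restore turns them back into unmapped entries. A hedged sketch of that sentinel check follows; the shift and the marker value (assumed to be one of the attribute encodings, 12 or 14, that is_pte_illegal() rejects) are assumptions.

#include <stdbool.h>
#include <stdint.h>

#define SW_SHIFT        6
#define SW_ATTR_ILLEGAL 12U /* assumed "nothing saved" marker */

static inline uint32_t pte_sw_attr_get(uint32_t pte)
{
        return (pte >> SW_SHIFT) & 0xFU; /* low 4 SW bits = saved attrs */
}

static inline bool pte_has_saved_state(uint32_t pte)
{
        /* Same test restore_pte() performs further down: a saved attribute
         * equal to the illegal marker means there is nothing to restore. */
        return pte_sw_attr_get(pte) != SW_ATTR_ILLEGAL;
}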
@@ -549,17 +568,24 @@ static bool l2_page_table_unmap(uint32_t *l1_table, void *vaddr)
 
         sys_cache_data_invd_range((void *)&l2_table[l2_pos], sizeof(l2_table[0]));
 
-        l2_table[l2_pos] = XTENSA_MMU_PTE_ILLEGAL;
+        /* Restore the PTE to previous ring and attributes. */
+        l2_table[l2_pos] = restore_pte(l2_table[l2_pos]);
 
         sys_cache_data_flush_range((void *)&l2_table[l2_pos], sizeof(l2_table[0]));
 
         for (l2_pos = 0; l2_pos < XTENSA_L2_PAGE_TABLE_ENTRIES; l2_pos++) {
                 if (!is_pte_illegal(l2_table[l2_pos])) {
+                        /* If any PTE is mapped (== not illegal), we need to
+                         * keep this L2 table.
+                         */
                         goto end;
                 }
         }
 
-        l1_table[l1_pos] = XTENSA_MMU_PTE_ILLEGAL;
+        /* All L2 PTE are illegal (== nothing mapped), we can safely remove
+         * the L2 table mapping in L1 table and return the L2 table to the pool.
+         */
+        l1_table[l1_pos] = XTENSA_MMU_PTE_L1_ILLEGAL;
         sys_cache_data_flush_range((void *)&l1_table[l1_pos], sizeof(l1_table[0]));
 
         table_pos = (l2_table - (uint32_t *)l2_page_tables) / (XTENSA_L2_PAGE_TABLE_ENTRIES);
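After the restore, the unmap path still reclaims an L2 table only when every one of its entries is illegal. The standalone check below mirrors that scan; the table size is an assumed stand-in for XTENSA_L2_PAGE_TABLE_ENTRIES, while the attribute test matches is_pte_illegal() shown earlier in the diff.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define L2_ENTRIES 1024U /* assumed value of XTENSA_L2_PAGE_TABLE_ENTRIES */

static bool pte_is_illegal(uint32_t pte)
{
        uint32_t attr = pte & 0xFU;

        return (attr == 12U) || (attr == 14U); /* as in is_pte_illegal() */
}

/* Free the L2 table only if nothing is mapped through it any more. */
static bool l2_table_is_empty(const uint32_t *l2_table)
{
        for (size_t i = 0; i < L2_ENTRIES; i++) {
                if (!pte_is_illegal(l2_table[i])) {
                        return false;
                }
        }
        return true;
}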
@@ -719,6 +745,37 @@ void xtensa_mmu_tlb_shootdown(void)
         arch_irq_unlock(key);
 }
 
+/**
+ * @brief Restore PTE ring and attributes from those stashed in SW bits.
+ *
+ * @param[in] pte Page table entry to be restored.
+ *
+ * @note This does not check if the SW bits contain ring and attributes to be
+ *       restored.
+ *
+ * @return PTE with restored ring and attributes. Illegal entry if original is
+ *         illegal.
+ */
+static inline uint32_t restore_pte(uint32_t pte)
+{
+        uint32_t restored_pte;
+
+        uint32_t original_sw = XTENSA_MMU_PTE_SW_GET(pte);
+        uint32_t original_attr = XTENSA_MMU_PTE_SW_ATTR_GET(original_sw);
+
+        if (original_attr != XTENSA_MMU_PTE_ATTR_ILLEGAL) {
+                uint8_t original_ring = XTENSA_MMU_PTE_SW_RING_GET(original_sw);
+
+                restored_pte = pte;
+                restored_pte = XTENSA_MMU_PTE_ATTR_SET(restored_pte, original_attr);
+                restored_pte = XTENSA_MMU_PTE_RING_SET(restored_pte, original_ring);
+        } else {
+                restored_pte = XTENSA_MMU_PTE_L2_ILLEGAL;
+        }
+
+        return restored_pte;
+}
+
 #ifdef CONFIG_USERSPACE
 
 static inline uint32_t *thread_page_tables_get(const struct k_thread *thread)
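Together with the save side in map_memory_range(), restore_pte() round-trips ring and attributes through the SW bits. The self-contained demo below exercises that round trip; every mask, shift, marker value, and the ring numbers are assumptions standing in for the real XTENSA_MMU_* macros.

#include <assert.h>
#include <stdint.h>

#define ATTR_MASK      0x0000000FU
#define RING_MASK      0x00000030U
#define SW_MASK        0x00000FC0U
#define RING_SHIFT     4
#define SW_SHIFT       6
#define ATTR_ILLEGAL   12U          /* assumed "nothing saved" marker */
#define PTE_L2_ILLEGAL ATTR_ILLEGAL /* assumed all-illegal L2 entry   */

static uint32_t sw_pack(uint32_t ring, uint32_t attrs)
{
        return (((ring & 0x3U) << 4) | (attrs & 0xFU)) << SW_SHIFT;
}

/* Behaviorally equivalent sketch of restore_pte() above. */
static uint32_t restore_pte(uint32_t pte)
{
        uint32_t sw = (pte & SW_MASK) >> SW_SHIFT;
        uint32_t saved_attr = sw & 0xFU;
        uint32_t saved_ring = (sw >> 4) & 0x3U;

        if (saved_attr == ATTR_ILLEGAL) {
                return PTE_L2_ILLEGAL; /* nothing stashed: back to unmapped */
        }

        pte = (pte & ~ATTR_MASK) | saved_attr;
        pte = (pte & ~RING_MASK) | (saved_ring << RING_SHIFT);
        return pte;
}

int main(void)
{
        uint32_t page = 0x60000000U, ring = 0U, attrs = 0x7U;

        /* Boot-time mapping: live fields plus a copy in the SW bits. */
        uint32_t boot_pte = page | sw_pack(ring, attrs) |
                            (ring << RING_SHIFT) | attrs;

        /* Hand the page to a user thread: ring and attrs change in place
         * (ring 1 and attrs 0x3 are arbitrary demo values). */
        uint32_t granted = (boot_pte & ~(ATTR_MASK | RING_MASK)) |
                           (1U << RING_SHIFT) | 0x3U;

        /* restore_pte() brings back the boot-time state. */
        assert(restore_pte(granted) == boot_pte);
        return 0;
}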
@@ -757,7 +814,7 @@ static uint32_t *dup_table(void)
 
                 if (is_pte_illegal(xtensa_kernel_ptables[i]) ||
                     (i == XTENSA_MMU_L1_POS(XTENSA_MMU_PTEVADDR))) {
-                        dst_table[i] = XTENSA_MMU_PTE_ILLEGAL;
+                        dst_table[i] = XTENSA_MMU_PTE_L1_ILLEGAL;
                         continue;
                 }
 
@@ -768,18 +825,7 @@ static uint32_t *dup_table(void)
                 }
 
                 for (j = 0; j < XTENSA_L2_PAGE_TABLE_ENTRIES; j++) {
-                        uint32_t original_attr = XTENSA_MMU_PTE_SW_GET(src_l2_table[j]);
-
-                        l2_table[j] = src_l2_table[j];
-                        if (original_attr != 0x0) {
-                                uint8_t ring;
-
-                                ring = XTENSA_MMU_PTE_RING_GET(l2_table[j]);
-                                l2_table[j] = XTENSA_MMU_PTE_ATTR_SET(l2_table[j], original_attr);
-                                l2_table[j] = XTENSA_MMU_PTE_RING_SET(l2_table[j],
-                                                                      ring == XTENSA_MMU_SHARED_RING ?
-                                                                      XTENSA_MMU_SHARED_RING : XTENSA_MMU_KERNEL_RING);
-                        }
+                        l2_table[j] = restore_pte(src_l2_table[j]);
                 }
 
                 /* The page table is using kernel ASID because we don't
@@ -848,10 +894,11 @@ int arch_mem_domain_init(struct k_mem_domain *domain)
 }
 
 static void region_map_update(uint32_t *ptables, uintptr_t start,
-                              size_t size, uint32_t ring, uint32_t flags)
+                              size_t size, uint32_t ring, uint32_t flags, uint32_t option)
 {
         for (size_t offset = 0; offset < size; offset += CONFIG_MMU_PAGE_SIZE) {
                 uint32_t *l2_table, pte;
+                uint32_t new_ring, new_attrs;
                 uint32_t page = start + offset;
                 uint32_t l1_pos = XTENSA_MMU_L1_POS(page);
                 uint32_t l2_pos = XTENSA_MMU_L2_POS(page);
@@ -862,8 +909,20 @@ static void region_map_update(uint32_t *ptables, uintptr_t start,
 
                 sys_cache_data_invd_range((void *)&l2_table[l2_pos], sizeof(l2_table[0]));
 
-                pte = XTENSA_MMU_PTE_RING_SET(l2_table[l2_pos], ring);
-                pte = XTENSA_MMU_PTE_ATTR_SET(pte, flags);
+                pte = l2_table[l2_pos];
+
+                if ((option & OPTION_RESTORE_ATTRS) == OPTION_RESTORE_ATTRS) {
+                        uint32_t original_sw = XTENSA_MMU_PTE_SW_GET(pte);
+
+                        new_attrs = XTENSA_MMU_PTE_SW_ATTR_GET(original_sw);
+                        new_ring = XTENSA_MMU_PTE_SW_RING_GET(original_sw);
+                } else {
+                        new_attrs = flags;
+                        new_ring = ring;
+                }
+
+                pte = XTENSA_MMU_PTE_RING_SET(pte, new_ring);
+                pte = XTENSA_MMU_PTE_ATTR_SET(pte, new_attrs);
 
                 l2_table[l2_pos] = pte;
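region_map_update() performs the same recovery inline rather than calling restore_pte(), since here only the ring and attributes of the live entry may change while its PPN and SW bits stay put. Below is a compressed sketch of that branch; field positions are assumptions. As an aside, for a single-bit flag the `(option & FLAG) == FLAG` test used above is equivalent to `(option & FLAG) != 0`.

#include <stdint.h>

#define OPT_RESTORE_ATTRS (1U << 1) /* mirrors OPTION_RESTORE_ATTRS */
#define SW_SHIFT 6

/* Pick the ring/attrs for one live PTE, as in the hunk above; the PPN
 * and SW bits of pte are left untouched. */
static uint32_t updated_pte(uint32_t pte, uint32_t ring, uint32_t flags,
                            uint32_t option)
{
        uint32_t new_ring, new_attrs;

        if ((option & OPT_RESTORE_ATTRS) == OPT_RESTORE_ATTRS) {
                uint32_t sw = (pte >> SW_SHIFT) & 0x3FU;

                new_attrs = sw & 0xFU;       /* saved attributes */
                new_ring = (sw >> 4) & 0x3U; /* saved ring */
        } else {
                new_attrs = flags;
                new_ring = ring;
        }

        pte = (pte & ~0x30U) | ((new_ring & 0x3U) << 4); /* ring bits 5..4 */
        pte = (pte & ~0xFU) | (new_attrs & 0xFU);        /* attr bits 3..0 */
        return pte;
}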
@@ -895,10 +954,10 @@ static void update_region(uint32_t *ptables, uintptr_t start, size_t size,
         new_flags_uc = (flags & ~XTENSA_MMU_PTE_ATTR_CACHED_MASK);
         new_flags = new_flags_uc | XTENSA_MMU_CACHED_WB;
 
-        region_map_update(ptables, va, size, ring, new_flags);
-        region_map_update(ptables, va_uc, size, ring, new_flags_uc);
+        region_map_update(ptables, va, size, ring, new_flags, option);
+        region_map_update(ptables, va_uc, size, ring, new_flags_uc, option);
 #else
-        region_map_update(ptables, start, size, ring, flags);
+        region_map_update(ptables, start, size, ring, flags, option);
 #endif /* CONFIG_XTENSA_MMU_DOUBLE_MAP */
 
 #if CONFIG_MP_MAX_NUM_CPUS > 1
@@ -914,7 +973,8 @@ static void update_region(uint32_t *ptables, uintptr_t start, size_t size,
 static inline void reset_region(uint32_t *ptables, uintptr_t start, size_t size, uint32_t option)
 {
         update_region(ptables, start, size,
-                      XTENSA_MMU_KERNEL_RING, XTENSA_MMU_PERM_W, option);
+                      XTENSA_MMU_KERNEL_RING, XTENSA_MMU_PERM_W,
+                      option | OPTION_RESTORE_ATTRS);
 }
 
 void xtensa_user_stack_perms(struct k_thread *thread)
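This last hunk is the behavioral heart of the commit: reset_region() now forces OPTION_RESTORE_ATTRS, so resetting a region lands on the stashed boot-time ring and attributes rather than on a blanket kernel-ring, write-permission mapping. A tiny self-contained check of the flag forcing, using the BIT positions defined in the first hunk:

#include <assert.h>
#include <stdint.h>

#define OPTION_NO_TLB_IPI    (1U << 0) /* BIT(0), as in the first hunk */
#define OPTION_RESTORE_ATTRS (1U << 1) /* BIT(1) */

int main(void)
{
        /* Whatever the caller passes, reset_region() ORs the restore flag
         * in, while preserving any other option bits. */
        uint32_t caller_opt = OPTION_NO_TLB_IPI;
        uint32_t effective = caller_opt | OPTION_RESTORE_ATTRS;

        assert((effective & OPTION_RESTORE_ATTRS) != 0U);
        assert((effective & OPTION_NO_TLB_IPI) != 0U);
        return 0;
}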
