@@ -38,8 +38,8 @@ unsigned long rt_hw_set_domain_register(unsigned long domain_val)
 {
     unsigned long old_domain;
 
-    asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (old_domain));
-    asm volatile ("mcr p15, 0, %0, c3, c0\n" : :"r" (domain_val) : "memory");
+    asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (old_domain));
+    asm volatile ("mcr p15, 0, %0, c3, c0\n" : : "r" (domain_val) : "memory");
 
     return old_domain;
 }
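Note: CP15 c3 here is the ARMv7-A Domain Access Control Register (DACR); the `mrc` reads the current value and the `mcr` installs the new one. A minimal usage sketch, not part of this patch (the `0x55555555` value, which puts all 16 domains in "client" mode so permission bits are checked, is an illustrative assumption):

```c
/* Hypothetical caller: switch every domain to client mode,
 * then restore whatever the previous configuration was. */
unsigned long old_domain = rt_hw_set_domain_register(0x55555555UL);
/* ... access memory under the temporary domain rules ... */
rt_hw_set_domain_register(old_domain);
```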
@@ -49,9 +49,9 @@ void rt_hw_mmu_setmtt(rt_uint32_t vaddrStart, rt_uint32_t vaddrEnd,
 {
     volatile rt_uint32_t *pTT;
     volatile int i, nSec;
-    pTT = (rt_uint32_t *)MMUTable + (vaddrStart >> 20);
+    pTT = (rt_uint32_t *)MMUTable + (vaddrStart >> 20);
     nSec = (vaddrEnd >> 20) - (vaddrStart >> 20);
-    for (i = 0; i <= nSec; i++)
+    for (i = 0; i <= nSec; i++)
     {
         *pTT = attr | (((paddrStart >> 20) + i) << 20);
         pTT++;
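The loop fills one level-1 descriptor per 1 MiB section, which is why everything is shifted by 20. A worked example of the entry arithmetic (addresses are illustrative, not from the patch):

```c
/* vaddrStart = 0xC0000000, paddrStart = 0x80000000, i = 1:
 *   L1 index : (0xC0000000 >> 20) + 1               = 0xC01
 *   entry    : attr | (((0x80000000 >> 20) + 1) << 20)
 *            = attr | 0x80100000
 * i.e. MMUTable[0xC01] maps VA 0xC0100000..0xC01FFFFF onto
 * PA 0x80100000..0x801FFFFF as a single 1 MiB section. */
```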
@@ -83,13 +83,13 @@ void init_mm_setup(unsigned int *mtbl, unsigned int size, unsigned int pv_off)
 
 #ifndef RT_USING_SMART
 static void _init_map_section(rt_uint32_t *mmu_table, rt_uint32_t va,
-                              rt_uint32_t size,rt_uint32_t pa, rt_uint32_t attr)
+                              rt_uint32_t size, rt_uint32_t pa, rt_uint32_t attr)
 {
     volatile rt_uint32_t *ptt;
     volatile int i, num_section;
-    ptt = (rt_uint32_t *)mmu_table + (va >> ARCH_SECTION_SHIFT);
+    ptt = (rt_uint32_t *)mmu_table + (va >> ARCH_SECTION_SHIFT);
     num_section = size >> ARCH_SECTION_SHIFT;
-    for (i = 0; i <= num_section; i++)
+    for (i = 0; i <= num_section; i++)
     {
         *ptt = attr | (((pa >> ARCH_SECTION_SHIFT) + i) << ARCH_SECTION_SHIFT);
         ptt++;
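`_init_map_section()` is the same section-filling loop as `rt_hw_mmu_setmtt()`, parameterized by `ARCH_SECTION_SHIFT` (20 for the 1 MiB sections used here). Because `num_section` truncates and the loop runs with `i <= num_section`, an unaligned size is still fully covered; a worked case with illustrative numbers:

```c
/* size = 0x00480000 (4.5 MiB):
 *   num_section = 0x00480000 >> 20 = 4
 *   i = 0..4 writes 5 descriptors, i.e. a full 5 MiB of sections,
 *   which covers the requested 4.5 MiB (coverage rounds up). */
```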
@@ -98,25 +98,24 @@ static void _init_map_section(rt_uint32_t *mmu_table, rt_uint32_t va,
 #endif
 
 void rt_hw_mem_setup_early(rt_uint32_t *early_mmu_talbe,
-                           rt_uint32_t pv_off)
+                           rt_uint32_t pv_off)
 {
-    rt_uint32_t size = 0;
+    rt_uint32_t size = 0;
 
-    size = 0x100000 + (rt_uint32_t)&__bss_end;
+    size = 0x100000 + (rt_uint32_t)&__bss_end;
     size &= ~(0x100000 - 1);
 #ifdef RT_USING_SMART
     size -= KERNEL_VADDR_START;
     init_mm_setup(early_mmu_talbe, size, pv_off);
 #else
     rt_uint32_t normal_attr = NORMAL_MEM;
     extern unsigned char _reset;
-    rt_uint32_t va = (rt_uint32_t) & _reset;
+    rt_uint32_t va = (rt_uint32_t)&_reset;
     /* The starting virtual address is aligned along 0x1000000. */
     va &= ~(0x1000000 - 1);
     size -= va;
     _init_map_section(early_mmu_talbe, va, size, va + pv_off, normal_attr);
 #endif
-
 }
 
 void rt_hw_init_mmu_table(struct mem_desc *mdesc, rt_uint32_t size)
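The size arithmetic is the usual round-up idiom: adding 0x100000 before masking with `~(0x100000 - 1)` rounds the image end up to a 1 MiB boundary, and `va &= ~(0x1000000 - 1)` aligns the start of `_reset` down to 16 MiB. A worked example with illustrative link addresses:

```c
/* &__bss_end = 0xC0345678:
 *   size = (0x100000 + 0xC0345678) & ~0xFFFFF = 0xC0400000  // end, rounded up to 1 MiB
 * &_reset    = 0xC0008000:
 *   va   = 0xC0008000 & ~0xFFFFFF              = 0xC0000000  // aligned down to 16 MiB
 *   size -= va                                 -> 0x00400000 // 4 MiB to map
 * _init_map_section() then maps VA 0xC0000000.. onto PA va + pv_off. */
```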
@@ -131,7 +130,7 @@ void rt_hw_init_mmu_table(struct mem_desc *mdesc, rt_uint32_t size)
 #endif /* RT_USING_SMART */
 
     /* set page table */
-    for (; size > 0; size--)
+    for (; size > 0; size--)
     {
         if (mdesc->paddr_start == (rt_uint32_t)ARCH_MAP_FAILED)
             mdesc->paddr_start = mdesc->vaddr_start + PV_OFFSET;
@@ -142,11 +141,11 @@ void rt_hw_init_mmu_table(struct mem_desc *mdesc, rt_uint32_t size)
             mdesc->attr, MMF_MAP_FIXED, &rt_mm_dummy_mapper, 0);
 
         rt_hw_mmu_setmtt(mdesc->vaddr_start, mdesc->vaddr_end,
-                         mdesc->paddr_start, mdesc->attr);
+                         mdesc->paddr_start, mdesc->attr);
         mdesc++;
     }
 
-    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)MMUTable, sizeof MMUTable);
+    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)MMUTable, sizeof MMUTable);
 }
 
 void rt_hw_mmu_init(void)
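The final `rt_hw_cpu_dcache_ops()` call matters: the descriptors were written through the data cache, but the ARMv7-A table walker fetches them from memory, so dirty lines must be cleaned before the table is handed to the hardware. A sketch of the typical board-level call order (the `platform_mem_desc` names follow RT-Thread BSP convention but are an assumption here):

```c
/* Hypothetical BSP init sequence: build the table, then turn the MMU on. */
extern struct mem_desc platform_mem_desc[];    /* board-defined regions (assumed) */
extern const rt_uint32_t platform_mem_desc_size;

rt_hw_init_mmu_table(platform_mem_desc, platform_mem_desc_size); /* fill + flush MMUTable */
rt_hw_mmu_init();                                                /* load TTBR, enable MMU/caches */
```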
@@ -168,7 +167,7 @@ void rt_hw_mmu_init(void)
     rt_hw_cpu_dcache_enable();
 }
 
-int rt_hw_mmu_map_init(struct rt_aspace *aspace, void* v_address, size_t size, size_t *vtable, size_t pv_off)
+int rt_hw_mmu_map_init(struct rt_aspace *aspace, void *v_address, size_t size, size_t *vtable, size_t pv_off)
 {
     size_t l1_off, va_s, va_e;
 
@@ -180,7 +179,7 @@ int rt_hw_mmu_map_init(struct rt_aspace *aspace, void* v_address, size_t size, s
     va_s = (size_t)v_address;
     va_e = (size_t)v_address + size - 1;
 
-    if ( va_e < va_s )
+    if (va_e < va_s)
     {
         return -1;
     }
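The `va_e < va_s` test rejects ranges whose end address wraps past the top of the 32-bit address space, since `v_address + size - 1` overflows in that case. An illustrative failure case:

```c
/* v_address = (void *)0xFFF00000, size = 0x00200000:
 *   va_e = 0xFFF00000 + 0x00200000 - 1 = 0x000FFFFF  (wrapped around)
 *   va_e < va_s  ->  the range is rejected with -1. */
```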
@@ -214,7 +213,7 @@ int rt_hw_mmu_map_init(struct rt_aspace *aspace, void* v_address, size_t size, s
     return 0;
 }
 
-int rt_hw_mmu_ioremap_init(rt_aspace_t aspace, void* v_address, size_t size)
+int rt_hw_mmu_ioremap_init(rt_aspace_t aspace, void *v_address, size_t size)
 {
 #ifdef RT_IOREMAP_LATE
     size_t loop_va;
@@ -243,10 +242,10 @@ int rt_hw_mmu_ioremap_init(rt_aspace_t aspace, void* v_address, size_t size)
     while (sections--)
     {
         l1_off = (loop_va >> ARCH_SECTION_SHIFT);
-        mmu_l1 = (size_t *)aspace->page_table + l1_off;
+        mmu_l1 = (size_t *)aspace->page_table + l1_off;
 
         RT_ASSERT((*mmu_l1 & ARCH_MMU_USED_MASK) == 0);
-        mmu_l2 = (size_t *)rt_pages_alloc(0);
+        mmu_l2 = (size_t *)rt_pages_alloc(0);
         if (mmu_l2)
         {
             rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
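`rt_pages_alloc(0)` requests a 2^0 = 1 page (4 KiB) block for the level-2 table. In ARMv7-A's short-descriptor format an L2 table is only 1 KiB (256 entries of 4 bytes), so one page can back several tables; the `ARCH_PAGE_TBL_SIZE * 2` memset clears the slice this code manages before the L1 slot is pointed at it. A sketch of the wiring being set up (the `pa_of()` helper is hypothetical, values illustrative):

```c
/* l1_off  = loop_va >> 20;                   // one L1 slot per 1 MiB
 * *mmu_l1 = (pa_of(mmu_l2) & ~0x3FF) | 0x1;  // type 0b01: points to an L2 table
 * mmu_l2[(loop_va >> 12) & 0xFF] = ...;      // 256 small-page entries per table */
```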
@@ -271,8 +270,6 @@ int rt_hw_mmu_ioremap_init(rt_aspace_t aspace, void* v_address, size_t size)
 }
 
 
-
-
 static void _kenrel_unmap_4K(unsigned long *lv0_tbl, void *v_addr)
 {
     size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
@@ -424,7 +421,7 @@ void rt_hw_aspace_switch(rt_aspace_t aspace)
     }
 }
 
-void *rt_hw_mmu_v2p(rt_aspace_t aspace, void* v_addr)
+void *rt_hw_mmu_v2p(rt_aspace_t aspace, void *v_addr)
 {
     size_t l1_off, l2_off;
     size_t *mmu_l1, *mmu_l2;
@@ -435,41 +432,41 @@ void *rt_hw_mmu_v2p(rt_aspace_t aspace, void* v_addr)
 
     RT_ASSERT(aspace);
 
-    mmu_l1 = (size_t *)aspace->page_table + l1_off;
+    mmu_l1 = (size_t *)aspace->page_table + l1_off;
 
     tmp = *mmu_l1;
 
     switch (tmp & ARCH_MMU_USED_MASK)
     {
-        case 0: /* not used */
-            break;
-        case 1: /* page table */
-            mmu_l2 = (size_t *)((tmp & ~ARCH_PAGE_TBL_MASK) - PV_OFFSET);
-            l2_off = (((size_t)v_addr & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
-            pa = *(mmu_l2 + l2_off);
-            if (pa & ARCH_MMU_USED_MASK)
+    case 0: /* not used */
+        break;
+    case 1: /* page table */
+        mmu_l2 = (size_t *)((tmp & ~ARCH_PAGE_TBL_MASK) - PV_OFFSET);
+        l2_off = (((size_t)v_addr & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
+        pa = *(mmu_l2 + l2_off);
+        if (pa & ARCH_MMU_USED_MASK)
+        {
+            if ((pa & ARCH_MMU_USED_MASK) == 1)
             {
-                if ((pa & ARCH_MMU_USED_MASK) == 1)
-                {
                 /* large page, not support */
-                    break;
-                }
-                pa &= ~(ARCH_PAGE_MASK);
-                pa += ((size_t)v_addr & ARCH_PAGE_MASK);
-                return (void *)pa;
+                break;
             }
-            break;
-        case 2:
-        case 3:
+            pa &= ~(ARCH_PAGE_MASK);
+            pa += ((size_t)v_addr & ARCH_PAGE_MASK);
+            return (void *)pa;
+        }
+        break;
+    case 2:
+    case 3:
         /* section */
-            if (tmp & ARCH_TYPE_SUPERSECTION)
-            {
+        if (tmp & ARCH_TYPE_SUPERSECTION)
+        {
             /* super section, not support */
-                break;
-            }
-            pa = (tmp & ~ARCH_SECTION_MASK);
-            pa += ((size_t)v_addr & ARCH_SECTION_MASK);
-            return (void *)pa;
+            break;
+        }
+        pa = (tmp & ~ARCH_SECTION_MASK);
+        pa += ((size_t)v_addr & ARCH_SECTION_MASK);
+        return (void *)pa;
     }
     return ARCH_MAP_FAILED;
 }
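This hunk only re-indents `rt_hw_mmu_v2p()`, but it is the software page walk in miniature: the low two bits of an L1 descriptor select fault (0b00), page table (0b01), or section (0b10/0b11, with supersections flagged by a separate bit), which is what `ARCH_MMU_USED_MASK` extracts. A worked translation for the section case (illustrative values, assuming 1 MiB sections so `ARCH_SECTION_MASK` is 0xFFFFF):

```c
/* v_addr = (void *)0xC0123456, *mmu_l1 = 0x8040040E (bits[1:0] = 0b10, a section):
 *   pa  = 0x8040040E & ~0xFFFFF = 0x80400000   // section base
 *   pa += 0xC0123456 &  0xFFFFF = 0x00123456   // offset within the section
 *   result: (void *)0x80523456 */
```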