From 053f8ef3faf47e8419353ba1028e16bf0872b2e1 Mon Sep 17 00:00:00 2001
From: leesinebee <37928229+leesinebee@users.noreply.github.com>
Date: Tue, 29 Aug 2023 21:29:01 +0800
Subject: [PATCH 1/3] add cmd cputh

---
 components/finsh/cmd.c | 344 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 344 insertions(+)

diff --git a/components/finsh/cmd.c b/components/finsh/cmd.c
index c3b0760f6ab..a6afa4b19c1 100644
--- a/components/finsh/cmd.c
+++ b/components/finsh/cmd.c
@@ -1015,4 +1015,348 @@ int cmd_list(int argc, char **argv)
 }
 MSH_CMD_EXPORT_ALIAS(cmd_list, list, list objects);
 
+#define RT_USING_CPUTH
+#define CPUTH_USING_STATISTICS
+#define CPUTH_TIMER_COUNTDOWN
+
+#ifdef RT_USING_CPUTH
+#include <rtthread.h>
+#include <stdlib.h>
+
+static rt_uint32_t CPU_TICK_CYCLE;
+static rt_uint32_t CPUTH_CALC_PRD_TICK = RT_TICK_PER_SECOND; //in tick
+
+static rt_uint32_t start_tick, start_cycle;
+static rt_thread_t th_from, th_to;
+float percent;
+div_t div_d;
+
+/* weak stubs, to be overridden by the BSP */
+rt_weak rt_uint32_t cputh_get_cycle_per_tick(void)
+{
+    return 0;
+}
+rt_weak rt_uint32_t cputh_get_cycle_in_tick(void)
+{
+    return 0;
+}
+rt_weak rt_uint32_t cputh_get_cycle_overflow(void)
+{
+    return 0;
+}
+
+#define mod(a,b) lldiv(a,b).rem
+#define ot(ct,st,cc,sc) (ct-st-(cc<sc))
+#define oc(cc,sc) (mod((cc+CPU_TICK_CYCLE-sc),CPU_TICK_CYCLE))
+#define at(ot,t,oc,c) (ot+t+((oc+c)>=CPU_TICK_CYCLE))
+#define ac(cc,sc) (mod((cc+sc),CPU_TICK_CYCLE))
+
+struct cputh_data
+{
+    rt_list_t list;
+#ifdef RT_USING_CPUTH
+    rt_uint32_t to_tick;
+    rt_uint32_t to_cycle;
+    rt_uint32_t ticks;
+    rt_uint32_t cycles;
+    rt_uint32_t usage;
+#ifdef CPUTH_USING_STATISTICS
+    rt_uint32_t cycled;
+    rt_uint32_t scheduled;
+    rt_uint32_t schedule_cnt;
+    rt_uint32_t overed;
+#endif
+#endif
+};
+
+static void cputh_hook_of_scheduler(struct rt_thread* fth, struct rt_thread* tth)
+{
+    rt_uint32_t calc_tick;
+    rt_uint32_t over_tick;
+    rt_uint32_t over_cycle;
+
+    struct cputh_data * from = (struct cputh_data *)fth->user_data;
+    struct cputh_data * to = (struct cputh_data *)tth->user_data;
+
+    to->to_cycle = cputh_get_cycle_in_tick();
+#ifdef CPUTH_TIMER_COUNTDOWN
+    to->to_cycle = CPU_TICK_CYCLE-to->to_cycle;
+#endif
+    to->to_tick = rt_tick_get();
+    if(cputh_get_cycle_overflow())
+    {
+        to->to_tick += 1;
+    }
+
+    calc_tick = start_tick+CPUTH_CALC_PRD_TICK;
+
+#ifdef CPUTH_USING_STATISTICS
+    to->schedule_cnt++;
+#endif
+
+    if(from==RT_NULL)
+    {
+        return;
+    }
+
+    over_tick = ot(to->to_tick,calc_tick,to->to_cycle,start_cycle);
+    over_cycle = oc(to->to_cycle,start_cycle);
+
+    if((rt_int32_t)over_tick>=0) /* the calculation period has elapsed */
+    {
+        struct rt_list_node *node = RT_NULL;
+        struct rt_thread *thread;
+        rt_list_t * list = &rt_object_get_information(RT_Object_Class_Thread)->object_list;
+        struct cputh_data * data;
+
+        rt_uint32_t ot1 = ot(calc_tick,from->to_tick,start_cycle,from->to_cycle);
+        rt_uint32_t oc1 = oc(start_cycle,from->to_cycle);
+        from->ticks = at(ot1,from->ticks,oc1,from->cycles);
+        from->cycles = ac(oc1,from->cycles);
+
+        rt_uint64_t cycles;
+        rt_list_for_each(node, list)
+        {
+            thread = rt_container_of(node, struct rt_thread, parent.list);
+            data = (struct cputh_data *)thread->user_data;
+            cycles = (rt_uint64_t)data->ticks*CPU_TICK_CYCLE+data->cycles;
+
+            data->usage = percent*cycles;
+
+#ifdef CPUTH_USING_STATISTICS
+            data->cycled = cycles;
+            data->scheduled = data->schedule_cnt;
+            data->schedule_cnt = 0;
+            data->overed = 0;
+#endif
+            data->ticks = 0;
+            data->cycles = 0;
+        }
+        from->ticks = mod(over_tick,CPUTH_CALC_PRD_TICK);
+        from->cycles = over_cycle;
+
+        start_tick = calc_tick + over_tick-mod(over_tick,CPUTH_CALC_PRD_TICK);
+#ifdef CPUTH_USING_STATISTICS
+        from->overed = over_tick/CPUTH_CALC_PRD_TICK;
+        th_from = fth;
+        th_to = tth;
+#endif
+    }
+    else
+    {
+        rt_uint32_t ot1 = ot(to->to_tick,from->to_tick,to->to_cycle,from->to_cycle);
+        rt_uint32_t oc1 = oc(to->to_cycle,from->to_cycle);
+        from->ticks = at(ot1,from->ticks,oc1,from->cycles);
+        from->cycles = ac(oc1,from->cycles);
+    }
+}
+
+void print_usage(void)
+{
+    struct rt_list_node *node = RT_NULL;
+    struct rt_thread *thread;
+    rt_list_t * list = &rt_object_get_information(RT_Object_Class_Thread)->object_list;
+    struct cputh_data * data;
+
+#ifdef CPUTH_USING_STATISTICS
+    char ch;
+
+    rt_kprintf("system up time in ticks %d\n",rt_tick_get());
+    rt_kprintf("cpu usage check period in ticks %u\n",CPUTH_CALC_PRD_TICK);
+    rt_kprintf("cpu cycles per tick %u\n",CPU_TICK_CYCLE);
+    rt_kprintf("%-*.s usage  overed     cycled     scheduled\n", RT_NAME_MAX, "thread");
+    rt_kprintf("--------- ------ ---------- ---------- ----------\n");
+    rt_enter_critical();
+    rt_list_for_each(node, list)
+    {
+        thread = rt_container_of(node, struct rt_thread, parent.list);
+        data = (struct cputh_data *)thread->user_data;
+        ch = thread==th_from?'>':(thread==th_to?'<':' ');
+        rt_kprintf("%c%-*.*s",ch,RT_NAME_MAX, RT_NAME_MAX, thread->parent.name);
+        rt_kprintf(" %2u.%02u%% 0x%08x 0x%08x 0x%08x\n", data->usage/100,data->usage%100,data->overed,(rt_uint32_t)data->cycled,data->scheduled);
+    }
+    rt_exit_critical();
+#else
+    rt_kprintf("%-*.s usage\n", RT_NAME_MAX, "thread");
+    rt_kprintf("-------- ------\n");
+    rt_enter_critical();
+    rt_list_for_each(node, list)
+    {
+        thread = rt_container_of(node, struct rt_thread, parent.list);
+        data = (struct cputh_data *)thread->user_data;
+        rt_kprintf("%-*.*s %2u.%02u%%\n", RT_NAME_MAX, RT_NAME_MAX, thread->parent.name, data->usage/100, data->usage%100);
+    }
+    rt_exit_critical();
+#endif
+}
+
+#define EVENT_CPU_UPDATE_PERIOD (1<<0)
+#define EVENT_CPU_UPDATE_MANUAL (1<<1)
+
+static struct rt_event event_cpu;
+
+static struct rt_timer tim_cpu;
+
+static void rt_timer_cpu(void * parameter)
+{
+    rt_event_send(&event_cpu,EVENT_CPU_UPDATE_PERIOD);
+}
+
+static rt_err_t _rx_ind(rt_device_t dev, rt_size_t size)
+{
+    RT_ASSERT(dev != RT_NULL);
+
+    rt_event_send(&event_cpu,EVENT_CPU_UPDATE_MANUAL);
+
+    return RT_EOK;
+}
+
+void cputh(int argc, char**argv)
+{
+    start_tick = 0;
+    start_cycle = 0;
+    //init cputh data list
+    struct rt_list_node *node = RT_NULL;
+    struct rt_thread *thread;
+    rt_list_t * list = &rt_object_get_information(RT_Object_Class_Thread)->object_list;
+
+    rt_list_for_each(node, list)
+    {
+        void * data = rt_malloc(sizeof(struct cputh_data));
+        rt_memset(data, 0, sizeof(struct cputh_data));
+        thread = rt_container_of(node, struct rt_thread, parent.list);
+        thread->user_data = (rt_ubase_t)data;
+    }
+
+    rt_int32_t num = -1;
+    if(argc>1)
+    {
+        CPUTH_CALC_PRD_TICK = atoi(argv[1]);
+        if(CPUTH_CALC_PRD_TICK==0)
+        {
+            CPUTH_CALC_PRD_TICK = RT_TICK_PER_SECOND;
+        }
+    }
+    if(argc>2)
+    {
+        num = atoi(argv[2]);
+    }
+
+    CPU_TICK_CYCLE = cputh_get_cycle_per_tick();
+    start_tick = rt_tick_get();
+    start_cycle = cputh_get_cycle_in_tick();
+#ifdef CPUTH_TIMER_COUNTDOWN
+    start_cycle = CPU_TICK_CYCLE-start_cycle;
+#endif
+
+    percent = 10000.0f/CPUTH_CALC_PRD_TICK/CPU_TICK_CYCLE;
+
+//  rt_sem_init(&sem_cputh,"semcpu",0,RT_IPC_FLAG_FIFO);
+
+    rt_timer_init(&tim_cpu,"timcpu",rt_timer_cpu,RT_NULL,CPUTH_CALC_PRD_TICK,RT_TIMER_FLAG_PERIODIC);
+
+    rt_event_init(&event_cpu, "evtcpu", RT_IPC_FLAG_FIFO);
+
+    rt_device_t console = rt_console_get_device();
+    rt_err_t (*rx_ind)(rt_device_t dev,rt_size_t size) = console->rx_indicate;
+    rt_device_set_rx_indicate(console, _rx_ind);
+
+    rt_kprintf("\033c");     //redraw screen
+    rt_kprintf("\033[s");    //save cursor position
+    rt_kprintf("\033[?25l"); //hide cursor
+    rt_kprintf("\033[u");    //restore cursor position
+    print_usage();
+
+    thread = rt_thread_self();
+    ((struct cputh_data*)thread->user_data)->to_tick = start_tick;
+    ((struct cputh_data*)thread->user_data)->to_cycle = start_cycle;
+
+    //set hook
+    rt_scheduler_sethook(cputh_hook_of_scheduler);
+
+    rt_timer_start(&tim_cpu);
+
+    while(num)
+    {
+        rt_uint32_t e;
+//      rt_sem_take(&sem_cputh,RT_WAITING_FOREVER);
+        rt_event_recv(&event_cpu,
+                      (EVENT_CPU_UPDATE_PERIOD | EVENT_CPU_UPDATE_MANUAL),
+                      RT_EVENT_FLAG_OR | RT_EVENT_FLAG_CLEAR,
+                      RT_WAITING_FOREVER,&e);
+        if(e&EVENT_CPU_UPDATE_MANUAL)
+        {
+            char ach;
+            rt_size_t size;
+__repeat:
+            size = rt_device_read(console, -1, &ach, 1);
+
+            if(size==0)
+            {
+                continue;
+            }
+
+            if(ach == 0x03) //ctrl+c
+            {
+                break;
+            }
+            if(ach == 'q')
+            {
+                break;
+            }
+
+            //size!=0 and other input
+            rt_kprintf("\033[u"); //restore cursor position
+            print_usage();
+            goto __repeat;
+        }
+        rt_kprintf("\033[u"); //restore cursor position
+//      rt_kprintf("\033[0J"); //clear content after cursor
+        print_usage();
+        if(num>0)
+        {
+            num--;
+        }
+    }
+    rt_kprintf("\033[?25h"); //show cursor
+    //reset hook
+    rt_scheduler_sethook(RT_NULL);
+
+    rt_timer_stop(&tim_cpu);
+    rt_timer_detach(&tim_cpu);
+
+    rt_device_set_rx_indicate(console, rx_ind);
+
+//  rt_sem_detach(&sem_cputh);
+    rt_event_detach(&event_cpu);
+    //release cputh data
+    rt_list_for_each(node, list)
+    {
+        thread = rt_container_of(node, struct rt_thread, parent.list);
+        rt_free((void*)thread->user_data);
+        thread->user_data = 0;
+    }
+}
+MSH_CMD_EXPORT(cputh,cpu usage of thread);
+#endif
+
 #endif /* RT_USING_FINSH */
From 646740237983136084754412bd169e5be9f3e0f1 Mon Sep 17 00:00:00 2001
From: leesinebee <37928229+leesinebee@users.noreply.github.com>
Date: Tue, 29 Aug 2023 22:56:12 +0800
Subject: [PATCH 2/3] add cputh command

add kconfig options
---
 components/finsh/Kconfig |  13 +
 components/finsh/cmd.c   | 550 ++++++++++++++++++++-------------------
 2 files changed, 302 insertions(+), 261 deletions(-)

diff --git a/components/finsh/Kconfig b/components/finsh/Kconfig
index 2d9d768e239..68a66625269 100644
--- a/components/finsh/Kconfig
+++ b/components/finsh/Kconfig
@@ -76,4 +76,17 @@ if RT_USING_MSH
         int "The number of arguments for a shell command"
         default 10
 
+    config FINSH_USING_CPUTH
+        bool "Enable cputh"
+        default n
+
+    if FINSH_USING_CPUTH
+        config CPUTH_USING_STATISTICS
+            bool "Enable cputh statistics"
+            default n
+        config CPUTH_TIMER_COUNTDOWN
+            bool "Enable cputh hardware timer count down"
+            default n
+    endif
+
 endif

diff --git a/components/finsh/cmd.c b/components/finsh/cmd.c
index a6afa4b19c1..8aa028390f9 100644
--- a/components/finsh/cmd.c
+++ b/components/finsh/cmd.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2022, RT-Thread Development Team
+ * Copyright (c) 2006-2023, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -30,6 +30,7 @@
  * Provide protection for the "first layer of objects" when list_*
  * 2020-04-07     chenhui      add clear
  * 2022-07-02     Stanley Lwin add list command
+ * 2023-09-25     leesinebee   add cputh command
  */
 
 #include <rthw.h>
@@ -1015,348 +1016,375 @@ int cmd_list(int argc, char **argv)
 }
 MSH_CMD_EXPORT_ALIAS(cmd_list, list, list objects);
 
-#define RT_USING_CPUTH
-#define CPUTH_USING_STATISTICS
-#define CPUTH_TIMER_COUNTDOWN
 
-#ifdef RT_USING_CPUTH
-#include <rtthread.h>
-#include <stdlib.h>
+#ifdef FINSH_USING_CPUTH
 
-static rt_uint32_t CPU_TICK_CYCLE;
-static rt_uint32_t CPUTH_CALC_PRD_TICK = RT_TICK_PER_SECOND; //in tick
+#include <stdlib.h>
 
-static rt_uint32_t start_tick, start_cycle;
-static rt_thread_t th_from, th_to;
-float percent;
-div_t div_d;
+struct cputh_data
+{
+    rt_uint32_t to_tick;   /**< value of rt_tick when schedule to a thread */
+    rt_uint32_t to_cycle;  /**< value of hardware timer counter when schedule to a thread */
+    rt_uint32_t cycles;    /**< cpu hardware cycles used by thread */
+    rt_uint32_t index;     /**< cputh data buffer index */
+#ifdef CPUTH_USING_STATISTICS
+    rt_uint32_t scheduled; /**< number of times that thread has been scheduled */
+    rt_uint32_t overed;    /**< number of times that thread run time exceeds the cputh period */
+#endif
+};
+
+struct cputh_data_pingpang
+{
+    struct cputh_data data[2];
+};
+
+static rt_uint32_t index_now;
+static rt_uint32_t cputh_index;
+static rt_uint32_t cycle_per_tick;
+static rt_uint32_t calc_prd;
+
+static rt_uint32_t start_tick;
+static rt_uint32_t start_cycle;
+
+#define MOD(a,b) (a%b)
+#define OT(ct,st,cc,sc) (ct-st-(cc<sc))
+#define OC(cc,sc) (MOD((cc+cycle_per_tick-sc),cycle_per_tick))
 
-#define mod(a,b) lldiv(a,b).rem
-#define ot(ct,st,cc,sc) (ct-st-(cc<sc))
-#define oc(cc,sc) (mod((cc+CPU_TICK_CYCLE-sc),CPU_TICK_CYCLE))
-#define at(ot,t,oc,c) (ot+t+((oc+c)>=CPU_TICK_CYCLE))
-#define ac(cc,sc) (mod((cc+sc),CPU_TICK_CYCLE))
-
-struct cputh_data
-{
-    rt_list_t list;
-#ifdef RT_USING_CPUTH
-    rt_uint32_t to_tick;
-    rt_uint32_t to_cycle;
-    rt_uint32_t ticks;
-    rt_uint32_t cycles;
-    rt_uint32_t usage;
-#ifdef CPUTH_USING_STATISTICS
-    rt_uint32_t cycled;
-    rt_uint32_t scheduled;
-    rt_uint32_t schedule_cnt;
-    rt_uint32_t overed;
-#endif
-#endif
-};
-
+/**
+ * @brief scheduler hook function.
+ *
+ * @param fth is the thread which cpu will leave from.
+ *
+ * @param tth is the thread which cpu will enter to.
+ *
+ */
 static void cputh_hook_of_scheduler(struct rt_thread* fth, struct rt_thread* tth)
 {
     rt_uint32_t calc_tick;
-//  rt_uint32_t calc_cycle;
     rt_uint32_t over_tick;
     rt_uint32_t over_cycle;
+    rt_uint32_t sw_tick;
+    rt_uint32_t sw_cycle;
 
-    struct cputh_data * from = (struct cputh_data *)fth->user_data;
-    struct cputh_data * to = (struct cputh_data *)tth->user_data;
+    struct cputh_data_pingpang * from = (struct cputh_data_pingpang *)fth->user_data;
+    struct cputh_data_pingpang * to = (struct cputh_data_pingpang *)tth->user_data;
 
-    to->to_cycle = cputh_get_cycle_in_tick();
+    sw_cycle = cputh_get_cycle_in_tick();
 #ifdef CPUTH_TIMER_COUNTDOWN
-    to->to_cycle = CPU_TICK_CYCLE-to->to_cycle;
+    sw_cycle = cycle_per_tick-sw_cycle;
 #endif
-    to->to_tick = rt_tick_get();
+    sw_tick = rt_tick_get();
+    /* if hardware timer overflowed, update tick */
     if(cputh_get_cycle_overflow())
     {
-        to->to_tick += 1;
+        sw_tick += 1;
     }
 
-    calc_tick = start_tick+CPUTH_CALC_PRD_TICK;
-
-#ifdef CPUTH_USING_STATISTICS
-    to->schedule_cnt++;
-#endif
+    calc_tick = start_tick+calc_prd;
 
     if(from==RT_NULL)
     {
         return;
     }
 
-    over_tick = ot(to->to_tick,calc_tick,to->to_cycle,start_cycle);
-    over_cycle = oc(to->to_cycle,start_cycle);
+    if(to->data[cputh_index].index!=from->data[cputh_index].index)
+    {
+        to->data[cputh_index].index=from->data[cputh_index].index;
+#ifdef CPUTH_USING_STATISTICS
+        to->data[cputh_index].scheduled = 0;
+        to->data[cputh_index].overed = 0;
+#endif
+        to->data[cputh_index].cycles = 0;
+    }
+
+    over_tick = OT(sw_tick,calc_tick,sw_cycle,start_cycle);
+    over_cycle = OC(sw_cycle,start_cycle);
 
     if((rt_int32_t)over_tick>=0) /* the calculation period has elapsed */
     {
-        struct rt_list_node *node = RT_NULL;
-        struct rt_thread *thread;
-        rt_list_t * list = &rt_object_get_information(RT_Object_Class_Thread)->object_list;
-        struct cputh_data * data;
-
-        rt_uint32_t ot1 = ot(calc_tick,from->to_tick,start_cycle,from->to_cycle);
-        rt_uint32_t oc1 = oc(start_cycle,from->to_cycle);
-        from->ticks = at(ot1,from->ticks,oc1,from->cycles);
-        from->cycles = ac(oc1,from->cycles);
-
-        rt_uint64_t cycles;
-        rt_list_for_each(node, list)
-        {
-            thread = rt_container_of(node, struct rt_thread, parent.list);
-            data = (struct cputh_data *)thread->user_data;
-            cycles = (rt_uint64_t)data->ticks*CPU_TICK_CYCLE+data->cycles;
-
-            data->usage = percent*cycles;
-
+        rt_uint32_t ot1 = OT(calc_tick,from->data[cputh_index].to_tick,start_cycle,from->data[cputh_index].to_cycle);
+        rt_uint32_t oc1 = OC(start_cycle,from->data[cputh_index].to_cycle);
+        rt_uint32_t overed = over_tick/calc_prd;
+        rt_uint32_t rem_tick = over_tick-overed*calc_prd;
+        from->data[cputh_index].cycles += ot1*cycle_per_tick+oc1;
 #ifdef CPUTH_USING_STATISTICS
-            data->cycled = cycles;
-            data->scheduled = data->schedule_cnt;
-            data->schedule_cnt = 0;
-            data->overed = 0;
+        from->data[cputh_index].overed = over_tick/calc_prd;
 #endif
-            data->ticks = 0;
-            data->cycles = 0;
-        }
-        from->ticks = mod(over_tick,CPUTH_CALC_PRD_TICK);
-        from->cycles = over_cycle;
+        cputh_index ^= 1;
+        index_now +=1;
+        start_tick = calc_tick + over_tick-rem_tick;
+
+        from->data[cputh_index].cycles = rem_tick*cycle_per_tick+over_cycle;
+        from->data[cputh_index].index = index_now;
+
+        to->data[cputh_index].cycles = 0;
+        to->data[cputh_index].to_tick = sw_tick;
+        to->data[cputh_index].to_cycle = sw_cycle;
+        to->data[cputh_index].index = index_now;
 
-        start_tick = calc_tick + over_tick-mod(over_tick,CPUTH_CALC_PRD_TICK);
 #ifdef CPUTH_USING_STATISTICS
-        from->overed = over_tick/CPUTH_CALC_PRD_TICK;
-        th_from = fth;
-        th_to = tth;
+        from->data[cputh_index].scheduled = 0;
+        to->data[cputh_index].scheduled = 1;
 #endif
     }
     else
     {
-        rt_uint32_t ot1 = ot(to->to_tick,from->to_tick,to->to_cycle,from->to_cycle);
-        rt_uint32_t oc1 = oc(to->to_cycle,from->to_cycle);
-        from->ticks = at(ot1,from->ticks,oc1,from->cycles);
-        from->cycles = ac(oc1,from->cycles);
+        rt_uint32_t ot1 = OT(sw_tick,from->data[cputh_index].to_tick,sw_cycle,from->data[cputh_index].to_cycle);
+        rt_uint32_t oc1 = OC(sw_cycle,from->data[cputh_index].to_cycle);
+        from->data[cputh_index].cycles += ot1*cycle_per_tick+oc1;
+        to->data[cputh_index].to_tick = sw_tick;
+        to->data[cputh_index].to_cycle = sw_cycle;
+#ifdef CPUTH_USING_STATISTICS
+        to->data[cputh_index].scheduled++;
+#endif
     }
 }
 
 void print_usage(void)
 {
     struct rt_list_node *node = RT_NULL;
     struct rt_thread *thread;
     rt_list_t * list = &rt_object_get_information(RT_Object_Class_Thread)->object_list;
     struct cputh_data * data;
+    rt_uint32_t usage;
 
 #ifdef CPUTH_USING_STATISTICS
-    char ch;
-
-    rt_kprintf("system up time in ticks %d\n",rt_tick_get());
-    rt_kprintf("cpu usage check period in ticks %u\n",CPUTH_CALC_PRD_TICK);
-    rt_kprintf("cpu cycles per tick %u\n",CPU_TICK_CYCLE);
+
+    rt_kprintf("tick %d\n",rt_tick_get());
     rt_kprintf("%-*.s usage  overed     cycled     scheduled\n", RT_NAME_MAX, "thread");
-    rt_kprintf("--------- ------ ---------- ---------- ----------\n");
+    rt_kprintf("-------- ------- ---------- ---------- ----------\n");
     rt_enter_critical();
     rt_list_for_each(node, list)
     {
         thread = rt_container_of(node, struct rt_thread, parent.list);
-        data = (struct cputh_data *)thread->user_data;
-        ch = thread==th_from?'>':(thread==th_to?'<':' ');
-        rt_kprintf("%c%-*.*s",ch,RT_NAME_MAX, RT_NAME_MAX, thread->parent.name);
-        rt_kprintf(" %2u.%02u%% 0x%08x 0x%08x 0x%08x\n", data->usage/100,data->usage%100,data->overed,(rt_uint32_t)data->cycled,data->scheduled);
+        data = &((struct cputh_data_pingpang *)thread->user_data)->data[cputh_index^1];
+        usage = data->cycles*10000.0f/calc_prd/cycle_per_tick;
+        rt_kprintf("%-*.*s",RT_NAME_MAX, RT_NAME_MAX, thread->parent.name);
+        if(data->index+1 == index_now)
+        {
+            rt_kprintf(" %3u.%02u%% 0x%08x 0x%08x 0x%08x\n", usage/100,usage%100,data->overed,data->cycles,data->scheduled);
+        }
+        else
+        {
+            rt_kprintf(" %3u.%02u%% 0x%08x 0x%08x 0x%08x\n", 0,0,0,0,0);
+        }
     }
     rt_exit_critical();
 #else
-    rt_kprintf("%-*.s usage\n", RT_NAME_MAX, "thread");
-    rt_kprintf("-------- ------\n");
+    rt_kprintf("tick %d\n",rt_tick_get());
+    rt_kprintf("%-*.s usage\n", RT_NAME_MAX, "thread");
+    rt_kprintf("-------- -------\n");
     rt_enter_critical();
     rt_list_for_each(node, list)
     {
         thread = rt_container_of(node, struct rt_thread, parent.list);
-        data = (struct cputh_data *)thread->user_data;
-        rt_kprintf("%-*.*s %2u.%02u%%\n", RT_NAME_MAX, RT_NAME_MAX, thread->parent.name, data->usage/100, data->usage%100);
+        data = &((struct cputh_data_pingpang *)thread->user_data)->data[cputh_index^1];
+        usage = data->cycles*10000.0f/calc_prd/cycle_per_tick;
+        if(data->index+1 == index_now)
+        {
+            rt_kprintf("%-*.*s %3u.%02u%%\n", RT_NAME_MAX, RT_NAME_MAX, thread->parent.name, usage/100, usage%100);
+        }
+        else
+        {
+            rt_kprintf("%-*.*s %3u.%02u%%\n", RT_NAME_MAX, RT_NAME_MAX, thread->parent.name, 0, 0);
+        }
     }
     rt_exit_critical();
 #endif
 }
 
 #define EVENT_CPU_UPDATE_PERIOD (1<<0)
 #define EVENT_CPU_UPDATE_MANUAL (1<<1)
 
 static struct rt_event event_cpu;
-
 static struct rt_timer tim_cpu;
 
 static void rt_timer_cpu(void * parameter)
 {
     rt_event_send(&event_cpu,EVENT_CPU_UPDATE_PERIOD);
 }
 
 static rt_err_t _rx_ind(rt_device_t dev, rt_size_t size)
 {
     RT_ASSERT(dev != RT_NULL);
-
     rt_event_send(&event_cpu,EVENT_CPU_UPDATE_MANUAL);
-
     return RT_EOK;
 }
 
+static void release_data(void)
+{
+    struct rt_list_node *node = RT_NULL;
+    struct rt_thread *thread;
+    rt_list_t * list = &rt_object_get_information(RT_Object_Class_Thread)->object_list;
+    rt_list_for_each(node, list)
+    {
+        thread = rt_container_of(node, struct rt_thread, parent.list);
+        if(thread->user_data!=RT_NULL)
+        {
+            rt_free((void*)thread->user_data);
+            thread->user_data = RT_NULL;
+        }
+    }
+}
+
-void cputh(int argc, char**argv)
+static void cputh(int argc, char**argv)
 {
-    start_tick = 0;
-    start_cycle = 0;
-    //init cputh data list
+    cputh_index = 0;
+    index_now = 0;
+    calc_prd = RT_TICK_PER_SECOND;
+    start_tick = 0;
+    start_cycle = 0;
+
     struct rt_list_node *node = RT_NULL;
     struct rt_thread *thread;
     rt_list_t * list = &rt_object_get_information(RT_Object_Class_Thread)->object_list;
 
     rt_list_for_each(node, list)
     {
-        void * data = rt_malloc(sizeof(struct cputh_data));
-        rt_memset(data, 0, sizeof(struct cputh_data));
+        void * data = rt_malloc(sizeof(struct cputh_data_pingpang));
+        if(data == RT_NULL)
+        {
+            rt_kprintf("failed to init cputh data\n");
+            release_data();
+            return;
+        }
+        rt_memset(data, 0, sizeof(struct cputh_data_pingpang));
         thread = rt_container_of(node, struct rt_thread, parent.list);
         thread->user_data = (rt_ubase_t)data;
     }
 
     rt_int32_t num = -1;
     if(argc>1)
     {
-        CPUTH_CALC_PRD_TICK = atoi(argv[1]);
-        if(CPUTH_CALC_PRD_TICK==0)
+        calc_prd = atoi(argv[1]);
+        if(calc_prd==0)
         {
-            CPUTH_CALC_PRD_TICK = RT_TICK_PER_SECOND;
+            calc_prd = RT_TICK_PER_SECOND;
         }
     }
     if(argc>2)
     {
         num = atoi(argv[2]);
     }
 
-    CPU_TICK_CYCLE = cputh_get_cycle_per_tick();
-    start_tick = rt_tick_get();
+    cycle_per_tick = cputh_get_cycle_per_tick();
+
+    rt_timer_init(&tim_cpu,"timcpu",rt_timer_cpu,RT_NULL,calc_prd,RT_TIMER_FLAG_PERIODIC);
+    rt_event_init(&event_cpu, "evtcpu", RT_IPC_FLAG_FIFO);
+
+    rt_device_t console = rt_console_get_device();
+    rt_err_t (*rx_ind)(rt_device_t dev,rt_size_t size) = console->rx_indicate;
+    /* take over console input */
+    rt_device_set_rx_indicate(console, _rx_ind);
+    rt_enter_critical();
+    start_tick = rt_tick_get();
     start_cycle = cputh_get_cycle_in_tick();
-    #ifdef CPUTH_TIMER_COUNTDOWN
-    start_cycle = CPU_TICK_CYCLE-start_cycle;
-    #endif
+#ifdef CPUTH_TIMER_COUNTDOWN
+    start_cycle = cycle_per_tick-start_cycle;
+#endif
+
+    thread = rt_thread_self();
+    ((struct cputh_data*)thread->user_data)->to_tick = start_tick;
+    ((struct cputh_data*)thread->user_data)->to_cycle = start_cycle;
+
+    rt_scheduler_sethook(cputh_hook_of_scheduler);
+    rt_timer_start(&tim_cpu);
+    rt_exit_critical();
 
-    percent = 10000.0f/CPUTH_CALC_PRD_TICK/CPU_TICK_CYCLE;
-
-//  rt_sem_init(&sem_cputh,"semcpu",0,RT_IPC_FLAG_FIFO);
-
-    rt_timer_init(&tim_cpu,"timcpu",rt_timer_cpu,RT_NULL,CPUTH_CALC_PRD_TICK,RT_TIMER_FLAG_PERIODIC);
-
-    rt_event_init(&event_cpu, "evtcpu", RT_IPC_FLAG_FIFO);
-
-    rt_device_t console = rt_console_get_device();
-    rt_err_t (*rx_ind)(rt_device_t dev,rt_size_t size) = console->rx_indicate;
-    rt_device_set_rx_indicate(console, _rx_ind);
-
-    rt_kprintf("\033c");     //redraw screen
-    rt_kprintf("\033[s");    //save cursor position
-    rt_kprintf("\033[?25l"); //hide cursor
-    rt_kprintf("\033[u");    //restore cursor position
-    print_usage();
-
-    thread = rt_thread_self();
-    ((struct cputh_data*)thread->user_data)->to_tick = start_tick;
-    ((struct cputh_data*)thread->user_data)->to_cycle = start_cycle;
-
-    //set hook
-    rt_scheduler_sethook(cputh_hook_of_scheduler);
-
-    rt_timer_start(&tim_cpu);
+    rt_kprintf("cputh period %u, ",calc_prd);
+    rt_kprintf("cycles per tick %u\n",cycle_per_tick);
+
+    /* redraw screen */
+    rt_kprintf("\033c");
+    /* save cursor position */
+    rt_kprintf("\033[s");
+    /* hide cursor */
+    rt_kprintf("\033[?25l");
+    /* restore cursor position */
+    rt_kprintf("\033[u");
+    print_usage();
 
     while(num)
     {
-
         rt_uint32_t e;
-//      rt_sem_take(&sem_cputh,RT_WAITING_FOREVER);
         rt_event_recv(&event_cpu,
                       (EVENT_CPU_UPDATE_PERIOD | EVENT_CPU_UPDATE_MANUAL),
                       RT_EVENT_FLAG_OR | RT_EVENT_FLAG_CLEAR,
                       RT_WAITING_FOREVER,&e);
         if(e&EVENT_CPU_UPDATE_MANUAL)
         {
             char ach;
             rt_size_t size;
 __repeat:
             size = rt_device_read(console, -1, &ach, 1);
 
             if(size==0)
             {
                 continue;
             }
-
-            if(ach == 0x03) //ctrl+c
+            /* ctrl+c */
+            if(ach == 0x03)
             {
                 break;
             }
             if(ach == 'q')
             {
                 break;
             }
 
-            //size!=0 and other input
-            rt_kprintf("\033[u"); //restore cursor position
+            rt_kprintf("\033[u");
             print_usage();
             goto __repeat;
         }
-        rt_kprintf("\033[u"); //restore cursor position
-//      rt_kprintf("\033[0J"); //clear content after cursor
+
+        rt_kprintf("\033[u");
         print_usage();
         if(num>0)
         {
             num--;
         }
     }
-    rt_kprintf("\033[?25h"); //show cursor
-    //reset hook
+    /* show cursor */
+    rt_kprintf("\033[?25h");
+    /* reset hook */
     rt_scheduler_sethook(RT_NULL);
 
     rt_timer_stop(&tim_cpu);
     rt_timer_detach(&tim_cpu);
-
+    /* hand over console input */
     rt_device_set_rx_indicate(console, rx_ind);
-
-//  rt_sem_detach(&sem_cputh);
     rt_event_detach(&event_cpu);
-    //release cputh data
-    rt_list_for_each(node, list)
-    {
-        thread = rt_container_of(node, struct rt_thread, parent.list);
-        rt_free((void*)thread->user_data);
-        thread->user_data = 0;
-    }
+    /* release cputh data */
+    release_data();
 
 }
-MSH_CMD_EXPORT(cputh,cpu usage of thread);
-#endif
+MSH_CMD_EXPORT(cputh,cputh [period] [counts]);
+
+#endif /* FINSH_USING_CPUTH */
 
 #endif /* RT_USING_FINSH */
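The OT/OC pair in this patch implements borrow arithmetic on (tick, cycle) timestamp pairs: OT yields whole ticks elapsed between two switch points, borrowing one tick when the cycle counter wrapped, and OC yields the cycle remainder. A standalone illustration of that arithmetic (names are local to this example; the exact macro bodies are partly unrecoverable in the patch text above, so this is a sketch of the idea rather than a copy):

/* (tick, cycle) timestamps with cycle in [0, CYCLE_PER_TICK) */
#include <stdint.h>
#include <stdio.h>

#define CYCLE_PER_TICK 1000u

/* ticks elapsed from (st,sc) to (ct,cc); borrow one tick if cycles wrapped */
static uint32_t ot(uint32_t ct, uint32_t st, uint32_t cc, uint32_t sc)
{
    return ct - st - (cc < sc);
}

/* cycles elapsed, modulo one tick */
static uint32_t oc(uint32_t cc, uint32_t sc)
{
    return (cc + CYCLE_PER_TICK - sc) % CYCLE_PER_TICK;
}

int main(void)
{
    /* from (tick 10, cycle 900) to (tick 12, cycle 100):
       total = 12100 - 10900 = 1200 cycles = 1 tick + 200 cycles */
    printf("ticks=%u cycles=%u\n", ot(12, 10, 100, 900), oc(100, 900));
    return 0;
}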
From 7398973d141b44e26b4e8c91fa3a457077c972a9 Mon Sep 17 00:00:00 2001
From: leesinebee <37928229+leesinebee@users.noreply.github.com>
Date: Tue, 26 Sep 2023 11:25:16 +0800
Subject: [PATCH 3/3] astyle format

---
 components/finsh/cmd.c | 218 ++++++++++++++++++++---------------------
 1 file changed, 109 insertions(+), 109 deletions(-)

diff --git a/components/finsh/cmd.c b/components/finsh/cmd.c
index 8aa028390f9..93644236685 100644
--- a/components/finsh/cmd.c
+++ b/components/finsh/cmd.c
@@ -57,7 +57,7 @@ long version(void)
     return 0;
 }
 
-MSH_CMD_EXPORT(version, show RT-Thread version information);
+MSH_CMD_EXPORT(version, show RT - Thread version information);
 
 rt_inline void object_split(int len)
 {
@@ -445,26 +445,26 @@ long list_mutex(void)
             if (!rt_list_isempty(&m->parent.suspend_thread))
             {
                 rt_kprintf("%-*.*s %-8.*s %04d %8d %04d ",
-                        maxlen, RT_NAME_MAX,
-                        m->parent.parent.name,
-                        RT_NAME_MAX,
-                        m->owner->parent.name,
-                        m->hold,
-                        m->priority,
-                        rt_list_len(&m->parent.suspend_thread));
+                           maxlen, RT_NAME_MAX,
+                           m->parent.parent.name,
+                           RT_NAME_MAX,
+                           m->owner->parent.name,
+                           m->hold,
+                           m->priority,
+                           rt_list_len(&m->parent.suspend_thread));
                 show_wait_queue(&(m->parent.suspend_thread));
                 rt_kprintf("\n");
             }
             else
             {
                 rt_kprintf("%-*.*s %-8.*s %04d %8d %04d\n",
-                        maxlen, RT_NAME_MAX,
-                        m->parent.parent.name,
-                        RT_NAME_MAX,
-                        m->owner->parent.name,
-                        m->hold,
-                        m->priority,
-                        rt_list_len(&m->parent.suspend_thread));
+                           maxlen, RT_NAME_MAX,
+                           m->parent.parent.name,
+                           RT_NAME_MAX,
+                           m->owner->parent.name,
+                           m->hold,
+                           m->priority,
+                           rt_list_len(&m->parent.suspend_thread));
             }
         }
     }
@@ -885,7 +885,7 @@ long list_device(void)
             device = (struct rt_device *)obj;
             device_type = "Unknown";
             if (device->type < RT_Device_Class_Unknown &&
-                device_type_str[device->type] != RT_NULL)
+                    device_type_str[device->type] != RT_NULL)
             {
                 device_type = device_type_str[device->type];
             }
@@ -906,66 +906,66 @@ long list_device(void)
 
 int cmd_list(int argc, char **argv)
 {
-    if(argc == 2)
+    if (argc == 2)
     {
-        if(strcmp(argv[1], "thread") == 0)
+        if (strcmp(argv[1], "thread") == 0)
         {
             list_thread();
         }
-        else if(strcmp(argv[1], "timer") == 0)
+        else if (strcmp(argv[1], "timer") == 0)
         {
             list_timer();
         }
 #ifdef RT_USING_SEMAPHORE
-        else if(strcmp(argv[1], "sem") == 0)
+        else if (strcmp(argv[1], "sem") == 0)
         {
             list_sem();
         }
 #endif /* RT_USING_SEMAPHORE */
 #ifdef RT_USING_EVENT
-        else if(strcmp(argv[1], "event") == 0)
+        else if (strcmp(argv[1], "event") == 0)
         {
             list_event();
         }
 #endif /* RT_USING_EVENT */
 #ifdef RT_USING_MUTEX
-        else if(strcmp(argv[1], "mutex") == 0)
+        else if (strcmp(argv[1], "mutex") == 0)
        {
             list_mutex();
         }
 #endif /* RT_USING_MUTEX */
 #ifdef RT_USING_MAILBOX
-        else if(strcmp(argv[1], "mailbox") == 0)
+        else if (strcmp(argv[1], "mailbox") == 0)
         {
             list_mailbox();
         }
 #endif /* RT_USING_MAILBOX */
 #ifdef RT_USING_MESSAGEQUEUE
-        else if(strcmp(argv[1], "msgqueue") == 0)
+        else if (strcmp(argv[1], "msgqueue") == 0)
         {
             list_msgqueue();
         }
 #endif /* RT_USING_MESSAGEQUEUE */
 #ifdef RT_USING_MEMHEAP
-        else if(strcmp(argv[1], "memheap") == 0)
+        else if (strcmp(argv[1], "memheap") == 0)
         {
             list_memheap();
         }
 #endif /* RT_USING_MEMHEAP */
 #ifdef RT_USING_MEMPOOL
-        else if(strcmp(argv[1], "mempool") == 0)
+        else if (strcmp(argv[1], "mempool") == 0)
         {
             list_mempool();
         }
 #endif /* RT_USING_MEMPOOL */
 #ifdef RT_USING_DEVICE
-        else if(strcmp(argv[1], "device") == 0)
+        else if (strcmp(argv[1], "device") == 0)
         {
             list_device();
         }
 #endif /* RT_USING_DEVICE */
 #ifdef RT_USING_DFS
-        else if(strcmp(argv[1], "fd") == 0)
+        else if (strcmp(argv[1], "fd") == 0)
         {
             extern int list_fd(void);
             list_fd();
@@ -1022,7 +1022,7 @@ MSH_CMD_EXPORT_ALIAS(cmd_list, list, list objects);
 #include <stdlib.h>
 
 struct cputh_data
-{ 
+{
     rt_uint32_t to_tick;   /**< value of rt_tick when schedule to a thread */
     rt_uint32_t to_cycle;  /**< value of hardware timer counter when schedule to a thread */
     rt_uint32_t cycles;    /**< cpu hardware cycles used by thread */
@@ -1083,7 +1083,7 @@ rt_weak rt_uint32_t cputh_get_cycle_overflow(void)
  * @param tth is the thread which cpu will enter to.
  *
  */
-static void cputh_hook_of_scheduler(struct rt_thread* fth, struct rt_thread* tth)
+static void cputh_hook_of_scheduler(struct rt_thread *fth, struct rt_thread *tth)
 {
     rt_uint32_t calc_tick;
     rt_uint32_t over_tick;
@@ -1091,30 +1091,30 @@ static void cputh_hook_of_scheduler(struct rt_thread* fth, struct rt_thread* tth
     rt_uint32_t sw_tick;
     rt_uint32_t sw_cycle;
 
-    struct cputh_data_pingpang * from = (struct cputh_data_pingpang *)fth->user_data;
-    struct cputh_data_pingpang * to = (struct cputh_data_pingpang *)tth->user_data;
+    struct cputh_data_pingpang *from = (struct cputh_data_pingpang *)fth->user_data;
+    struct cputh_data_pingpang *to = (struct cputh_data_pingpang *)tth->user_data;
 
     sw_cycle = cputh_get_cycle_in_tick();
 #ifdef CPUTH_TIMER_COUNTDOWN
-    sw_cycle = cycle_per_tick-sw_cycle;
+    sw_cycle = cycle_per_tick - sw_cycle;
 #endif
     sw_tick = rt_tick_get();
     /* if hardware timer overflowed, update tick */
-    if(cputh_get_cycle_overflow())
+    if (cputh_get_cycle_overflow())
     {
         sw_tick += 1;
     }
 
-    calc_tick = start_tick+calc_prd;
+    calc_tick = start_tick + calc_prd;
 
-    if(from==RT_NULL)
+    if (from == RT_NULL)
     {
         return;
     }
-
-    if(to->data[cputh_index].index!=from->data[cputh_index].index)
+
+    if (to->data[cputh_index].index != from->data[cputh_index].index)
     {
-        to->data[cputh_index].index=from->data[cputh_index].index;
+        to->data[cputh_index].index = from->data[cputh_index].index;
 #ifdef CPUTH_USING_STATISTICS
         to->data[cputh_index].scheduled = 0;
         to->data[cputh_index].overed = 0;
@@ -1122,31 +1122,31 @@ static void cputh_hook_of_scheduler(struct rt_thread *fth, struct rt_thread *tth
         to->data[cputh_index].cycles = 0;
     }
 
-    over_tick = OT(sw_tick,calc_tick,sw_cycle,start_cycle);
-    over_cycle = OC(sw_cycle,start_cycle);
+    over_tick = OT(sw_tick, calc_tick, sw_cycle, start_cycle);
+    over_cycle = OC(sw_cycle, start_cycle);
 
-    if((rt_int32_t)over_tick>=0)
+    if ((rt_int32_t)over_tick >= 0)
     {
-        rt_uint32_t ot1 = OT(calc_tick,from->data[cputh_index].to_tick,start_cycle,from->data[cputh_index].to_cycle);
-        rt_uint32_t oc1 = OC(start_cycle,from->data[cputh_index].to_cycle);
-        rt_uint32_t overed = over_tick/calc_prd;
-        rt_uint32_t rem_tick = over_tick-overed*calc_prd;
-        from->data[cputh_index].cycles += ot1*cycle_per_tick+oc1;
+        rt_uint32_t ot1 = OT(calc_tick, from->data[cputh_index].to_tick, start_cycle, from->data[cputh_index].to_cycle);
+        rt_uint32_t oc1 = OC(start_cycle, from->data[cputh_index].to_cycle);
+        rt_uint32_t overed = over_tick / calc_prd;
+        rt_uint32_t rem_tick = over_tick - overed * calc_prd;
+        from->data[cputh_index].cycles += ot1 * cycle_per_tick + oc1;
 #ifdef CPUTH_USING_STATISTICS
-        from->data[cputh_index].overed = over_tick/calc_prd;
+        from->data[cputh_index].overed = over_tick / calc_prd;
 #endif
-        cputh_index ^= 1;
-        index_now +=1;
-        start_tick = calc_tick + over_tick-rem_tick;
-
-        from->data[cputh_index].cycles = rem_tick*cycle_per_tick+over_cycle;
+        cputh_index ^= 1;
+        index_now += 1;
+        start_tick = calc_tick + over_tick - rem_tick;
+
+        from->data[cputh_index].cycles = rem_tick * cycle_per_tick + over_cycle;
         from->data[cputh_index].index = index_now;
-
+
         to->data[cputh_index].cycles = 0;
         to->data[cputh_index].to_tick = sw_tick;
-        to->data[cputh_index].to_cycle = sw_cycle; 
+        to->data[cputh_index].to_cycle = sw_cycle;
         to->data[cputh_index].index = index_now;
-
+
 #ifdef CPUTH_USING_STATISTICS
         from->data[cputh_index].scheduled = 0;
         to->data[cputh_index].scheduled = 1;
@@ -1154,14 +1154,14 @@ static void cputh_hook_of_scheduler(struct rt_thread *fth, struct rt_thread *tth
     }
     else
     {
-        rt_uint32_t ot1 = OT(sw_tick,from->data[cputh_index].to_tick,sw_cycle,from->data[cputh_index].to_cycle);
-        rt_uint32_t oc1 = OC(sw_cycle,from->data[cputh_index].to_cycle);
-        from->data[cputh_index].cycles += ot1*cycle_per_tick+oc1;
+        rt_uint32_t ot1 = OT(sw_tick, from->data[cputh_index].to_tick, sw_cycle, from->data[cputh_index].to_cycle);
+        rt_uint32_t oc1 = OC(sw_cycle, from->data[cputh_index].to_cycle);
+        from->data[cputh_index].cycles += ot1 * cycle_per_tick + oc1;
         to->data[cputh_index].to_tick = sw_tick;
         to->data[cputh_index].to_cycle = sw_cycle;
 #ifdef CPUTH_USING_STATISTICS
-        to->data[cputh_index].scheduled++; 
-#endif 
+        to->data[cputh_index].scheduled++;
+#endif
     }
 }
 
@@ -1169,45 +1169,45 @@ void print_usage(void)
 {
     struct rt_list_node *node = RT_NULL;
     struct rt_thread *thread;
-    rt_list_t * list = &rt_object_get_information(RT_Object_Class_Thread)->object_list;
-    struct cputh_data * data;
+    rt_list_t *list = &rt_object_get_information(RT_Object_Class_Thread)->object_list;
+    struct cputh_data *data;
     rt_uint32_t usage;
 
 #ifdef CPUTH_USING_STATISTICS
 
-    rt_kprintf("tick %d\n",rt_tick_get());
+    rt_kprintf("tick %d\n", rt_tick_get());
     rt_kprintf("%-*.s usage  overed     cycled     scheduled\n", RT_NAME_MAX, "thread");
     rt_kprintf("-------- ------- ---------- ---------- ----------\n");
     rt_enter_critical();
     rt_list_for_each(node, list)
     {
         thread = rt_container_of(node, struct rt_thread, parent.list);
-        data = &((struct cputh_data_pingpang *)thread->user_data)->data[cputh_index^1];
-        usage = data->cycles*10000.0f/calc_prd/cycle_per_tick;
-        rt_kprintf("%-*.*s",RT_NAME_MAX, RT_NAME_MAX, thread->parent.name);
-        if(data->index+1 == index_now)
+        data = &((struct cputh_data_pingpang *)thread->user_data)->data[cputh_index ^ 1];
+        usage = data->cycles * 10000.0f / calc_prd / cycle_per_tick;
+        rt_kprintf("%-*.*s", RT_NAME_MAX, RT_NAME_MAX, thread->parent.name);
+        if (data->index + 1 == index_now)
        {
-            rt_kprintf(" %3u.%02u%% 0x%08x 0x%08x 0x%08x\n", usage/100,usage%100,data->overed,data->cycles,data->scheduled);
+            rt_kprintf(" %3u.%02u%% 0x%08x 0x%08x 0x%08x\n", usage / 100, usage % 100, data->overed, data->cycles, data->scheduled);
        }
        else
        {
-            rt_kprintf(" %3u.%02u%% 0x%08x 0x%08x 0x%08x\n", 0,0,0,0,0);
+            rt_kprintf(" %3u.%02u%% 0x%08x 0x%08x 0x%08x\n", 0, 0, 0, 0, 0);
        }
     }
     rt_exit_critical();
 #else
-    rt_kprintf("tick %d\n",rt_tick_get());
+    rt_kprintf("tick %d\n", rt_tick_get());
     rt_kprintf("%-*.s usage\n", RT_NAME_MAX, "thread");
     rt_kprintf("-------- -------\n");
     rt_enter_critical();
     rt_list_for_each(node, list)
     {
         thread = rt_container_of(node, struct rt_thread, parent.list);
-        data = &((struct cputh_data_pingpang *)thread->user_data)->data[cputh_index^1];
-        usage = data->cycles*10000.0f/calc_prd/cycle_per_tick;
-        if(data->index+1 == index_now)
+        data = &((struct cputh_data_pingpang *)thread->user_data)->data[cputh_index ^ 1];
+        usage = data->cycles * 10000.0f / calc_prd / cycle_per_tick;
+        if (data->index + 1 == index_now)
         {
-            rt_kprintf("%-*.*s %3u.%02u%%\n", RT_NAME_MAX, RT_NAME_MAX, thread->parent.name, usage/100, usage%100);
+            rt_kprintf("%-*.*s %3u.%02u%%\n", RT_NAME_MAX, RT_NAME_MAX, thread->parent.name, usage / 100, usage % 100);
         }
         else
         {
@@ -1225,15 +1225,15 @@ void print_usage(void)
 static struct rt_event event_cpu;
 static struct rt_timer tim_cpu;
 
-static void rt_timer_cpu(void * parameter)
+static void rt_timer_cpu(void *parameter)
 {
-    rt_event_send(&event_cpu,EVENT_CPU_UPDATE_PERIOD);
+    rt_event_send(&event_cpu, EVENT_CPU_UPDATE_PERIOD);
 }
 
 static rt_err_t _rx_ind(rt_device_t dev, rt_size_t size)
 {
     RT_ASSERT(dev != RT_NULL);
-    rt_event_send(&event_cpu,EVENT_CPU_UPDATE_MANUAL);
+    rt_event_send(&event_cpu, EVENT_CPU_UPDATE_MANUAL);
     return RT_EOK;
 }
 
@@ -1241,19 +1241,19 @@ static void release_data(void)
 {
     struct rt_list_node *node = RT_NULL;
     struct rt_thread *thread;
-    rt_list_t * list = &rt_object_get_information(RT_Object_Class_Thread)->object_list;
+    rt_list_t *list = &rt_object_get_information(RT_Object_Class_Thread)->object_list;
     rt_list_for_each(node, list)
     {
         thread = rt_container_of(node, struct rt_thread, parent.list);
-        if(thread->user_data!=RT_NULL)
+        if (thread->user_data != RT_NULL)
         {
-            rt_free((void*)thread->user_data);
+            rt_free((void *)thread->user_data);
             thread->user_data = RT_NULL;
         }
     }
 }
 
-static void cputh(int argc, char**argv)
+static void cputh(int argc, char **argv)
 {
     cputh_index = 0;
     index_now = 0;
@@ -1263,12 +1263,12 @@ static void cputh(int argc, char **argv)
 
     struct rt_list_node *node = RT_NULL;
     struct rt_thread *thread;
-    rt_list_t * list = &rt_object_get_information(RT_Object_Class_Thread)->object_list;
+    rt_list_t *list = &rt_object_get_information(RT_Object_Class_Thread)->object_list;
 
     rt_list_for_each(node, list)
     {
-        void * data = rt_malloc(sizeof(struct cputh_data_pingpang));
-        if(data == RT_NULL)
+        void *data = rt_malloc(sizeof(struct cputh_data_pingpang));
+        if (data == RT_NULL)
         {
             rt_kprintf("failed to init cputh data\n");
             release_data();
@@ -1280,46 +1280,46 @@ static void cputh(int argc, char **argv)
     }
 
     rt_int32_t num = -1;
-    if(argc>1)
+    if (argc > 1)
     {
         calc_prd = atoi(argv[1]);
-        if(calc_prd==0)
+        if (calc_prd == 0)
         {
             calc_prd = RT_TICK_PER_SECOND;
         }
     }
-    if(argc>2)
+    if (argc > 2)
     {
         num = atoi(argv[2]);
     }
 
     cycle_per_tick = cputh_get_cycle_per_tick();
 
-    rt_timer_init(&tim_cpu,"timcpu",rt_timer_cpu,RT_NULL,calc_prd,RT_TIMER_FLAG_PERIODIC);
+    rt_timer_init(&tim_cpu, "timcpu", rt_timer_cpu, RT_NULL, calc_prd, RT_TIMER_FLAG_PERIODIC);
     rt_event_init(&event_cpu, "evtcpu", RT_IPC_FLAG_FIFO);
 
     rt_device_t console = rt_console_get_device();
-    rt_err_t (*rx_ind)(rt_device_t dev,rt_size_t size) = console->rx_indicate;
+    rt_err_t (*rx_ind)(rt_device_t dev, rt_size_t size) = console->rx_indicate;
     /* take over console input */
     rt_device_set_rx_indicate(console, _rx_ind);
     rt_enter_critical();
     start_tick = rt_tick_get();
     start_cycle = cputh_get_cycle_in_tick();
 #ifdef CPUTH_TIMER_COUNTDOWN
-    start_cycle = cycle_per_tick-start_cycle;
+    start_cycle = cycle_per_tick - start_cycle;
 #endif
 
-    thread=rt_thread_self();
-    ((struct cputh_data*)thread->user_data)->to_tick = start_tick;
-    ((struct cputh_data*)thread->user_data)->to_cycle = start_cycle;
+    thread = rt_thread_self();
+    ((struct cputh_data *)thread->user_data)->to_tick = start_tick;
+    ((struct cputh_data *)thread->user_data)->to_cycle = start_cycle;
 
     rt_scheduler_sethook(cputh_hook_of_scheduler);
     rt_timer_start(&tim_cpu);
     rt_exit_critical();
-
-    rt_kprintf("cputh period %u, ",calc_prd);
-    rt_kprintf("cycles per tick %u\n",cycle_per_tick);
-
+
+    rt_kprintf("cputh period %u, ", calc_prd);
+    rt_kprintf("cycles per tick %u\n", cycle_per_tick);
+
     /* redraw screen */
     rt_kprintf("\033c");
     /* save cursor position */
@@ -1329,31 +1329,31 @@ static void cputh(int argc, char **argv)
     /* restore cursor position */
     rt_kprintf("\033[u");
     print_usage();
-
-    while(num)
+
+    while (num)
     {
         rt_uint32_t e;
         rt_event_recv(&event_cpu,
-                  (EVENT_CPU_UPDATE_PERIOD | EVENT_CPU_UPDATE_MANUAL),
-                  RT_EVENT_FLAG_OR | RT_EVENT_FLAG_CLEAR,
-                  RT_WAITING_FOREVER,&e);
-        if(e&EVENT_CPU_UPDATE_MANUAL)
+                      (EVENT_CPU_UPDATE_PERIOD | EVENT_CPU_UPDATE_MANUAL),
+                      RT_EVENT_FLAG_OR | RT_EVENT_FLAG_CLEAR,
+                      RT_WAITING_FOREVER, &e);
+        if (e & EVENT_CPU_UPDATE_MANUAL)
         {
             char ach;
             rt_size_t size;
 __repeat:
             size = rt_device_read(console, -1, &ach, 1);
 
-            if(size==0)
+            if (size == 0)
             {
                 continue;
             }
             /* ctrl+c */
-            if(ach == 0x03)
+            if (ach == 0x03)
             {
                 break;
             }
-            if(ach == 'q')
+            if (ach == 'q')
             {
                 break;
             }
@@ -1365,7 +1365,7 @@ static void cputh(int argc, char **argv)
 
         rt_kprintf("\033[u");
         print_usage();
-        if(num>0)
+        if (num > 0)
         {
             num--;
         }
@@ -1384,7 +1384,7 @@ static void cputh(int argc, char **argv)
     release_data();
 
 }
-MSH_CMD_EXPORT(cputh,cputh [period] [counts]);
+MSH_CMD_EXPORT(cputh, cputh [period] [counts]);
 
 #endif /* FINSH_USING_CPUTH */
 
 #endif /* RT_USING_FINSH */
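With the Kconfig options from patch 2 enabled through menuconfig, rtconfig.h would contain roughly the following (illustrative; the symbol names are exactly those added to components/finsh/Kconfig):

/* rtconfig.h fragment when the new options are enabled */
#define FINSH_USING_CPUTH
#define CPUTH_USING_STATISTICS   /* optional: extra per-thread counters */
#define CPUTH_TIMER_COUNTDOWN    /* only for down-counting tick timers */

The command is then invoked from msh as, for example, `cputh 1000 5` (a 1000-tick sampling period, refreshed five times); with the count argument omitted it runs until 'q' or Ctrl-C is pressed, restoring the console's original rx_indicate handler and freeing the per-thread data on exit.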