diff --git a/src/ipc.c b/src/ipc.c
index 35c0146dd33..05ad3d52ade 100644
--- a/src/ipc.c
+++ b/src/ipc.c
@@ -46,6 +46,8 @@
  * 2022-10-16     Bernard      add prioceiling feature in mutex
  * 2023-04-16     Xin-zheqi    redesigen queue recv and send function return real message size
  * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
+ * 2025-06-01     htl5241      remove redundancy rt_schedule()
+ *                             fix timer overflow
  */

 #include <rtthread.h>
@@ -626,9 +628,6 @@ static rt_err_t _rt_sem_take(rt_sem_t sem, rt_int32_t timeout, int suspend_flag)
         /* enable interrupt */
         rt_spin_unlock_irqrestore(&(sem->spinlock), level);

-        /* do schedule */
-        rt_schedule();
-
         if (thread->error != RT_EOK)
         {
             return thread->error > 0 ? -thread->error : thread->error;
@@ -695,16 +694,12 @@ RTM_EXPORT(rt_sem_trytake);
 rt_err_t rt_sem_release(rt_sem_t sem)
 {
     rt_base_t level;
-    rt_bool_t need_schedule;
-
     /* parameter check */
     RT_ASSERT(sem != RT_NULL);
     RT_ASSERT(rt_object_get_type(&sem->parent.parent) == RT_Object_Class_Semaphore);

     RT_OBJECT_HOOK_CALL(rt_object_put_hook, (&(sem->parent.parent)));

-    need_schedule = RT_FALSE;
-
     level = rt_spin_lock_irqsave(&(sem->spinlock));

     LOG_D("thread %s releases sem:%s, which value is: %d",
@@ -716,7 +711,6 @@ rt_err_t rt_sem_release(rt_sem_t sem)
     {
         /* resume the suspended thread */
         rt_susp_list_dequeue(&(sem->parent.suspend_thread), RT_EOK);
-        need_schedule = RT_TRUE;
     }
     else
     {
@@ -733,10 +727,6 @@ rt_err_t rt_sem_release(rt_sem_t sem)
     rt_spin_unlock_irqrestore(&(sem->spinlock), level);

-    /* resume a thread, re-schedule */
-    if (need_schedule == RT_TRUE)
-        rt_schedule();
-
     return RT_EOK;
 }
 RTM_EXPORT(rt_sem_release);
@@ -778,14 +768,12 @@ rt_err_t rt_sem_control(rt_sem_t sem, int cmd, void *arg)
         /* set new value */
         sem->value = (rt_uint16_t)value;
         rt_spin_unlock_irqrestore(&(sem->spinlock), level);
-        rt_schedule();

         return RT_EOK;
     }
     else if (cmd == RT_IPC_CMD_SET_VLIMIT)
     {
         rt_ubase_t max_value;
-        rt_bool_t need_schedule = RT_FALSE;

         max_value = (rt_uint16_t)((rt_uintptr_t)arg);
         if (max_value > RT_SEM_VALUE_MAX || max_value < 1)
@@ -800,18 +788,12 @@ rt_err_t rt_sem_control(rt_sem_t sem, int cmd, void *arg)
             {
                 /* resume all waiting thread */
                 rt_susp_list_resume_all(&sem->parent.suspend_thread, RT_ERROR);
-                need_schedule = RT_TRUE;
             }
         }

         /* set new value */
         sem->max_value = max_value;
         rt_spin_unlock_irqrestore(&(sem->spinlock), level);
-        if (need_schedule)
-        {
-            rt_schedule();
-        }
-
         return RT_EOK;
     }

@@ -1445,9 +1427,6 @@ static rt_err_t _rt_mutex_take(rt_mutex_t mutex, rt_int32_t timeout, int suspend
             rt_spin_unlock(&(mutex->spinlock));

-            /* do schedule */
-            rt_schedule();
-
             rt_spin_lock(&(mutex->spinlock));

             if (mutex->owner == thread)
@@ -1590,14 +1569,10 @@ rt_err_t rt_mutex_release(rt_mutex_t mutex)
 {
     rt_sched_lock_level_t slvl;
     struct rt_thread *thread;
-    rt_bool_t need_schedule;
-
     /* parameter check */
     RT_ASSERT(mutex != RT_NULL);
     RT_ASSERT(rt_object_get_type(&mutex->parent.parent) == RT_Object_Class_Mutex);

-    need_schedule = RT_FALSE;
-
     /* only thread could release mutex because we need test the ownership */
     RT_DEBUG_IN_THREAD_CONTEXT;

@@ -1631,8 +1606,7 @@ rt_err_t rt_mutex_release(rt_mutex_t mutex)
         rt_list_remove(&mutex->taken_list);

         /* whether change the thread priority */
-        need_schedule = _check_and_update_prio(thread, mutex);
-
+        _check_and_update_prio(thread, mutex);
         /* wakeup suspended thread */
         if (!rt_list_isempty(&mutex->parent.suspend_thread))
         {
@@ -1683,8 +1657,6 @@ rt_err_t rt_mutex_release(rt_mutex_t mutex)
             {
                 mutex->priority = 0xff;
             }
-
-            need_schedule = RT_TRUE;
         }
         else
         {
@@ -1707,10 +1679,6 @@ rt_err_t rt_mutex_release(rt_mutex_t mutex)
     rt_spin_unlock(&(mutex->spinlock));

-    /* perform a schedule */
-    if (need_schedule == RT_TRUE)
-        rt_schedule();
-
     return RT_EOK;
 }
 RTM_EXPORT(rt_mutex_release);
@@ -1968,7 +1936,6 @@ rt_err_t rt_event_send(rt_event_t event, rt_uint32_t set)
     rt_sched_lock_level_t slvl;
     rt_base_t level;
     rt_base_t status;
-    rt_bool_t need_schedule;
     rt_uint32_t need_clear_set = 0;

     /* parameter check */
@@ -1978,8 +1945,6 @@ rt_err_t rt_event_send(rt_event_t event, rt_uint32_t set)
     if (set == 0)
         return -RT_ERROR;

-    need_schedule = RT_FALSE;
-
     level = rt_spin_lock_irqsave(&(event->spinlock));

     /* set event */
@@ -2039,8 +2004,6 @@ rt_err_t rt_event_send(rt_event_t event, rt_uint32_t set)
                 rt_sched_thread_ready(thread);
                 thread->error = RT_EOK;

-                /* need do a scheduling */
-                need_schedule = RT_TRUE;
             }
         }
         if (need_clear_set)
@@ -2052,10 +2015,6 @@ rt_err_t rt_event_send(rt_event_t event, rt_uint32_t set)
     rt_sched_unlock(slvl);

     rt_spin_unlock_irqrestore(&(event->spinlock), level);

-    /* do a schedule */
-    if (need_schedule == RT_TRUE)
-        rt_schedule();
-
     return RT_EOK;
 }
 RTM_EXPORT(rt_event_send);
@@ -2195,9 +2154,6 @@ static rt_err_t _rt_event_recv(rt_event_t event,

     rt_spin_unlock_irqrestore(&(event->spinlock), level);

-    /* do a schedule */
-    rt_schedule();
-
     if (thread->error != RT_EOK)
     {
         /* return error */
@@ -2284,8 +2240,6 @@ rt_err_t rt_event_control(rt_event_t event, int cmd, void *arg)

         rt_spin_unlock_irqrestore(&(event->spinlock), level);

-        rt_schedule();
-
         return RT_EOK;
     }

@@ -2567,7 +2521,7 @@ static rt_err_t _rt_mb_send_wait(rt_mailbox_t mb,
 {
     struct rt_thread *thread;
     rt_base_t level;
-    rt_uint32_t tick_delta;
+    rt_uint32_t tick_stamp;
     rt_err_t ret;

     /* parameter check */
@@ -2578,7 +2532,7 @@ static rt_err_t _rt_mb_send_wait(rt_mailbox_t mb,
     RT_DEBUG_SCHEDULER_AVAILABLE(timeout != 0);

     /* initialize delta tick */
-    tick_delta = 0;
+    tick_stamp = 0;

     /* get current thread */
     thread = rt_thread_self();
@@ -2622,7 +2576,7 @@ static rt_err_t _rt_mb_send_wait(rt_mailbox_t mb,
         if (timeout > 0)
         {
             /* get the start tick of timer */
-            tick_delta = rt_tick_get();
+            tick_stamp = rt_tick_get();

             LOG_D("mb_send_wait: start timer of thread:%s",
                   thread->parent.name);
@@ -2635,9 +2589,6 @@ static rt_err_t _rt_mb_send_wait(rt_mailbox_t mb,
         }
         rt_spin_unlock_irqrestore(&(mb->spinlock), level);

-        /* re-schedule */
-        rt_schedule();
-
         /* resume from suspend state */
         if (thread->error != RT_EOK)
         {
@@ -2650,8 +2601,7 @@ static rt_err_t _rt_mb_send_wait(rt_mailbox_t mb,
         /* if it's not waiting forever and then re-calculate timeout tick */
         if (timeout > 0)
         {
-            tick_delta = rt_tick_get() - tick_delta;
-            timeout -= tick_delta;
+            timeout -= rt_tick_get_delta(tick_stamp);
             if (timeout < 0)
                 timeout = 0;
         }
@@ -2682,8 +2632,6 @@ static rt_err_t _rt_mb_send_wait(rt_mailbox_t mb,

         rt_spin_unlock_irqrestore(&(mb->spinlock), level);

-        rt_schedule();
-
         return RT_EOK;
     }
     rt_spin_unlock_irqrestore(&(mb->spinlock), level);
@@ -2806,8 +2754,6 @@ rt_err_t rt_mb_urgent(rt_mailbox_t mb, rt_ubase_t value)

         rt_spin_unlock_irqrestore(&(mb->spinlock), level);

-        rt_schedule();
-
         return RT_EOK;
     }
     rt_spin_unlock_irqrestore(&(mb->spinlock), level);
@@ -2846,7 +2792,7 @@ static rt_err_t _rt_mb_recv(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeo
 {
     struct rt_thread *thread;
     rt_base_t level;
-    rt_uint32_t tick_delta;
+    rt_uint32_t tick_stamp;
     rt_err_t ret;

     /* parameter check */
@@ -2857,7 +2803,7 @@ static rt_err_t _rt_mb_recv(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeo
     RT_DEBUG_SCHEDULER_AVAILABLE(timeout != 0);

     /* initialize delta tick */
-    tick_delta = 0;
+    tick_stamp = 0;

     /* get current thread */
     thread = rt_thread_self();
@@ -2902,7 +2848,7 @@ static rt_err_t _rt_mb_recv(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeo
         if (timeout > 0)
         {
             /* get the start tick of timer */
-            tick_delta = rt_tick_get();
+            tick_stamp = rt_tick_get();

             LOG_D("mb_recv: start timer of thread:%s",
                   thread->parent.name);
@@ -2916,9 +2862,6 @@ static rt_err_t _rt_mb_recv(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeo

         rt_spin_unlock_irqrestore(&(mb->spinlock), level);

-        /* re-schedule */
-        rt_schedule();
-
         /* resume from suspend state */
         if (thread->error != RT_EOK)
         {
@@ -2930,8 +2873,7 @@ static rt_err_t _rt_mb_recv(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeo
         /* if it's not waiting forever and then re-calculate timeout tick */
         if (timeout > 0)
         {
-            tick_delta = rt_tick_get() - tick_delta;
-            timeout -= tick_delta;
+            timeout -= rt_tick_get_delta(tick_stamp);
             if (timeout < 0)
                 timeout = 0;
         }
@@ -2960,8 +2902,6 @@ static rt_err_t _rt_mb_recv(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeo

     RT_OBJECT_HOOK_CALL(rt_object_take_hook, (&(mb->parent.parent)));

-    rt_schedule();
-
         return RT_EOK;
     }
     rt_spin_unlock_irqrestore(&(mb->spinlock), level);
@@ -3029,8 +2969,6 @@ rt_err_t rt_mb_control(rt_mailbox_t mb, int cmd, void *arg)

         rt_spin_unlock_irqrestore(&(mb->spinlock), level);

-        rt_schedule();
-
         return RT_EOK;
     }

@@ -3382,7 +3320,7 @@ static rt_err_t _rt_mq_send_wait(rt_mq_t mq,
 {
     rt_base_t level;
     struct rt_mq_message *msg;
-    rt_uint32_t tick_delta;
+    rt_uint32_t tick_stamp;
     struct rt_thread *thread;
     rt_err_t ret;

@@ -3402,7 +3340,7 @@ static rt_err_t _rt_mq_send_wait(rt_mq_t mq,
         return -RT_ERROR;

     /* initialize delta tick */
-    tick_delta = 0;
+    tick_stamp = 0;

     /* get current thread */
     thread = rt_thread_self();
@@ -3447,7 +3385,7 @@ static rt_err_t _rt_mq_send_wait(rt_mq_t mq,
         if (timeout > 0)
         {
             /* get the start tick of timer */
-            tick_delta = rt_tick_get();
+            tick_stamp = rt_tick_get();

             LOG_D("mq_send_wait: start timer of thread:%s",
                   thread->parent.name);
@@ -3461,9 +3399,6 @@ static rt_err_t _rt_mq_send_wait(rt_mq_t mq,

         rt_spin_unlock_irqrestore(&(mq->spinlock), level);

-        /* re-schedule */
-        rt_schedule();
-
         /* resume from suspend state */
         if (thread->error != RT_EOK)
         {
@@ -3475,8 +3410,7 @@ static rt_err_t _rt_mq_send_wait(rt_mq_t mq,
         /* if it's not waiting forever and then re-calculate timeout tick */
         if (timeout > 0)
         {
-            tick_delta = rt_tick_get() - tick_delta;
-            timeout -= tick_delta;
+            timeout -= rt_tick_get_delta(tick_stamp);
             if (timeout < 0)
                 timeout = 0;
         }
@@ -3556,8 +3490,6 @@ static rt_err_t _rt_mq_send_wait(rt_mq_t mq,

     rt_spin_unlock_irqrestore(&(mq->spinlock), level);

-    rt_schedule();
-
     return RT_EOK;
 }
 rt_spin_unlock_irqrestore(&(mq->spinlock), level);
@@ -3714,8 +3646,6 @@ rt_err_t rt_mq_urgent(rt_mq_t mq, const void *buffer, rt_size_t size)

     rt_spin_unlock_irqrestore(&(mq->spinlock), level);

-    rt_schedule();
-
     return RT_EOK;
 }

@@ -3765,7 +3695,7 @@ static rt_ssize_t _rt_mq_recv(rt_mq_t mq,
     struct rt_thread *thread;
     rt_base_t level;
     struct rt_mq_message *msg;
-    rt_uint32_t tick_delta;
+    rt_uint32_t tick_stamp;
     rt_err_t ret;
     rt_size_t len;

@@ -3781,7 +3711,7 @@ static rt_ssize_t _rt_mq_recv(rt_mq_t mq,
     RT_DEBUG_SCHEDULER_AVAILABLE(timeout != 0);

     /* initialize delta tick */
-    tick_delta = 0;
+    tick_stamp = 0;

     /* get current thread */
     thread = rt_thread_self();
     RT_OBJECT_HOOK_CALL(rt_object_trytake_hook, (&(mq->parent.parent)));
@@ -3826,7 +3756,7 @@ static rt_ssize_t _rt_mq_recv(rt_mq_t mq,
         if (timeout > 0)
         {
             /* get the start tick of timer */
-            tick_delta = rt_tick_get();
+            tick_stamp = rt_tick_get();

             LOG_D("set thread:%s to timer list",
                   thread->parent.name);
@@ -3840,9 +3770,6 @@ static rt_ssize_t _rt_mq_recv(rt_mq_t mq,

         rt_spin_unlock_irqrestore(&(mq->spinlock), level);

-        /* re-schedule */
-        rt_schedule();
-
         /* recv message */
         if (thread->error != RT_EOK)
         {
@@ -3855,8 +3782,7 @@ static rt_ssize_t _rt_mq_recv(rt_mq_t mq,
         /* if it's not waiting forever and then re-calculate timeout tick */
         if (timeout > 0)
         {
-            tick_delta = rt_tick_get() - tick_delta;
-            timeout -= tick_delta;
+            timeout -= rt_tick_get_delta(tick_stamp);
             if (timeout < 0)
                 timeout = 0;
         }
@@ -3905,8 +3831,6 @@ static rt_ssize_t _rt_mq_recv(rt_mq_t mq,

     RT_OBJECT_HOOK_CALL(rt_object_take_hook, (&(mq->parent.parent)));

-    rt_schedule();
-
     return len;
 }

@@ -4018,9 +3942,7 @@ rt_err_t rt_mq_control(rt_mq_t mq, int cmd, void *arg)
         mq->entry = 0;

         rt_spin_unlock_irqrestore(&(mq->spinlock), level);
-
-        rt_schedule();
-
+
         return RT_EOK;
     }

diff --git a/src/mempool.c b/src/mempool.c
index 28bdb6cf896..9c219427f96 100644
--- a/src/mempool.c
+++ b/src/mempool.c
@@ -17,6 +17,8 @@
  * 2022-01-07     Gabriel      Moving __on_rt_xxxxx_hook to mempool.c
  * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  * 2023-12-10     xqyjlj       fix spinlock assert
+ * 2025-06-01     htl5241      remove redundancy rt_schedule()
+ *                             fix timer overflow
  */

 #include <rtthread.h>
@@ -327,15 +329,12 @@ void *rt_mp_alloc(rt_mp_t mp, rt_int32_t time)
         /* enable interrupt */
         rt_spin_unlock_irqrestore(&(mp->spinlock), level);

-        /* do a schedule */
-        rt_schedule();
-
         if (thread->error != RT_EOK)
             return RT_NULL;

         if (time > 0)
         {
-            time -= rt_tick_get() - before_sleep;
+            time -= rt_tick_get_delta(before_sleep);
             if (time < 0)
                 time = 0;
         }
@@ -397,9 +396,6 @@ void rt_mp_free(void *block)
     {
         rt_spin_unlock_irqrestore(&(mp->spinlock), level);

-        /* do a schedule */
-        rt_schedule();
-
         return;
     }
     rt_spin_unlock_irqrestore(&(mp->spinlock), level);
diff --git a/src/scheduler_up.c b/src/scheduler_up.c
index 95e3cc7fb64..f2394783a0e 100644
--- a/src/scheduler_up.c
+++ b/src/scheduler_up.c
@@ -30,26 +30,28 @@
  * 2022-01-07     Gabriel      Moving __on_rt_xxxxx_hook to scheduler.c
  * 2023-03-27     rose_man     Split into scheduler upc and scheduler_mp.c
  * 2023-10-17     ChuShicheng  Modify the timing of clearing RT_THREAD_STAT_YIELD flag bits
+ * 2025-06-01     htl5241      fix task miss and performance improvement
  */

 #define __RT_IPC_SOURCE__
 #include <rtthread.h>
 #include <rthw.h>

-#define DBG_TAG "kernel.scheduler"
-#define DBG_LVL DBG_INFO
+#define DBG_TAG           "kernel.scheduler"
+#define DBG_LVL           DBG_INFO
 #include <rtdbg.h>

-rt_list_t rt_thread_priority_table[RT_THREAD_PRIORITY_MAX];
+rt_list_t   rt_thread_priority_table[RT_THREAD_PRIORITY_MAX];
 rt_uint32_t rt_thread_ready_priority_group;
+rt_base_t   rt_thread_ready_highest_priority;
 #if RT_THREAD_PRIORITY_MAX > 32
 /* Maximum priority level, 256 */
 rt_uint8_t rt_thread_ready_table[32];
 #endif /* RT_THREAD_PRIORITY_MAX > 32 */

 extern volatile rt_atomic_t rt_interrupt_nest;
-static rt_int16_t rt_scheduler_lock_nest;
-rt_uint8_t rt_current_priority;
+static rt_int16_t rt_scheduler_lock_nest;
+rt_uint8_t        rt_current_priority;

 #if defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR)
 static void (*rt_scheduler_hook)(struct rt_thread *from, struct rt_thread *to);
@@ -67,7 +69,8 @@ static void (*rt_scheduler_switch_hook)(struct rt_thread *tid);
  *
  * @param hook is the hook function.
  */
-void rt_scheduler_sethook(void (*hook)(struct rt_thread *from, struct rt_thread *to))
+void rt_scheduler_sethook(void (*hook)(struct rt_thread *from,
+                                       struct rt_thread *to))
 {
     rt_scheduler_hook = hook;
 }
@@ -86,26 +89,25 @@ void rt_scheduler_switch_sethook(void (*hook)(struct rt_thread *tid))

 /**@}*/
 #endif /* RT_USING_HOOK */

-static struct rt_thread* _scheduler_get_highest_priority_thread(rt_ubase_t *highest_prio)
+rt_inline void _scheduler_update_highest_priority(void)
 {
-    struct rt_thread *highest_priority_thread;
-    rt_ubase_t highest_ready_priority;
-
 #if RT_THREAD_PRIORITY_MAX > 32
     rt_ubase_t number;
-
     number = __rt_ffs(rt_thread_ready_priority_group) - 1;
-    highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1;
+    rt_thread_ready_highest_priority =
+        (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1;
 #else
-    highest_ready_priority = __rt_ffs(rt_thread_ready_priority_group) - 1;
+    rt_thread_ready_highest_priority =
+        __rt_ffs(rt_thread_ready_priority_group) - 1;
 #endif /* RT_THREAD_PRIORITY_MAX > 32 */
+}
+rt_inline struct rt_thread *
+_scheduler_get_priority_thread(rt_ubase_t priority)
+{
     /* get highest ready priority thread */
-    highest_priority_thread = RT_THREAD_LIST_NODE_ENTRY(rt_thread_priority_table[highest_ready_priority].next);
-
-    *highest_prio = highest_ready_priority;
-
-    return highest_priority_thread;
+    return RT_THREAD_LIST_NODE_ENTRY(
+        rt_thread_priority_table[priority].next);
 }

 rt_err_t rt_sched_lock(rt_sched_lock_level_t *plvl)
@@ -147,10 +149,9 @@ void rt_system_scheduler_init(void)
     rt_base_t offset;

     rt_scheduler_lock_nest = 0;

-    LOG_D("start scheduler: max priority 0x%02x",
-          RT_THREAD_PRIORITY_MAX);
+    LOG_D("start scheduler: max priority 0x%02x", RT_THREAD_PRIORITY_MAX);

-    for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
+    for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; ++offset)
     {
         rt_list_init(&rt_thread_priority_table[offset]);
     }
@@ -171,13 +172,15 @@ void rt_system_scheduler_init(void)
 void rt_system_scheduler_start(void)
 {
     struct rt_thread *to_thread;
-    rt_ubase_t highest_ready_priority;

-    to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
+    _scheduler_update_highest_priority();
+    to_thread = _scheduler_get_priority_thread(
+        rt_thread_ready_highest_priority);

     rt_cpu_self()->current_thread = to_thread;

     rt_sched_remove_thread(to_thread);
+
     RT_SCHED_CTX(to_thread).stat = RT_THREAD_RUNNING;

     /* switch to new thread */
@@ -187,6 +190,82 @@ void rt_system_scheduler_start(void)
     /* never come back */
 }

+/**
+ * @brief This function will insert a thread to the system ready queue. The state of
+ *        thread will be set as READY and the thread will be removed from suspend queue.
+ *
+ * @param thread is the thread to be inserted.
+ *
+ * @note  Please do not invoke this function in user application.
+ */
+rt_inline void _rt_sched_insert_thread(struct rt_thread *thread)
+{
+    /* READY thread, insert to ready queue */
+    RT_SCHED_CTX(thread).stat =
+        RT_THREAD_READY | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
+    /* there is no time slices left(YIELD), inserting thread before ready list*/
+    if ((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
+    {
+        rt_list_insert_before(
+            &(rt_thread_priority_table[RT_SCHED_PRIV(thread)
+                                           .current_priority]),
+            &RT_THREAD_LIST_NODE(thread));
+    }
+    /* there are some time slices left, inserting thread after ready list to schedule it firstly at next time*/
+    else
+    {
+        rt_list_insert_after(
+            &(rt_thread_priority_table[RT_SCHED_PRIV(thread)
+                                           .current_priority]),
+            &RT_THREAD_LIST_NODE(thread));
+    }
+
+    LOG_D("insert thread[%.*s], the priority: %d", RT_NAME_MAX,
+          thread->parent.name,
+          RT_SCHED_PRIV(rt_current_thread).current_priority);
+
+    /* set priority mask */
+#if RT_THREAD_PRIORITY_MAX > 32
+    rt_thread_ready_table[RT_SCHED_PRIV(thread).number] |=
+        RT_SCHED_PRIV(thread).high_mask;
+#endif /* RT_THREAD_PRIORITY_MAX > 32 */
+    rt_thread_ready_priority_group |= RT_SCHED_PRIV(thread).number_mask;
+}
+
+/**
+ * @brief This function will remove a thread from system ready queue.
+ *
+ * @param thread is the thread to be removed.
+ *
+ * @note  Please do not invoke this function in user application.
+ */
+rt_inline void _rt_sched_remove_thread(struct rt_thread *thread)
+{
+    LOG_D("remove thread[%.*s], the priority: %d", RT_NAME_MAX,
+          thread->parent.name,
+          RT_SCHED_PRIV(rt_current_thread).current_priority);
+
+    /* remove thread from ready list */
+    rt_list_remove(&RT_THREAD_LIST_NODE(thread));
+    if (rt_list_isempty(
+            &(rt_thread_priority_table[RT_SCHED_PRIV(thread)
+                                           .current_priority])))
+    {
+#if RT_THREAD_PRIORITY_MAX > 32
+        rt_thread_ready_table[RT_SCHED_PRIV(thread).number] &=
+            ~RT_SCHED_PRIV(thread).high_mask;
+        if (rt_thread_ready_table[RT_SCHED_PRIV(thread).number] == 0)
+        {
+            rt_thread_ready_priority_group &=
+                ~RT_SCHED_PRIV(thread).number_mask;
+        }
+#else
+        rt_thread_ready_priority_group &=
+            ~RT_SCHED_PRIV(thread).number_mask;
+#endif /* RT_THREAD_PRIORITY_MAX > 32 */
+    }
+}
+
 /**
  * @addtogroup group_Thread
  * @cond
@@ -201,127 +280,135 @@ void rt_system_scheduler_start(void)
 void rt_schedule(void)
 {
     rt_base_t level;
+    /* need_insert_from_thread: need to insert from_thread to ready queue */
+    int need_insert_from_thread;
+    /* using local variable to avoid unecessary function call */
+    struct rt_thread *curr_thread;
     struct rt_thread *to_thread;
     struct rt_thread *from_thread;
-    /* using local variable to avoid unecessary function call */
-    struct rt_thread *curr_thread = rt_thread_self();
-
     /* disable interrupt */
     level = rt_hw_interrupt_disable();

     /* check the scheduler is enabled or not */
-    if (rt_scheduler_lock_nest == 0)
+    if (rt_scheduler_lock_nest == 0 && rt_thread_ready_priority_group)
     {
-        rt_ubase_t highest_ready_priority;
+        curr_thread = rt_thread_self();

-        if (rt_thread_ready_priority_group != 0)
+        if ((RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
         {
-            /* need_insert_from_thread: need to insert from_thread to ready queue */
-            int need_insert_from_thread = 0;
+            if (RT_SCHED_PRIV(curr_thread).current_priority < rt_thread_ready_highest_priority)
+            {
+                to_thread = curr_thread;
+            }
+            else if (RT_SCHED_PRIV(curr_thread).current_priority == rt_thread_ready_highest_priority && (RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_YIELD_MASK) == 0)
+            {
+                to_thread = curr_thread;
+            }
+            else
+            {
+                to_thread = _scheduler_get_priority_thread(
+                    rt_thread_ready_highest_priority);
+                need_insert_from_thread = 1;
+            }
+        }
+        else
+        {
+            to_thread = _scheduler_get_priority_thread(
+                rt_thread_ready_highest_priority);
+            need_insert_from_thread = 0;
+        }

-            to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
+        if (to_thread != curr_thread)
+        {
+            /* if the destination thread is not the same as current thread */
+            rt_current_priority =
+                (rt_uint8_t)rt_thread_ready_highest_priority;
+            from_thread = curr_thread;
+            rt_cpu_self()->current_thread = to_thread;

-            if ((RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
+            RT_OBJECT_HOOK_CALL(rt_scheduler_hook,
+                                (from_thread, to_thread));
+
+            if (need_insert_from_thread)
             {
-                if (RT_SCHED_PRIV(curr_thread).current_priority < highest_ready_priority)
-                {
-                    to_thread = curr_thread;
-                }
-                else if (RT_SCHED_PRIV(curr_thread).current_priority == highest_ready_priority
-                         && (RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_YIELD_MASK) == 0)
-                {
-                    to_thread = curr_thread;
-                }
-                else
-                {
-                    need_insert_from_thread = 1;
-                }
+                _rt_sched_remove_thread(from_thread);
+                _rt_sched_insert_thread(from_thread);
             }

-            if (to_thread != curr_thread)
+            if ((RT_SCHED_CTX(from_thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
             {
-                /* if the destination thread is not the same as current thread */
-                rt_current_priority = (rt_uint8_t)highest_ready_priority;
-                from_thread = curr_thread;
-                rt_cpu_self()->current_thread = to_thread;
+                RT_SCHED_CTX(from_thread).stat &=
+                    ~RT_THREAD_STAT_YIELD_MASK;
+            }

-                RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (from_thread, to_thread));
+            _rt_sched_remove_thread(to_thread);
+            RT_SCHED_CTX(to_thread).stat =
+                RT_THREAD_RUNNING | (RT_SCHED_CTX(to_thread).stat & ~RT_THREAD_STAT_MASK);

-                if (need_insert_from_thread)
-                {
-                    rt_sched_insert_thread(from_thread);
-                }
+            _scheduler_update_highest_priority();

-                if ((RT_SCHED_CTX(from_thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
-                {
-                    RT_SCHED_CTX(from_thread).stat &= ~RT_THREAD_STAT_YIELD_MASK;
-                }
+            /* switch to new thread */
+            LOG_D("[%d]switch to priority#%d "
+                  "thread:%.*s(sp:0x%08x), "
+                  "from thread:%.*s(sp: 0x%08x)",
+                  rt_interrupt_nest, highest_ready_priority,
+                  RT_NAME_MAX, to_thread->parent.name,
+                  to_thread->sp, RT_NAME_MAX,
+                  from_thread->parent.name, from_thread->sp);

-                rt_sched_remove_thread(to_thread);
-                RT_SCHED_CTX(to_thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(to_thread).stat & ~RT_THREAD_STAT_MASK);
+            RT_SCHEDULER_STACK_CHECK(to_thread);

-                /* switch to new thread */
-                LOG_D("[%d]switch to priority#%d "
-                      "thread:%.*s(sp:0x%08x), "
-                      "from thread:%.*s(sp: 0x%08x)",
-                      rt_interrupt_nest, highest_ready_priority,
-                      RT_NAME_MAX, to_thread->parent.name, to_thread->sp,
-                      RT_NAME_MAX, from_thread->parent.name, from_thread->sp);
+            if (rt_interrupt_nest == 0)
+            {
+                extern void rt_thread_handle_sig(
+                    rt_bool_t clean_state);

-                RT_SCHEDULER_STACK_CHECK(to_thread);
+                RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook,
+                                    (from_thread));

-                if (rt_interrupt_nest == 0)
-                {
-                    extern void rt_thread_handle_sig(rt_bool_t clean_state);
+                rt_hw_context_switch(
+                    (rt_uintptr_t)&from_thread->sp,
+                    (rt_uintptr_t)&to_thread->sp);

-                    RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (from_thread));
+                /* enable interrupt */
+                rt_hw_interrupt_enable(level);

-                    rt_hw_context_switch((rt_uintptr_t)&from_thread->sp,
-                                         (rt_uintptr_t)&to_thread->sp);
+#ifdef RT_USING_SIGNALS
+                /* check stat of thread for signal */
+                level = rt_hw_interrupt_disable();
+                if (RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_SIGNAL_PENDING)
+                {
+                    extern void rt_thread_handle_sig(
+                        rt_bool_t clean_state);
+
+                    RT_SCHED_CTX(curr_thread).stat &=
+                        ~RT_THREAD_STAT_SIGNAL_PENDING;

-                    /* enable interrupt */
                     rt_hw_interrupt_enable(level);

-#ifdef RT_USING_SIGNALS
-                    /* check stat of thread for signal */
-                    level = rt_hw_interrupt_disable();
-                    if (RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_SIGNAL_PENDING)
-                    {
-                        extern void rt_thread_handle_sig(rt_bool_t clean_state);
-
-                        RT_SCHED_CTX(curr_thread).stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
-
-                        rt_hw_interrupt_enable(level);
-
-                        /* check signal status */
-                        rt_thread_handle_sig(RT_TRUE);
-                    }
-                    else
-                    {
-                        rt_hw_interrupt_enable(level);
-                    }
-#endif /* RT_USING_SIGNALS */
-                    goto __exit;
+                    /* check signal status */
+                    rt_thread_handle_sig(RT_TRUE);
                 }
                 else
                 {
-                    LOG_D("switch in interrupt");
-
-                    rt_hw_context_switch_interrupt((rt_uintptr_t)&from_thread->sp,
-                            (rt_uintptr_t)&to_thread->sp, from_thread, to_thread);
+                    rt_hw_interrupt_enable(level);
                 }
+#endif /* RT_USING_SIGNALS */
+                goto __exit;
             }
             else
             {
-                rt_sched_remove_thread(curr_thread);
-                RT_SCHED_CTX(curr_thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(curr_thread).stat & ~RT_THREAD_STAT_MASK);
+                LOG_D("switch in interrupt");
+
+                rt_hw_context_switch_interrupt(
+                    (rt_uintptr_t)&from_thread->sp,
+                    (rt_uintptr_t)&to_thread->sp,
+                    from_thread, to_thread);
             }
         }
     }
-
     /* enable interrupt */
     rt_hw_interrupt_enable(level);
-
 __exit:
     return;
 }

@@ -330,18 +417,22 @@ void rt_schedule(void)
 void rt_sched_thread_startup(struct rt_thread *thread)
 {
 #if RT_THREAD_PRIORITY_MAX > 32
-    RT_SCHED_PRIV(thread).number = RT_SCHED_PRIV(thread).current_priority >> 3; /* 5bit */
+    RT_SCHED_PRIV(thread).number      = RT_SCHED_PRIV(thread).current_priority >> 3; /* 5bit */
     RT_SCHED_PRIV(thread).number_mask = 1L << RT_SCHED_PRIV(thread).number;
-    RT_SCHED_PRIV(thread).high_mask = 1L << (RT_SCHED_PRIV(thread).current_priority & 0x07); /* 3bit */
+    RT_SCHED_PRIV(thread).high_mask =
+        1L
+        << (RT_SCHED_PRIV(thread).current_priority & 0x07); /* 3bit */
 #else
-    RT_SCHED_PRIV(thread).number_mask = 1L << RT_SCHED_PRIV(thread).current_priority;
-#endif /* RT_THREAD_PRIORITY_MAX > 32 */
+    RT_SCHED_PRIV(thread).number_mask =
+        1L << RT_SCHED_PRIV(thread).current_priority;
+#endif /* RT_THREAD_PRIORITY_MAX > 32 */

     /* change thread stat, so we can resume it */
     RT_SCHED_CTX(thread).stat = RT_THREAD_SUSPEND;
 }

-void rt_sched_thread_init_priv(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority)
+void rt_sched_thread_init_priv(struct rt_thread *thread, rt_uint32_t tick,
+                               rt_uint8_t priority)
 {
     rt_list_init(&RT_THREAD_LIST_NODE(thread));

@@ -353,12 +444,12 @@ void rt_sched_thread_init_priv(struct rt_thread *thread, rt_uint32_t tick, rt_ui
     /* don't add to scheduler queue as init thread */
     RT_SCHED_PRIV(thread).number_mask = 0;
 #if RT_THREAD_PRIORITY_MAX > 32
-    RT_SCHED_PRIV(thread).number = 0;
+    RT_SCHED_PRIV(thread).number    = 0;
     RT_SCHED_PRIV(thread).high_mask = 0;
 #endif /* RT_THREAD_PRIORITY_MAX > 32 */

     /* tick init */
-    RT_SCHED_PRIV(thread).init_tick = tick;
+    RT_SCHED_PRIV(thread).init_tick      = tick;
     RT_SCHED_PRIV(thread).remaining_tick = tick;
 }

@@ -382,33 +473,14 @@ void rt_sched_insert_thread(struct rt_thread *thread)
     /* it's current thread, it should be RUNNING thread */
     if (thread == rt_current_thread)
     {
-        RT_SCHED_CTX(thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
+        RT_SCHED_CTX(thread).stat =
+            RT_THREAD_RUNNING | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
         goto __exit;
     }

-    /* READY thread, insert to ready queue */
-    RT_SCHED_CTX(thread).stat = RT_THREAD_READY | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
-    /* there is no time slices left(YIELD), inserting thread before ready list*/
-    if((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
-    {
-        rt_list_insert_before(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
-                              &RT_THREAD_LIST_NODE(thread));
-    }
-    /* there are some time slices left, inserting thread after ready list to schedule it firstly at next time*/
-    else
-    {
-        rt_list_insert_after(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
-                             &RT_THREAD_LIST_NODE(thread));
-    }
-
-    LOG_D("insert thread[%.*s], the priority: %d",
-          RT_NAME_MAX, thread->parent.name, RT_SCHED_PRIV(rt_current_thread).current_priority);
+    _rt_sched_insert_thread(thread);

-    /* set priority mask */
-#if RT_THREAD_PRIORITY_MAX > 32
-    rt_thread_ready_table[RT_SCHED_PRIV(thread).number] |= RT_SCHED_PRIV(thread).high_mask;
-#endif /* RT_THREAD_PRIORITY_MAX > 32 */
-    rt_thread_ready_priority_group |= RT_SCHED_PRIV(thread).number_mask;
+    _scheduler_update_highest_priority();

 __exit:
     /* enable interrupt */
@@ -431,24 +503,9 @@ void rt_sched_remove_thread(struct rt_thread *thread)
     /* disable interrupt */
     level = rt_hw_interrupt_disable();

-    LOG_D("remove thread[%.*s], the priority: %d",
-          RT_NAME_MAX, thread->parent.name,
-          RT_SCHED_PRIV(rt_current_thread).current_priority);
+    _rt_sched_remove_thread(thread);

-    /* remove thread from ready list */
-    rt_list_remove(&RT_THREAD_LIST_NODE(thread));
-    if (rt_list_isempty(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority])))
-    {
-#if RT_THREAD_PRIORITY_MAX > 32
-        rt_thread_ready_table[RT_SCHED_PRIV(thread).number] &= ~RT_SCHED_PRIV(thread).high_mask;
-        if (rt_thread_ready_table[RT_SCHED_PRIV(thread).number] == 0)
-        {
-            rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
-        }
-#else
-        rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
-#endif /* RT_THREAD_PRIORITY_MAX > 32 */
-    }
+    _scheduler_update_highest_priority();

     /* enable interrupt */
     rt_hw_interrupt_enable(level);
@@ -468,16 +525,16 @@ void rt_exit_critical_safe(rt_base_t critical_level)
     {
         if (critical_level != rt_scheduler_lock_nest)
         {
-            int dummy = 1;
+            int dummy                = 1;
             _critical_error_occurred = 1;

-            rt_kprintf("%s: un-compatible critical level\n" \
+            rt_kprintf("%s: un-compatible critical level\n"
                        "\tCurrent %d\n\tCaller %d\n",
                        __func__,
                        rt_scheduler_lock_nest,
                        critical_level);
            rt_backtrace();

-            while (dummy) ;
+            while (dummy);
         }
     }
     rt_hw_interrupt_enable(level);
@@ -485,14 +542,14 @@ void rt_exit_critical_safe(rt_base_t critical_level)
     rt_exit_critical();
 }

-#else /* !RT_DEBUGING_CRITICAL */
+#else  /* !RT_DEBUGING_CRITICAL */

 void rt_exit_critical_safe(rt_base_t critical_level)
 {
     rt_exit_critical();
 }

-#endif/* RT_DEBUGING_CRITICAL */
+#endif /* RT_DEBUGING_CRITICAL */
 RTM_EXPORT(rt_exit_critical_safe);

 /**
@@ -505,12 +562,11 @@ rt_base_t rt_enter_critical(void)

     /* disable interrupt */
     level = rt_hw_interrupt_disable();
-
     /*
      * the maximal number of nest is RT_UINT16_MAX, which is big
      * enough and does not check here
      */
-    rt_scheduler_lock_nest ++;
+    ++rt_scheduler_lock_nest;
     critical_level = rt_scheduler_lock_nest;

     /* enable interrupt */
@@ -530,7 +586,7 @@ void rt_exit_critical(void)
     /* disable interrupt */
     level = rt_hw_interrupt_disable();

-    rt_scheduler_lock_nest --;
+    --rt_scheduler_lock_nest;
     if (rt_scheduler_lock_nest <= 0)
     {
         rt_scheduler_lock_nest = 0;
diff --git a/src/signal.c b/src/signal.c
index b33e24220e4..6a7888c755b 100644
--- a/src/signal.c
+++ b/src/signal.c
@@ -8,6 +8,7 @@
  * 2017/10/5      Bernard      the first version
  * 2018/09/17     Jesven       fix: in _signal_deliver RT_THREAD_STAT_MASK to RT_THREAD_STAT_SIGNAL_MASK
  * 2018/11/22     Jesven       in smp version rt_hw_context_switch_to add a param
+ * 2025-06-01     htl5241      remove redundancy rt_schedule()
  */

 #include <rtthread.h>
@@ -118,8 +119,6 @@ static void _signal_deliver(rt_thread_t tid)
         rt_spin_unlock_irqrestore(&_thread_signal_lock, level);

-        /* re-schedule */
-        rt_schedule();
     }
     else
     {
@@ -165,8 +164,6 @@ static void _signal_deliver(rt_thread_t tid)
         rt_spin_unlock_irqrestore(&_thread_signal_lock, level);
         LOG_D("signal stack pointer @ 0x%08x", tid->sp);

-        /* re-schedule */
-        rt_schedule();
     }
     else
     {
@@ -377,9 +374,6 @@ int rt_signal_wait(const rt_sigset_t *set, rt_siginfo_t *si, rt_int32_t timeout)
     }
     rt_spin_unlock_irqrestore(&_thread_signal_lock, level);

-    /* do thread scheduling */
-    rt_schedule();
-
     level = rt_spin_lock_irqsave(&_thread_signal_lock);

     /* remove signal waiting flag */
diff --git a/src/thread.c b/src/thread.c
index ed9dca13b61..ffd290bfa54 100644
--- a/src/thread.c
+++ b/src/thread.c
@@ -35,6 +35,8 @@
  * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  * 2023-12-10     xqyjlj       fix thread_exit/detach/delete
  *                             fix rt_thread_delay
+ * 2025-06-01     htl5241      remove redundancy rt_schedule()
+ *                             fix timer overflow
  */

 #include <rtthread.h>
@@ -133,8 +135,6 @@ static void _thread_exit(void)
     rt_exit_critical_safe(critical_level);

-    /* switch to next task */
-    rt_schedule();
 }

 /**
@@ -647,9 +647,6 @@ static rt_err_t _thread_sleep(rt_tick_t tick)
         thread->error = -RT_EINTR;

-        /* notify a pending rescheduling */
-        rt_schedule();
-
         /* exit critical and do a rescheduling */
         rt_exit_critical_safe(critical_level);

@@ -692,7 +689,6 @@ RTM_EXPORT(rt_thread_delay);
 rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick)
 {
     struct rt_thread *thread;
-    rt_tick_t cur_tick;
     rt_base_t critical_level;

     RT_ASSERT(tick != RT_NULL);
@@ -708,13 +704,15 @@ rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick)
     /* disable interrupt */
     critical_level = rt_enter_critical();

-    cur_tick = rt_tick_get();
-    if (cur_tick - *tick < inc_tick)
+    if (rt_tick_get_delta(*tick) < inc_tick)
     {
         rt_tick_t left_tick;
+        rt_tick_t target_tick;
+        target_tick = *tick + inc_tick;
+        left_tick = target_tick - rt_tick_get();

-        *tick += inc_tick;
-        left_tick = *tick - cur_tick;
+        if (left_tick > target_tick)
+            left_tick = RT_TICK_MAX - left_tick + 1;

         /* suspend thread */
         rt_thread_suspend_with_flag(thread, RT_UNINTERRUPTIBLE);
@@ -725,8 +723,6 @@ rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick)

         rt_exit_critical_safe(critical_level);

-        rt_schedule();
-
         /* clear error number of this thread to RT_EOK */
         if (thread->error == -RT_ETIMEOUT)
         {
@@ -735,7 +731,7 @@ rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick)
     }
     else
     {
-        *tick = cur_tick;
+        *tick = rt_tick_get();
         rt_exit_critical_safe(critical_level);
     }

diff --git a/src/timer.c b/src/timer.c
index ae6da29f760..e8ce986563e 100644
--- a/src/timer.c
+++ b/src/timer.c
@@ -22,6 +22,8 @@
  * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
  * 2024-01-25     Shell        add RT_TIMER_FLAG_THREAD_TIMER for timer to sync with sched
  * 2024-05-01     wdfk-prog    The rt_timer_check and _soft_timer_check functions are merged
+ * 2025-06-01     htl5241      remove redundancy
+ *                             fix timer overflow
  */

 #include <rtthread.h>
@@ -494,8 +496,6 @@ static void _timer_check(rt_list_t *timer_list, struct rt_spinlock *lock)
     level = rt_spin_lock_irqsave(lock);

-    current_tick = rt_tick_get();
-
     rt_list_init(&list);

     while (!rt_list_isempty(&timer_list[RT_TIMER_SKIP_LIST_LEVEL - 1]))
@@ -539,8 +539,7 @@ static void _timer_check(rt_list_t *timer_list, struct rt_spinlock *lock)
                 continue;
             }
             rt_list_remove(&(t->row[RT_TIMER_SKIP_LIST_LEVEL - 1]));
-            if ((t->parent.flag & RT_TIMER_FLAG_PERIODIC) &&
-                (t->parent.flag & RT_TIMER_FLAG_ACTIVATED))
+            if ((t->parent.flag & (RT_TIMER_FLAG_PERIODIC | RT_TIMER_FLAG_ACTIVATED)) == (RT_TIMER_FLAG_PERIODIC | RT_TIMER_FLAG_ACTIVATED))
             {
                 /* start it */
                 t->parent.flag &= ~RT_TIMER_FLAG_ACTIVATED;
@@ -747,7 +746,6 @@ RTM_EXPORT(rt_timer_control);
  */
 void rt_timer_check(void)
 {
-    RT_ASSERT(rt_interrupt_get_nest() > 0);

 #ifdef RT_USING_SMP
     /* Running on core 0 only */
@@ -762,7 +760,7 @@ void rt_timer_check(void)
         rt_tick_t next_timeout;

         ret = _timer_list_next_timeout(_soft_timer_list, &next_timeout);
-        if ((ret == RT_EOK) && (next_timeout <= rt_tick_get()))
+        if ((ret == RT_EOK) && ((rt_tick_get() - next_timeout) < RT_TICK_MAX / 2))
         {
             rt_sem_release(&_soft_timer_sem);
         }
diff --git a/tools/cmake.py b/tools/cmake.py
index e998a48e8cd..6a6aa9c37d6 100644
--- a/tools/cmake.py
+++ b/tools/cmake.py
@@ -45,7 +45,7 @@ def GenerateCFiles(env, project, project_name):
     tool_path_conv["CMAKE_ASM_COMPILER"] = tool_path_conv_helper(rtconfig.AS)
     tool_path_conv["CMAKE_AR"] = tool_path_conv_helper(rtconfig.AR)
    tool_path_conv["CMAKE_LINKER"] = tool_path_conv_helper(rtconfig.LINK)
-    if rtconfig.PLATFORM in ['gcc']:
+    if rtconfig.PLATFORM in ['gcc','llvm-arm']:
        tool_path_conv["CMAKE_SIZE"] = tool_path_conv_helper(rtconfig.SIZE)
        tool_path_conv["CMAKE_OBJDUMP"] = tool_path_conv_helper(rtconfig.OBJDUMP)
        tool_path_conv["CMAKE_OBJCOPY"] = tool_path_conv_helper(rtconfig.OBJCPY)
@@ -99,7 +99,7 @@ def GenerateCFiles(env, project, project_name):
            AS += ".exe"
            AR += ".exe"
            LINK += ".exe"
-            if rtconfig.PLATFORM in ['gcc']:
+            if rtconfig.PLATFORM in ['gcc','llvm-arm']:
                SIZE += ".exe"
                OBJDUMP += ".exe"
                OBJCOPY += ".exe"
@@ -129,7 +129,7 @@ def GenerateCFiles(env, project, project_name):
        cm_file.write("SET(CMAKE_CXX_FLAGS \""+ CXXFLAGS + "\")\n")
        cm_file.write("SET(CMAKE_CXX_COMPILER_WORKS TRUE)\n\n")

-        if rtconfig.PLATFORM in ['gcc']:
+        if rtconfig.PLATFORM in ['gcc','llvm-arm']:
            cm_file.write("SET(CMAKE_OBJCOPY \""+ OBJCOPY + "\")\n")
            cm_file.write("SET(CMAKE_SIZE \""+ SIZE + "\")\n\n")
        elif rtconfig.PLATFORM in ['armcc', 'armclang']:
@@ -137,7 +137,7 @@ def GenerateCFiles(env, project, project_name):

        LINKER_FLAGS = ''
        LINKER_LIBS = ''
-        if rtconfig.PLATFORM in ['gcc']:
+        if rtconfig.PLATFORM in ['gcc','llvm-arm']:
            LINKER_FLAGS += '-T'
        elif rtconfig.PLATFORM in ['armcc', 'armclang']:
            LINKER_FLAGS += '--scatter'
@@ -186,7 +186,7 @@ def GenerateCFiles(env, project, project_name):

        cm_file.write("ADD_DEFINITIONS(\n")
        for i in env['CPPDEFINES']:
-            cm_file.write("\t-D" + i + "\n")
+            cm_file.write("\t-D" + str(i).replace("(", "").replace(")", "").replace(",", " ") + "\n")
        cm_file.write(")\n\n")

        libgroups = []
@@ -290,7 +290,7 @@ def GenerateCFiles(env, project, project_name):
        cm_file.write("\n")

        cm_file.write("# Interface library search paths\n")
-        if rtconfig.PLATFORM in ['gcc']:
+        if rtconfig.PLATFORM in ['gcc','llvm-arm']:
            for group in libgroups:
                if not 'LIBPATH' in group.keys():
                    continue
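Note on the tick handling in this patch: every blocking-timeout path replaces the pattern `tick_delta = rt_tick_get() - tick_delta; timeout -= tick_delta;` with `timeout -= rt_tick_get_delta(tick_stamp);`, and rt_timer_check() now tests `(rt_tick_get() - next_timeout) < RT_TICK_MAX / 2` instead of `next_timeout <= rt_tick_get()`. A minimal sketch of the wrap-safe arithmetic this relies on follows; the helper name rt_tick_get_delta() is taken from the patch itself, but the bodies below are an assumption for illustration, not the kernel's actual implementation.

#include <rtthread.h>

/* Sketch only: elapsed ticks since `past`, still correct after rt_tick_t
 * wraps around, because unsigned subtraction is reduced modulo 2^N
 * (assumed to be what rt_tick_get_delta(past) computes). */
static rt_tick_t elapsed_since(rt_tick_t past)
{
    return rt_tick_get() - past;
}

/* Sketch only: "deadline reached" test that tolerates wrap-around,
 * mirroring the comparison the patch introduces in rt_timer_check(). */
static rt_bool_t deadline_reached(rt_tick_t deadline)
{
    return (rt_tick_get() - deadline) < (RT_TICK_MAX / 2);
}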