diff --git a/components/drivers/clk/SConscript b/components/drivers/clk/SConscript index af6ed4dbeb2..928697863d3 100644 --- a/components/drivers/clk/SConscript +++ b/components/drivers/clk/SConscript @@ -1,7 +1,6 @@ from building import * group = [] -objs = [] if not GetDepend(['RT_USING_CLK']): Return('group') @@ -15,12 +14,7 @@ src = ['clk.c'] if GetDepend(['RT_USING_OFW']): src += ['clk-fixed-rate.c'] -group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH) -for d in list: - path = os.path.join(cwd, d) - if os.path.isfile(os.path.join(path, 'SConscript')): - objs = objs + SConscript(os.path.join(d, 'SConscript')) -objs = objs + group +group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH) -Return('objs') +Return('group') diff --git a/components/drivers/clk/clk-fixed-rate.c b/components/drivers/clk/clk-fixed-rate.c index 0912e86dd6b..944269c257b 100644 --- a/components/drivers/clk/clk-fixed-rate.c +++ b/components/drivers/clk/clk-fixed-rate.c @@ -6,66 +6,72 @@ * Change Logs: * Date Author Notes * 2022-11-26 GuEe-GUI first version + * 2024-05-01 GuEe-GUI update for new clk */ #include #include -#include - -static rt_err_t fixed_clk_ofw_init(struct rt_platform_device *pdev, struct rt_clk_fixed_rate *clk_fixed) +struct clk_fixed { - rt_err_t err = RT_EOK; - rt_uint32_t rate, accuracy; - struct rt_ofw_node *np = pdev->parent.ofw_node; - const char *clk_name = np->name; - - if (!rt_ofw_prop_read_u32(np, "clock-frequency", &rate)) - { - rt_ofw_prop_read_u32(np, "clock-accuracy", &accuracy); - rt_ofw_prop_read_string(np, "clock-output-names", &clk_name); + struct rt_clk_node parent; - clk_fixed->clk.name = clk_name; - clk_fixed->clk.rate = rate; - clk_fixed->clk.min_rate = rate; - clk_fixed->clk.max_rate = rate; - clk_fixed->fixed_rate = rate; - clk_fixed->fixed_accuracy = accuracy; + struct rt_clk_fixed_rate fcell; + struct rt_clk_cell *cells[1]; +}; - rt_ofw_data(np) = &clk_fixed->clk; - } - else - { - err = -RT_EIO; - } +static rt_ubase_t fixed_clk_recalc_rate(struct rt_clk_cell *cell, rt_ubase_t parent_rate) +{ + struct rt_clk_fixed_rate *fr = rt_container_of(cell, struct rt_clk_fixed_rate, cell); - return err; + return fr->fixed_rate; } +static struct rt_clk_ops fixed_clk_ops = +{ + .recalc_rate = fixed_clk_recalc_rate, +}; + static rt_err_t fixed_clk_probe(struct rt_platform_device *pdev) { - rt_err_t err = RT_EOK; - struct rt_clk_fixed_rate *clk_fixed = rt_calloc(1, sizeof(*clk_fixed)); + rt_err_t err; + rt_uint32_t val; + struct rt_device *dev = &pdev->parent; + struct clk_fixed *cf = rt_calloc(1, sizeof(*cf)); - if (clk_fixed) + if (!cf) { - err = fixed_clk_ofw_init(pdev, clk_fixed); - } - else - { - err = -RT_ENOMEM; + return -RT_ENOMEM; } - if (!err) + if ((err = rt_dm_dev_prop_read_u32(dev, "clock-frequency", &val))) { - err = rt_clk_register(&clk_fixed->clk, RT_NULL); + goto _fail; } + cf->fcell.fixed_rate = val; + + val = 0; + rt_dm_dev_prop_read_u32(dev, "clock-accuracy", &val); + cf->fcell.fixed_accuracy = val; - if (err && clk_fixed) + rt_dm_dev_prop_read_string(dev, "clock-output-names", &cf->fcell.cell.name); + + cf->parent.dev = dev; + cf->parent.cells_nr = 1; + cf->parent.cells = cf->cells; + cf->cells[0] = &cf->fcell.cell; + cf->fcell.cell.ops = &fixed_clk_ops; + + if ((err = rt_clk_register(&cf->parent))) { - rt_free(clk_fixed); + goto _fail; } + return RT_EOK; + +_fail: + rt_free(cf); + return err; } diff --git a/components/drivers/clk/clk.c b/components/drivers/clk/clk.c index 2c2cc2fb42d..4d8a9c48eaa 100644 --- 
a/components/drivers/clk/clk.c +++ b/components/drivers/clk/clk.c @@ -7,6 +7,7 @@ * Date Author Notes * 2022-11-26 GuEe-GUI first version * 2025-01-24 wumingzi add doxygen comment + * 2024-05-01 GuEe-GUI make cell for hareware clock */ #include @@ -22,290 +23,265 @@ #define DBG_LVL DBG_INFO #include -static RT_DEFINE_SPINLOCK(_clk_lock); -static rt_list_t _clk_nodes = RT_LIST_OBJECT_INIT(_clk_nodes); +static rt_bool_t clk_ignore_unused = RT_FALSE; + +static struct rt_mutex _clk_lock; +static rt_list_t _clk_node_nodes = RT_LIST_OBJECT_INIT(_clk_node_nodes); static rt_list_t _clk_notifier_nodes = RT_LIST_OBJECT_INIT(_clk_notifier_nodes); -/** - * @brief Release clock node - * - * @param r point to reference count of clock node - * @warning The function only can print log and MORE DETAILS SHOULD BE IMPLEMENTED. - */ -static void clk_release(struct rt_ref *r) +static int clk_init(void) { - struct rt_clk_node *clk_np = rt_container_of(r, struct rt_clk_node, ref); - - LOG_E("%s is release", clk_np->name); - (void)clk_np; +#ifdef RT_USING_OFW + clk_ignore_unused = !!rt_ofw_bootargs_select("clk_ignore_unused", 0); +#endif + rt_mutex_init(&_clk_lock, "CLK", RT_IPC_FLAG_PRIO); - RT_ASSERT(0); + return 0; } +INIT_CORE_EXPORT(clk_init); /** - * @brief Increase reference count for clock node - * - * @param clk_np point to clock node + * @brief Acquire global clock framework lock. * - * @return struct rt_clk_node * point to clock node whose reference count has increased */ -rt_inline struct rt_clk_node *clk_get(struct rt_clk_node *clk_np) +static void clk_lock(void) { - rt_ref_get(&clk_np->ref); - - return clk_np; + if (rt_thread_self()) + { + rt_mutex_take(&_clk_lock, RT_WAITING_FOREVER); + } } /** - * @brief Decrease reference count for clock node - * - * @param clk_np point to clock node + * @brief Release global clock framework lock. 
* */ -rt_inline void clk_put(struct rt_clk_node *clk_np) +static void clk_unlock(void) { - rt_ref_put(&clk_np->ref, &clk_release); + if (rt_thread_self()) + { + rt_mutex_release(&_clk_lock); + } } /** * @brief Allocate memory space for struct clock and return it * - * @param clk_np point to clock node + * @param cell point to clock cell * @param dev_id device identifier for the clock * @param con_id connection identifier for the clock - * @param fw_node point to the firmware node associated with the clock * * @return struct rt_clk* point to clock */ -static struct rt_clk *clk_alloc(struct rt_clk_node *clk_np, const char *dev_id, - const char *con_id, void *fw_node) +static struct rt_clk *clk_alloc(struct rt_clk_cell *cell, const char *dev_id, const char *con_id) { struct rt_clk *clk = rt_calloc(1, sizeof(*clk)); - if (clk) + if (!clk) { - clk->clk_np = clk_np; - clk->dev_id = dev_id; - clk->con_id = con_id; + LOG_E("%s not memory to create CLK for dev_id = %s con_id = %s", + cell->name, dev_id, con_id); + return RT_NULL; + } + + clk->cell = cell; - clk->fw_node = fw_node; + clk->dev_id = dev_id; + clk->con_id = con_id; + + if (cell->clk) + { + clk->min_rate = cell->clk->min_rate; + clk->max_rate = cell->clk->max_rate; } else { - clk = rt_err_ptr(-RT_ENOMEM); + clk->min_rate = 0; + clk->max_rate = ~0UL; } return clk; } /** - * @brief Free memory space of clock object + * @brief Update clk cell hardware information * - * @param clk point to clock + * @param cell point to clock cell + * @param clk bind clk * */ -static void clk_free(struct rt_clk *clk) +static void clk_cell_bind(struct rt_clk_cell *cell, struct rt_clk *clk) { - struct rt_clk_node *clk_np = clk->clk_np; - - if (clk_np && clk_np->ops->finit) + if (!cell->clk) { - clk_np->ops->finit(clk); + cell->clk = clk; } - - rt_free(clk); -} - -/** - * @brief Allocate memory space and creat clock object - * - * @param clk_np point to clock node - * @param dev_id device identifier for the clock - * @param con_id connection identifier for the clock - * @param fw_data point to the firmware data associated with the clock - * @param fw_node point to the firmware node associated with the clock - * - * @return struct rt_clk* point to clock - */ -static struct rt_clk *clk_create(struct rt_clk_node *clk_np, const char *dev_id, - const char *con_id, void *fw_data, void *fw_node) -{ - struct rt_clk *clk = clk_alloc(clk_np, dev_id, con_id, fw_node); - - if (!rt_is_err(clk)) + else { - clk_get(clk_np); - - if (clk_np->ops->init && clk_np->ops->init(clk, fw_data)) + if (!cell->clk->dev_id) { - LOG_E("Dev[%s] Con[%s] init fail", dev_id, con_id); + cell->clk->dev_id = clk->dev_id; + } - clk_free(clk); - clk = RT_NULL; + if (!cell->clk->con_id) + { + cell->clk->con_id = clk->con_id; } } - - return clk; } /** - * @brief Notify corresponding clock from all + * @brief Register a clock node into the global clock framework. * - * @param clk_np point to clock node - * @param msg message identifier for the event - * @param old_rate old rate of the clock before the event - * @param new_rate new rate of the clock after the event + * This function initializes an @ref rt_clk_node structure and inserts it + * into the global clock node list. Each node may contain multiple + * @ref rt_clk_cell instances, which represent the individual output clocks + * provided by the node. * - * @return rt_err_t RT_EOK on notify clock sucessfully, and other value is failed. 
+ * If the node is associated with a device (clk_np->dev is not NULL), + * the framework will automatically try to obtain its parent clocks by calling + * rt_clk_get_array(clk_np->dev). Otherwise, the node will be treated as a + * root-level provider (e.g., fixed clock). + * + * The caller must ensure that all @ref rt_clk_cell entries are allocated + * and linked to the node before calling this function. + * + * @param clk_np Pointer to the clock node to be registered. + * + * @retval RT_EOK Successfully registered. + * @retval -RT_EINVAL Invalid argument or missing cell list. + * @retval -RT_ENOMEM Failed to allocate parent clock array. */ -static rt_err_t clk_notify(struct rt_clk_node *clk_np, rt_ubase_t msg, rt_ubase_t old_rate, rt_ubase_t new_rate) +rt_err_t rt_clk_register(struct rt_clk_node *clk_np) { rt_err_t err = RT_EOK; - struct rt_clk_notifier *notifier; + struct rt_clk_cell *cell; - rt_list_for_each_entry(notifier, &_clk_notifier_nodes, list) + if (!clk_np || !clk_np->cells_nr || !clk_np->cells) { - if (notifier->clk->clk_np == clk_np) - { - err = notifier->callback(notifier, msg, old_rate, new_rate); + return -RT_EINVAL; + } - /* Only check hareware's error */ - if (err == -RT_EIO) - { - break; - } + if (clk_np->dev && !rt_is_err(clk_np->parents_clk)) + { + clk_np->parents_clk = rt_clk_get_array(clk_np->dev); + + if (rt_is_err(clk_np->parents_clk)) + { + return rt_ptr_err(clk_np->parents_clk); } } - return err; -} +#if RT_NAME_MAX > 0 + rt_strncpy(clk_np->parent.name, RT_CLK_NODE_OBJ_NAME, RT_NAME_MAX); +#else + clk_np->parent.name = RT_CLK_NODE_OBJ_NAME; +#endif -/** - * @brief Set parent clock - * - * @param clk_np point to clock node - * @param parent_np point to parent rt_clk - * - */ -static void clk_set_parent(struct rt_clk_node *clk_np, struct rt_clk_node *parent_np) -{ - rt_hw_spin_lock(&_clk_lock.lock); + rt_list_init(&clk_np->parent.list); - clk_np->parent = parent_np; + for (int i = 0; i < clk_np->cells_nr; ++i) + { + cell = clk_np->cells[i]; - rt_list_insert_after(&parent_np->children_nodes, &clk_np->list); + if (!cell) + { + continue; + } - rt_hw_spin_unlock(&_clk_lock.lock); -} + cell->clk_np = clk_np; -static const struct rt_clk_ops unused_clk_ops = -{ -}; + cell->rate = 0; + cell->prepare_count = 0; + cell->enable_count = 0; + } -/** - * @brief Register clock node into clock list - * - * @param clk_np point to child node that will be registered node. - * @param parent_np point to parent rt_clk. If it is RT_NULL, clock node will be linked to init node. 
- * - * @retval RT_EOK - * @retval -RT_ENOMEM - */ -rt_err_t rt_clk_register(struct rt_clk_node *clk_np, struct rt_clk_node *parent_np) -{ - rt_err_t err = RT_EOK; - struct rt_clk *clk = RT_NULL; + clk_lock(); + rt_list_insert_after(&_clk_node_nodes, &clk_np->parent.list); + clk_unlock(); - if (clk_np) +#ifdef RT_USING_OFW + if (clk_np->dev && clk_np->dev->ofw_node) { - clk_np->clk = clk; + rt_bool_t set_ofw_data = RT_FALSE; + struct rt_ofw_node *np = clk_np->dev->ofw_node; - if (!clk_np->ops) + if (!rt_ofw_data(np)) { - clk_np->ops = &unused_clk_ops; + set_ofw_data = RT_TRUE; + rt_ofw_data(np) = &clk_np->parent; } - #if RT_NAME_MAX > 0 - rt_strncpy(clk_np->rt_parent.name, RT_CLK_NODE_OBJ_NAME, RT_NAME_MAX); - #else - clk_np->rt_parent.name = RT_CLK_NODE_OBJ_NAME; - #endif - - rt_ref_init(&clk_np->ref); - rt_list_init(&clk_np->list); - rt_list_init(&clk_np->children_nodes); - clk_np->multi_clk = 0; - - if (parent_np) + if ((err = rt_ofw_clk_set_defaults(np))) { - clk_np->clk = clk_alloc(clk_np, RT_NULL, RT_NULL, RT_NULL); - - if (clk_np->clk) - { - clk_set_parent(clk_np, parent_np); - } - else + if (set_ofw_data) { - err = -RT_ENOMEM; + rt_ofw_data(np) = RT_NULL; } } - else - { - clk_np->parent = RT_NULL; - - rt_hw_spin_lock(&_clk_lock.lock); - - rt_list_insert_after(&_clk_nodes, &clk_np->list); - - rt_hw_spin_unlock(&_clk_lock.lock); - } } - else +#endif /* RT_USING_OFW */ + + if (err) { - err = -RT_ENOMEM; + if (!rt_is_err(clk_np->parents_clk)) + { + rt_clk_array_put(clk_np->parents_clk); + } } return err; } /** - * @brief Unregister clock node from clock list + * @brief Unregister a clock node from global clock list * - * @param clk_np point to child node that will be Unregistered node. + * This API is intended for rollback use only, typically called + * when a clock provider fails after registration but before any + * consumer starts using its clocks. * - * @retval RT_EOK - * @retval -RT_EBUSY - * @retval -RT_EINVAL + * It removes the specified clock node from the global list and + * releases its parent clock array if present. The function does + * not free memory of @ref rt_clk_cell instances or the node itself. + * + * @param clk_np Pointer to the clock node to be unregistered. + * + * @retval RT_EOK Successfully unregistered. + * @retval -RT_EINVAL Invalid parameter. + * @retval -RT_EBUSY One or more cells are active and cannot be removed. 
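For orientation, a minimal provider sketch built on the node/cell layout that rt_clk_register() now expects, mirroring clk-fixed-rate.c above. The "soc-gate" register model, the rt_dm_dev_iomap() call and the exact ops prototypes are assumptions; only rt_clk_node, rt_clk_cell, rt_clk_ops and rt_clk_register() come from this patch.

#include <rtthread.h>
#include <rtdevice.h>

struct soc_gate_clk
{
    struct rt_clk_node parent;        /* provider node handed to rt_clk_register() */
    struct rt_clk_cell cell;          /* single output clock */
    struct rt_clk_cell *cells[1];
    volatile rt_uint32_t *enable_reg; /* hypothetical gate register */
};

static rt_err_t soc_gate_enable(struct rt_clk_cell *cell)
{
    struct soc_gate_clk *gc = rt_container_of(cell, struct soc_gate_clk, cell);

    *gc->enable_reg |= 1;

    return RT_EOK;
}

static void soc_gate_disable(struct rt_clk_cell *cell)
{
    struct soc_gate_clk *gc = rt_container_of(cell, struct soc_gate_clk, cell);

    *gc->enable_reg &= ~1u;
}

static struct rt_clk_ops soc_gate_ops =
{
    .enable  = soc_gate_enable,
    .disable = soc_gate_disable,
};

static rt_err_t soc_gate_probe(struct rt_platform_device *pdev)
{
    struct soc_gate_clk *gc = rt_calloc(1, sizeof(*gc));

    if (!gc)
    {
        return -RT_ENOMEM;
    }

    gc->enable_reg = rt_dm_dev_iomap(&pdev->parent, 0); /* assumed helper */

    if (!gc->enable_reg)
    {
        rt_free(gc);
        return -RT_EIO;
    }

    gc->cell.name = "soc-gate";
    gc->cell.ops = &soc_gate_ops;
    gc->cells[0] = &gc->cell;

    gc->parent.dev = &pdev->parent;
    gc->parent.cells = gc->cells;
    gc->parent.cells_nr = 1;

    /* driver registration glue (compatible table, driver export) omitted */
    return rt_clk_register(&gc->parent);
}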
*/ rt_err_t rt_clk_unregister(struct rt_clk_node *clk_np) { - rt_err_t err = RT_EOK; + struct rt_clk_cell *cell; - if (clk_np) + if (!clk_np) { - err = -RT_EBUSY; - - rt_hw_spin_lock(&_clk_lock.lock); + return -RT_EINVAL; + } - if (rt_list_isempty(&clk_np->children_nodes)) + if (clk_np->cells && clk_np->cells_nr) + { + for (int i = 0; i < clk_np->cells_nr; ++i) { - if (rt_ref_read(&clk_np->ref) <= 1) - { - rt_list_remove(&clk_np->list); - clk_free(clk_np->clk); + cell = clk_np->cells[i]; - err = RT_EOK; + if (cell && cell->enable_count > 0) + { + return -RT_EBUSY; } } - - rt_hw_spin_unlock(&_clk_lock.lock); } - else + + clk_lock(); + rt_list_remove(&clk_np->parent.list); + clk_unlock(); + + if (!rt_is_err(clk_np->parents_clk)) { - err = -RT_EINVAL; + rt_clk_array_put(clk_np->parents_clk); } - return err; + return RT_EOK; } /** @@ -319,18 +295,17 @@ rt_err_t rt_clk_unregister(struct rt_clk_node *clk_np) */ rt_err_t rt_clk_notifier_register(struct rt_clk *clk, struct rt_clk_notifier *notifier) { - if (!clk || !clk->clk_np || !notifier) + if (!clk || !notifier) { return -RT_EINVAL; } - rt_hw_spin_lock(&_clk_lock.lock); - - ++clk->clk_np->notifier_count; + notifier->clk = clk; rt_list_init(¬ifier->list); - rt_list_insert_after(&_clk_notifier_nodes, ¬ifier->list); - rt_hw_spin_unlock(&_clk_lock.lock); + clk_lock(); + rt_list_insert_after(&_clk_notifier_nodes, ¬ifier->list); + clk_unlock(); return RT_EOK; } @@ -353,51 +328,112 @@ rt_err_t rt_clk_notifier_unregister(struct rt_clk *clk, struct rt_clk_notifier * return -RT_EINVAL; } - rt_hw_spin_lock(&_clk_lock.lock); - + clk_lock(); rt_list_for_each_entry(notifier_find, &_clk_notifier_nodes, list) { - if (notifier_find->clk->clk_np == notifier->clk->clk_np) + if (notifier_find == notifier) { - --clk->clk_np->notifier_count; rt_list_remove(¬ifier->list); break; } } - - rt_hw_spin_unlock(&_clk_lock.lock); + clk_unlock(); return RT_EOK; } +/** + * @brief Notify corresponding clock from all + * + * @param clk_np point to clock node + * @param msg message identifier for the event + * @param old_rate old rate of the clock before the event + * @param new_rate new rate of the clock after the event + * + * @return rt_err_t RT_EOK on notify clock sucessfully, and other value is failed. + */ +static rt_err_t clk_notify(struct rt_clk_node *clk_np, rt_ubase_t msg, + rt_ubase_t old_rate, rt_ubase_t new_rate) +{ + rt_err_t err = RT_EOK; + struct rt_clk_notifier *notifier; + + rt_list_for_each_entry(notifier, &_clk_notifier_nodes, list) + { + if (notifier->clk->cell->clk_np == clk_np) + { + err = notifier->callback(notifier, msg, old_rate, new_rate); + + /* Only check hareware's error */ + if (err == -RT_EIO) + { + break; + } + } + } + + return err; +} + +static void clk_unprepare(struct rt_clk *clk); +static void clk_disable(struct rt_clk *clk); +static rt_ubase_t clk_round_rate(struct rt_clk *clk, rt_ubase_t rate); +static rt_err_t clk_set_rate(struct rt_clk *clk, rt_ubase_t rate); +static rt_err_t clk_set_parent(struct rt_clk *clk, struct rt_clk *parent); +static struct rt_clk *clk_get_parent(struct rt_clk *clk); +static rt_ubase_t clk_get_rate(struct rt_clk *clk); + +static struct rt_clk *clk_cell_get_clk(struct rt_clk_cell *cell); + /** * @brief Recursively prepare clock * * @param clk Ponit to clock that will be prepared - * @param clk_np Ponit to clock node that will be prepared * * @return rt_err_t RT_EOK on prepare clock sucessfully, and other value is failed. 
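A consumer-side sketch of the notifier path that clk_notify() below walks. The callback signature is taken from its call site; the UART scenario and any notifier fields beyond .callback are assumptions.

#include <rtthread.h>
#include <rtdevice.h>

static rt_err_t uart_clk_notify(struct rt_clk_notifier *notifier, rt_ubase_t msg,
                                rt_ubase_t old_rate, rt_ubase_t new_rate)
{
    switch (msg)
    {
    case RT_CLK_MSG_PRE_RATE_CHANGE:
        /* quiesce the peripheral before its bus clock moves */
        break;

    case RT_CLK_MSG_POST_RATE_CHANGE:
        /* reprogram dividers / baud rate from new_rate */
        break;

    case RT_CLK_MSG_ABORT_RATE_CHANGE:
    default:
        /* old_rate is still in effect */
        break;
    }

    return RT_EOK;
}

static struct rt_clk_notifier uart_clk_notifier =
{
    .callback = uart_clk_notify,
};

/* in the peripheral's probe path, with `clk` already obtained:
 *     rt_clk_notifier_register(clk, &uart_clk_notifier);
 */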
*/ -static rt_err_t clk_prepare(struct rt_clk *clk, struct rt_clk_node *clk_np) +static rt_err_t clk_prepare(struct rt_clk *clk) { - rt_err_t err = RT_EOK; + rt_err_t err; + struct rt_clk *parent; + struct rt_clk_cell *cell; + + cell = clk->cell; - if (clk_np->parent) + /* Already prepared */ + if (cell->prepare_count++ > 0) { - clk_prepare(clk_np->clk, clk_np->parent); + return RT_EOK; } - if (clk->prepare_count == 0 && clk_np->ops->prepare) + parent = clk_get_parent(clk); + + if (parent) { - err = clk_np->ops->prepare(clk); + if ((err = clk_prepare(parent))) + { + goto _fail; + } } - if (!err) + if (cell->ops->prepare) { - ++clk->prepare_count; + if ((err = cell->ops->prepare(cell))) + { + if (parent) + { + clk_unprepare(parent); + } + + goto _fail; + } } + return RT_EOK; + +_fail: + --cell->prepare_count; return err; } @@ -410,17 +446,19 @@ static rt_err_t clk_prepare(struct rt_clk *clk, struct rt_clk_node *clk_np) */ rt_err_t rt_clk_prepare(struct rt_clk *clk) { - rt_err_t err = RT_EOK; + rt_err_t err; RT_DEBUG_NOT_IN_INTERRUPT; - if (clk && clk->clk_np) + if (clk) { - rt_hw_spin_lock(&_clk_lock.lock); - - err = clk_prepare(clk, clk->clk_np); - - rt_hw_spin_unlock(&_clk_lock.lock); + clk_lock(); + err = clk_prepare(clk); + clk_unlock(); + } + else + { + err = RT_EOK; } return err; @@ -430,42 +468,50 @@ rt_err_t rt_clk_prepare(struct rt_clk *clk) * @brief Recursively unprepare clock * * @param clk Ponit to clock that will be unprepared - * @param clk_np Ponit to clock node that will be unprepared * */ -static void clk_unprepare(struct rt_clk *clk, struct rt_clk_node *clk_np) +static void clk_unprepare(struct rt_clk *clk) { - if (clk_np->parent) + struct rt_clk *parent; + struct rt_clk_cell *cell; + + cell = clk->cell; + + /* Don't unprepare readly */ + if (cell->prepare_count-- > 1) { - clk_unprepare(clk_np->clk, clk_np->parent); + return; } - if (clk->prepare_count == 1 && clk_np->ops->unprepare) + if (cell->ops->unprepare) { - clk_np->ops->unprepare(clk); + cell->ops->unprepare(cell); } - if (clk->prepare_count) + + parent = clk_get_parent(clk); + + if (parent) { - --clk->prepare_count; + clk_unprepare(parent); } } -rt_err_t rt_clk_unprepare(struct rt_clk *clk) +/** + * @brief Unprepare clock + * + * @param clk Ponit to clock that will be unprepared + * + */ +void rt_clk_unprepare(struct rt_clk *clk) { - rt_err_t err = RT_EOK; - RT_DEBUG_NOT_IN_INTERRUPT; - if (clk && clk->clk_np) + if (clk) { - rt_hw_spin_lock(&_clk_lock.lock); - - clk_unprepare(clk, clk->clk_np); - - rt_hw_spin_unlock(&_clk_lock.lock); + clk_lock(); + clk_unprepare(clk); + clk_unlock(); } - - return err; } /** @@ -475,25 +521,47 @@ rt_err_t rt_clk_unprepare(struct rt_clk *clk) * * @return rt_err_t RT_EOK on enable clock FOREVER. 
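A typical consumer flow against the refcounted prepare/enable pair implemented here; the device, the "baudclk" name and the target rate are illustrative only.

#include <rtthread.h>
#include <rtdevice.h>

static rt_err_t demo_clock_setup(struct rt_device *dev)
{
    rt_err_t err;
    struct rt_clk *clk = rt_clk_get_by_name(dev, "baudclk"); /* name is illustrative */

    if (rt_is_err_or_null(clk))
    {
        return -RT_EIO;
    }

    /* prepare + enable nest: repeated calls only bump the counts */
    if ((err = rt_clk_prepare_enable(clk)))
    {
        rt_clk_put(clk);
        return err;
    }

    rt_clk_set_rate(clk, 48000000);
    rt_kprintf("baudclk running at %lu Hz\n", (unsigned long)rt_clk_get_rate(clk));

    /* on teardown: rt_clk_disable_unprepare(clk); rt_clk_put(clk); */
    return RT_EOK;
}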
*/ -static rt_err_t clk_enable(struct rt_clk *clk, struct rt_clk_node *clk_np) +static rt_err_t clk_enable(struct rt_clk *clk) { - rt_err_t err = RT_EOK; + rt_err_t err; + struct rt_clk *parent; + struct rt_clk_cell *cell; - if (clk_np->parent) + cell = clk->cell; + + /* Already enabled */ + if (cell->enable_count++ > 0) { - clk_enable(clk_np->clk, clk_np->parent); + return RT_EOK; } - if (clk->enable_count == 0 && clk_np->ops->enable) + parent = clk_get_parent(clk); + + if (parent) { - err = clk_np->ops->enable(clk); + if ((err = clk_enable(parent))) + { + goto _fail; + } } - if (!err) + if (cell->ops->enable) { - ++clk->enable_count; + if ((err = cell->ops->enable(cell))) + { + if (parent) + { + clk_disable(parent); + } + + goto _fail; + } } + return RT_EOK; + +_fail: + --cell->enable_count; return err; } @@ -506,15 +574,17 @@ static rt_err_t clk_enable(struct rt_clk *clk, struct rt_clk_node *clk_np) */ rt_err_t rt_clk_enable(struct rt_clk *clk) { - rt_err_t err = RT_EOK; + rt_err_t err; - if (clk && clk->clk_np) + if (clk) { - rt_hw_spin_lock(&_clk_lock.lock); - - err = clk_enable(clk, clk->clk_np); - - rt_hw_spin_unlock(&_clk_lock.lock); + clk_lock(); + err = clk_enable(clk); + clk_unlock(); + } + else + { + err = RT_EOK; } return err; @@ -524,23 +594,46 @@ rt_err_t rt_clk_enable(struct rt_clk *clk) * @brief Recursively disable clock * * @param clk Ponit to clock that will be disabled - * @param clk_np Ponit to clock node that will be disabled * */ -static void clk_disable(struct rt_clk *clk, struct rt_clk_node *clk_np) +static void clk_disable(struct rt_clk *clk) { - if (clk_np->parent) + struct rt_clk *parent; + struct rt_clk_cell *cell; + + cell = clk->cell; + + if (cell->enable_count == 0) + { + LOG_W("%s: Disable called with count = 0", cell->name); + return; + } + + if (cell->enable_count-- > 1) + { + return; + } + + if (cell->flags & RT_CLK_F_IS_CRITICAL) + { + return; + } + + if (clk_ignore_unused && cell->flags & RT_CLK_F_IGNORE_UNUSED) { - clk_disable(clk_np->clk, clk_np->parent); + return; } - if (clk->enable_count == 1 && clk_np->ops->disable) + if (cell->ops->disable) { - clk_np->ops->disable(clk); + cell->ops->disable(cell); } - if (clk->enable_count) + + parent = clk_get_parent(clk); + + if (parent) { - --clk->enable_count; + clk_disable(parent); } } @@ -552,13 +645,11 @@ static void clk_disable(struct rt_clk *clk, struct rt_clk_node *clk_np) */ void rt_clk_disable(struct rt_clk *clk) { - if (clk && clk->clk_np) + if (clk) { - rt_hw_spin_lock(&_clk_lock.lock); - - clk_disable(clk, clk->clk_np); - - rt_hw_spin_unlock(&_clk_lock.lock); + clk_lock(); + clk_disable(clk); + clk_unlock(); } } @@ -571,23 +662,25 @@ void rt_clk_disable(struct rt_clk *clk) */ rt_err_t rt_clk_prepare_enable(struct rt_clk *clk) { - rt_err_t err = RT_EOK; + rt_err_t err; RT_DEBUG_NOT_IN_INTERRUPT; if (clk) { - err = rt_clk_prepare(clk); - - if (!err) + clk_lock(); + if (!(err = clk_prepare(clk))) { - err = rt_clk_enable(clk); - - if (err) + if ((err = clk_enable(clk))) { - rt_clk_unprepare(clk); + clk_unprepare(clk); } } + clk_unlock(); + } + else + { + err = RT_EOK; } return err; @@ -605,8 +698,10 @@ void rt_clk_disable_unprepare(struct rt_clk *clk) if (clk) { - rt_clk_disable(clk); - rt_clk_unprepare(clk); + clk_lock(); + clk_disable(clk); + clk_unprepare(clk); + clk_unlock(); } } @@ -619,7 +714,7 @@ void rt_clk_disable_unprepare(struct rt_clk *clk) */ rt_err_t rt_clk_array_prepare(struct rt_clk_array *clk_arr) { - rt_err_t err = RT_EOK; + rt_err_t err; if (clk_arr) { @@ -639,29 +734,29 @@ 
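Where a controller needs every clock in its "clocks" property, the array helpers that follow wrap the same per-clock calls; rt_clk_get_array() is the getter rt_clk_register() already uses for parent clocks. A sketch, assuming the error-pointer/NULL conventions visible in this file.

#include <rtthread.h>
#include <rtdevice.h>

static rt_err_t demo_enable_all_clocks(struct rt_device *dev)
{
    rt_err_t err;
    struct rt_clk_array *clks = rt_clk_get_array(dev);

    if (rt_is_err(clks))
    {
        return rt_ptr_err(clks);
    }

    if (!clks)
    {
        return RT_EOK; /* node has no "clocks" property */
    }

    if ((err = rt_clk_array_prepare(clks)))
    {
        goto _put;
    }

    if ((err = rt_clk_array_enable(clks)))
    {
        rt_clk_array_unprepare(clks);
        goto _put;
    }

    /* keep `clks` for the matching disable/unprepare/put on teardown */
    return RT_EOK;

_put:
    rt_clk_array_put(clks);
    return err;
}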
rt_err_t rt_clk_array_prepare(struct rt_clk_array *clk_arr) } } } + else + { + err = RT_EOK; + } return err; } -rt_err_t rt_clk_array_unprepare(struct rt_clk_array *clk_arr) +/** + * @brief Unprepare clock array for mutipule out clock + * + * @param clk_arr point to clock array + * + */ +void rt_clk_array_unprepare(struct rt_clk_array *clk_arr) { - rt_err_t err = RT_EOK; - if (clk_arr) { for (int i = 0; i < clk_arr->count; ++i) { - if ((err = rt_clk_unprepare(clk_arr->clks[i]))) - { - LOG_E("CLK Array[%d] %s failed error = %s", i, - "unprepare", rt_strerror(err)); - - break; - } + rt_clk_unprepare(clk_arr->clks[i]); } } - - return err; } /** @@ -673,7 +768,7 @@ rt_err_t rt_clk_array_unprepare(struct rt_clk_array *clk_arr) */ rt_err_t rt_clk_array_enable(struct rt_clk_array *clk_arr) { - rt_err_t err = RT_EOK; + rt_err_t err; if (clk_arr) { @@ -693,6 +788,10 @@ rt_err_t rt_clk_array_enable(struct rt_clk_array *clk_arr) } } } + else + { + err = RT_EOK; + } return err; } @@ -752,51 +851,85 @@ void rt_clk_array_disable_unprepare(struct rt_clk_array *clk_arr) } /** - * @brief Set clock rate range + * @brief Set and clamp clock rate within specified range. * - * @param clk point to clock - * @param min minimum clock rate - * @param max minimum clock rate + * @details This function updates the minimum and maximum allowed rate + * of a clock, clamps its current rate to the new range, and + * immediately applies the change via set_rate(). * - * @return rt_err_t RT_EOK on set clock rate range sucessfully, and other value is failed. + * @param clk Pointer to clock handle. + * @param min Minimum allowed rate (Hz). + * @param max Maximum allowed rate (Hz). + * + * @retval RT_EOK Successfully updated. + * @retval -RT_EINVAL Invalid parameter or range. + * @retval -RT_ENOSYS Clock driver does not support set_rate(). + * @retval other Hardware-specific error returned by set_rate(). */ -rt_err_t rt_clk_set_rate_range(struct rt_clk *clk, rt_ubase_t min, rt_ubase_t max) +static rt_err_t clk_set_rate_range(struct rt_clk *clk, rt_ubase_t min, rt_ubase_t max) { - rt_err_t err = RT_EOK; + rt_err_t err; + rt_ubase_t rate, old_min, old_max; + struct rt_clk_cell *cell; - if (clk && clk->clk_np) + if (min > max) { - struct rt_clk_node *clk_np = clk->clk_np; + return -RT_EINVAL; + } - rt_hw_spin_lock(&_clk_lock.lock); + cell = clk->cell; - if (clk_np->ops->set_rate) - { - rt_ubase_t rate = clk_np->rate; - rt_ubase_t old_min = clk_np->min_rate; - rt_ubase_t old_max = clk_np->max_rate; + old_min = clk->min_rate; + old_max = clk->max_rate; - clk_np->min_rate = min; - clk_np->max_rate = max; + clk->min_rate = min; + clk->max_rate = max; - rate = rt_clamp(rate, min, max); - err = clk_np->ops->set_rate(clk, rate, - rt_clk_get_rate(clk_np->parent ? clk_np->parent->clk : RT_NULL)); + if (cell->flags & RT_CLK_F_GET_RATE_NOCACHE) + { + rate = clk_get_rate(clk); + } + else + { + rate = cell->rate; + } - if (err) - { - clk_np->min_rate = old_min; - clk_np->max_rate = old_max; - } - } - else - { - err = -RT_ENOSYS; - } + if ((err = clk_set_rate(clk, rt_clamp(rate, min, max)))) + { + goto _fail; + } - rt_hw_spin_unlock(&_clk_lock.lock); + return RT_EOK; + +_fail: + clk->min_rate = old_min; + clk->max_rate = old_max; + + return err; +} + +/** + * @brief Set clock rate range + * + * @param clk point to clock + * @param min minimum clock rate + * @param max minimum clock rate + * + * @return rt_err_t RT_EOK on set clock rate range sucessfully, and other value is failed. 
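The clamp behaviour of clk_set_rate_range() in practice: a sketch that constrains a clock, requests an out-of-range rate and reads back what was actually programmed (all rates are illustrative).

#include <rtthread.h>
#include <rtdevice.h>

static void demo_constrain_bus_clock(struct rt_clk *clk)
{
    /* limit the clock to 100..400 MHz; the current rate is re-clamped at once */
    rt_clk_set_rate_range(clk, 100000000UL, 400000000UL);

    /* a request above the range is clamped before ops->set_rate() runs */
    rt_clk_set_rate(clk, 600000000UL);

    rt_kprintf("bus clock now %lu Hz\n", (unsigned long)rt_clk_get_rate(clk));

    /* tightening only one bound keeps the other as previously set */
    rt_clk_set_min_rate(clk, 200000000UL);
}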
+ */ +rt_err_t rt_clk_set_rate_range(struct rt_clk *clk, rt_ubase_t min, rt_ubase_t max) +{ + rt_err_t err; + + if (!clk) + { + return RT_EOK; } + clk_lock(); + err = clk_set_rate_range(clk, min, max); + clk_unlock(); + return err; } @@ -810,16 +943,12 @@ rt_err_t rt_clk_set_rate_range(struct rt_clk *clk, rt_ubase_t min, rt_ubase_t ma */ rt_err_t rt_clk_set_min_rate(struct rt_clk *clk, rt_ubase_t rate) { - rt_err_t err = RT_EOK; - - if (clk && clk->clk_np) + if (clk) { - struct rt_clk_node *clk_np = clk->clk_np; - - err = rt_clk_set_rate_range(clk, rate, clk_np->max_rate); + return rt_clk_set_rate_range(clk, rate, clk->max_rate); } - return err; + return RT_EOK; } /** @@ -832,13 +961,158 @@ rt_err_t rt_clk_set_min_rate(struct rt_clk *clk, rt_ubase_t rate) */ rt_err_t rt_clk_set_max_rate(struct rt_clk *clk, rt_ubase_t rate) { - rt_err_t err = RT_EOK; + if (clk) + { + return rt_clk_set_rate_range(clk, clk->min_rate, rate); + } + + return RT_EOK; +} + +/** + * @brief Set clock rate. + * + * @details This function directly sets the frequency of the given clock. + * If the hardware driver supports set_rate(), the new rate will + * be applied immediately and the cached rate will be updated. + * + * @param clk Pointer to clock handle. + * @param rate Target frequency (Hz). + * + * @retval RT_EOK Successfully updated. + * @retval -RT_EINVAL Invalid parameter. + * @retval -RT_ENOSYS Clock driver does not support set_rate(). + * @retval other Hardware-specific error returned by set_rate(). + */ +static rt_err_t clk_set_rate(struct rt_clk *clk, rt_ubase_t rate) +{ + rt_err_t err; + rt_ubase_t old_rate, prate; + rt_bool_t was_enabled = RT_FALSE; + rt_bool_t was_disabled = RT_FALSE; + struct rt_clk *parent = RT_NULL; + struct rt_clk_node *clk_np; + struct rt_clk_cell *cell; + + cell = clk->cell; + + if (!cell->ops->set_rate) + { + return -RT_ENOSYS; + } + + clk_np = cell->clk_np; + + if (cell->flags & RT_CLK_F_GET_RATE_NOCACHE) + { + old_rate = clk_get_rate(clk); + } + else + { + old_rate = cell->rate; + } + rate = rt_clamp(rate, clk->min_rate, clk->max_rate); + + parent = clk_get_parent(clk); + + if (cell->parents_nr > 1) + { + rt_uint8_t best_idx = RT_UINT8_MAX; + rt_ubase_t best_rounded = 0, best_diff = ~0UL; + struct rt_clk_cell *parent_cell, *best_parent_cell = RT_NULL; + + for (rt_uint8_t idx = 0; idx < cell->parents_nr; ++idx) + { + rt_ubase_t rounded, diff; + + if (!(parent_cell = rt_clk_cell_get_parent_by_index(cell, idx))) + { + continue; + } + + if (!parent_cell->clk && !(parent_cell->clk = clk_cell_get_clk(parent_cell))) + { + return RT_NULL; + } + + prate = clk_get_rate(parent_cell->clk); + rounded = clk_round_rate(parent_cell->clk, rate); + rounded = (rounded > 0) ? 
rounded : rate; + + diff = rt_abs(rounded - rate); + + if (diff < best_diff) + { + best_idx = idx; + best_diff = diff; + best_rounded = rounded; + best_parent_cell = parent_cell; + } + } + + if (best_idx != RT_UINT8_MAX && parent->cell != best_parent_cell) + { + parent = best_parent_cell->clk; + + if ((err = clk_set_parent(clk, parent))) + { + return err; + } + + rate = best_rounded; + } + } + + if (parent) + { + if (cell->flags & RT_CLK_F_SET_RATE_PARENT) + { + if ((err = clk_set_rate(parent, rate))) + { + return err; + } + } + + prate = clk_get_rate(parent); + } + else + { + prate = 0; + } + + if ((cell->flags & RT_CLK_F_SET_RATE_GATE) && cell->enable_count > 0) + { + was_enabled = RT_TRUE; + clk_disable(clk); + } + else if ((cell->flags & RT_CLK_F_SET_RATE_UNGATE) && cell->enable_count == 0) + { + was_disabled = RT_TRUE; + clk_enable(clk); + } - if (clk && clk->clk_np) + clk_notify(clk_np, RT_CLK_MSG_PRE_RATE_CHANGE, old_rate, rate); + + if ((err = cell->ops->set_rate(cell, rate, prate))) { - struct rt_clk_node *clk_np = clk->clk_np; + clk_notify(clk_np, RT_CLK_MSG_ABORT_RATE_CHANGE, old_rate, rate); + + goto _end; + } + + /* Update cached rate */ + cell->rate = rate; - err = rt_clk_set_rate_range(clk, clk_np->min_rate, rate); + clk_notify(clk_np, RT_CLK_MSG_POST_RATE_CHANGE, old_rate, rate); + +_end: + if (was_enabled) + { + clk_enable(clk); + } + else if (was_disabled) + { + clk_disable(clk); } return err; @@ -854,76 +1128,361 @@ rt_err_t rt_clk_set_max_rate(struct rt_clk *clk, rt_ubase_t rate) */ rt_err_t rt_clk_set_rate(struct rt_clk *clk, rt_ubase_t rate) { - rt_err_t err = RT_EOK; + rt_err_t err; - rate = rt_clk_round_rate(clk, rate); + if (clk) + { + clk_lock(); + err = clk_set_rate(clk, rate); + clk_unlock(); + } + else + { + err = RT_EOK; + } + + return err; +} + +/** + * @brief Internal helper to get clock rate (no locking, no validation). + * + * @param clk Pointer to clock handle. + * + * @return Clock frequency in Hz, or 0 if invalid. + */ +static rt_ubase_t clk_get_rate(struct rt_clk *clk) +{ + rt_ubase_t prate; + struct rt_clk *parent; + struct rt_clk_cell *cell; + + cell = clk->cell; + parent = clk_get_parent(clk); + prate = parent ? clk_get_rate(parent) : 0; + + if (cell->ops->recalc_rate) + { + cell->rate = cell->ops->recalc_rate(cell, prate); + } + else + { + cell->rate = prate; + } + + return cell->rate; +} + +/** + * @brief Get clock rate + * + * @param clk point to clock + * + * @return rt_ubase_t clock rate or error code + */ +rt_ubase_t rt_clk_get_rate(struct rt_clk *clk) +{ + rt_ubase_t rate; - if (clk && clk->clk_np && rate > 0) + if (clk) + { + clk_lock(); + rate = clk_get_rate(clk); + clk_unlock(); + } + else { - struct rt_clk_node *clk_np = clk->clk_np; + rate = 0; + } + + return rate; +} + +/** + * @brief Internal helper to round clock rate (no locking). + * + * @param clk Pointer to clock handle. + * @param rate Desired frequency in Hz. + * + * @return Rounded frequency in Hz (may differ from requested value). + */ +static rt_ubase_t clk_round_rate(struct rt_clk *clk, rt_ubase_t rate) +{ + rt_ubase_t prate, rounded = rate; + struct rt_clk *parent; + struct rt_clk_cell *cell; + + cell = clk->cell; - rt_hw_spin_lock(&_clk_lock.lock); + parent = clk_get_parent(clk); + prate = parent ? 
clk_get_rate(parent) : 0; - if (clk_np->min_rate && rate < clk_np->min_rate) + /* If driver provides round_rate() callback, use it */ + if (cell->ops->round_rate) + { + rt_base_t res = cell->ops->round_rate(cell, rate, &prate); + + if (res > 0) { - err = -RT_EINVAL; + rounded = res; } + } + else if ((cell->flags & RT_CLK_F_SET_RATE_PARENT) && parent) + { + /* Delegate rounding to parent clock if supported */ + rounded = clk_round_rate(parent, rate); + } + + /* Clamp to valid range */ + return rt_clamp(rounded, clk->min_rate, clk->max_rate); +} - if (clk_np->max_rate && rate > clk_np->max_rate) +/** + * @brief Check if clock rate is in the minimum to maximun and get it + * + * @param clk point to clock + * @param rate rate will be checked + * + * @return rt_base_t get the correct rate + * @note if parameter rate less than the minimum or more than maximum, the + retrun rate will be set to minimum ormaximum value + */ +rt_base_t rt_clk_round_rate(struct rt_clk *clk, rt_ubase_t rate) +{ + rt_ubase_t rounded = 0; + + if (clk) + { + clk_lock(); + rounded = clk_round_rate(clk, rate); + clk_unlock(); + } + + return rounded; +} + +/** + * @brief Set parent clock + * + * @param clk_np point to clock node + * @param parent_np point to parent rt_clk + * + * @return rt_err_t RT_EOK on set clock parent sucessfully, and other value is failed. + */ +static rt_err_t clk_set_parent(struct rt_clk *clk, struct rt_clk *parent) +{ + rt_err_t err; + rt_uint8_t idx = RT_UINT8_MAX; + rt_bool_t was_enabled = RT_FALSE; + struct rt_clk_cell *cell; + + cell = clk->cell; + + /* Already same parent? */ + if (parent) + { + if (cell->parent == parent->cell->clk) { - err = -RT_EINVAL; + return RT_EOK; } + } + else if (!cell->parent) + { + return RT_EOK; + } - if (!err) + /* No multi-parent */ + if (cell->parents_nr <= 1) + { + return -RT_EINVAL; + } + + /* Multi-parent but driver lacks support */ + if (!cell->ops->set_parent) + { + return -RT_EINVAL; + } + + /* Find new parent index if provided */ + if (parent) + { + const char *pname = parent->cell->name; + + /* Temporarily gate if required */ + if ((cell->flags & RT_CLK_F_SET_PARENT_GATE) && cell->enable_count > 0) { - if (clk_np->ops->set_rate) + was_enabled = RT_TRUE; + clk_disable(clk); + } + + for (int i = 0; i < cell->parents_nr; ++i) + { + if (!rt_strcmp(cell->parent_names[i], pname)) { - rt_ubase_t old_rate = clk_np->rate; + idx = i; + break; + } + } - err = clk_np->ops->set_rate(clk, rate, - rt_clk_get_rate(clk_np->parent ? clk_np->parent->clk : RT_NULL)); + if (idx == RT_UINT8_MAX) + { + LOG_W("%s: Invalid parent %s", cell->name, pname); + err = -RT_EINVAL; + goto _end; + } - if (clk_np->rate != old_rate) - { - clk_notify(clk_np, RT_CLK_MSG_PRE_RATE_CHANGE, old_rate, clk_np->rate); - } - } - else + if (cell->ops->set_parent) + { + if (!(err = cell->ops->set_parent(cell, idx))) { - err = -RT_ENOSYS; + cell->parent = parent->cell->clk; } } + else + { + err = -RT_ENOSYS; + } - rt_hw_spin_unlock(&_clk_lock.lock); + _end: + if (was_enabled) + { + clk_enable(clk); + } + } + else + { + err = RT_EOK; } return err; } /** - * @brief Get clock rate + * @brief Set clock parent object * * @param clk point to clock + * @param clk_parent point to parent clock * - * @return rt_ubase_t clock rate or error code + * @return rt_err_t RT_EOK on set clock parent sucessfully, and other value is failed. 
*/ -rt_ubase_t rt_clk_get_rate(struct rt_clk *clk) +rt_err_t rt_clk_set_parent(struct rt_clk *clk, struct rt_clk *clk_parent) { - rt_ubase_t rate = 0; + rt_err_t err; if (clk) { - if (clk->rate) + clk_lock(); + err = clk_set_parent(clk, clk_parent); + clk_unlock(); + } + else + { + err = RT_EOK; + } + + return err; +} + +/** + * @brief Resolve and return the parent clock of a given clock handle. + * + * @details This function determines the parent clock for the provided + * clock handle (`clk`). It first checks for an existing cached + * parent, and if none exists: + * 1. Calls the driver's `get_parent()` callback to retrieve + * the parent index. + * 2. Looks up the corresponding parent clock by name from: + * - The controller's `parents_clk` array, or + * - The controller's own `cells` array. + * 3. If a matching cell exists but no rt_clk handle yet, a new + * handle is allocated via `clk_alloc()`. + * 4. The resolved parent is cached in `cell->parent`. + * + * @param clk Pointer to the clock handle. + * + * @return Pointer to the parent clock handle, or NULL on failure. + */ +static struct rt_clk *clk_get_parent(struct rt_clk *clk) +{ + rt_uint8_t idx; + struct rt_clk *parent; + struct rt_clk_cell *cell, *parent_cell; + + cell = clk->cell; + + if (!cell->parent_names) + { + return RT_NULL; + } + + if (cell->parent) + { + return cell->parent; + } + + if (cell->parents_nr > 1) + { + if (!cell->ops->get_parent) { - rate = clk->rate; + LOG_E("%s: Missing get_parent() while having parent_names", cell->name); + return RT_NULL; } - else if (clk->clk_np) + + idx = cell->ops->get_parent(cell); + + if (idx >= cell->parents_nr) { - rate = clk->clk_np->rate; + LOG_E("%s: Get parent fail", cell->name); + return RT_NULL; } } + else + { + idx = 0; + } - return rate; + parent_cell = rt_clk_cell_get_parent_by_index(cell, idx); + + if (!parent_cell) + { + return RT_NULL; + } + + if (!parent_cell->clk && !(parent_cell->clk = clk_cell_get_clk(parent_cell))) + { + return RT_NULL; + } + + parent = parent_cell->clk; + cell->parent = parent; + + return parent; +} + +/** + * @brief Get parent clock pointer + * + * @param clk child clock + * + * @return struct rt_clk* parent clock object pointer will be return, unless child + clock node havn't parent node instead return RT_NULL + */ +struct rt_clk *rt_clk_get_parent(struct rt_clk *clk) +{ + struct rt_clk *parent; + + if (clk) + { + clk_lock(); + parent = clk_get_parent(clk); + clk_unlock(); + } + else + { + parent = RT_NULL; + } + + return parent; } /** @@ -936,15 +1495,32 @@ rt_ubase_t rt_clk_get_rate(struct rt_clk *clk) */ rt_err_t rt_clk_set_phase(struct rt_clk *clk, int degrees) { - rt_err_t err = RT_EOK; + rt_err_t err; - if (clk && clk->clk_np && clk->clk_np->ops->set_phase) + if (clk) { - rt_hw_spin_lock(&_clk_lock.lock); + struct rt_clk_cell *cell = clk->cell; - err = clk->clk_np->ops->set_phase(clk, degrees); + /* Sanity check degrees */ + degrees %= 360; - rt_hw_spin_unlock(&_clk_lock.lock); + if (degrees < 0) + { + degrees += 360; + } + + if (cell->ops->set_phase) + { + err = cell->ops->set_phase(cell, degrees); + } + else + { + err = -RT_ENOSYS; + } + } + else + { + err = RT_EOK; } return err; @@ -959,122 +1535,310 @@ rt_err_t rt_clk_set_phase(struct rt_clk *clk, int degrees) */ rt_base_t rt_clk_get_phase(struct rt_clk *clk) { - rt_base_t res = RT_EOK; + rt_base_t res; - if (clk && clk->clk_np && clk->clk_np->ops->get_phase) + if (clk) { - rt_hw_spin_lock(&_clk_lock.lock); + struct rt_clk_cell *cell = clk->cell; - res = 
clk->clk_np->ops->get_phase(clk); - - rt_hw_spin_unlock(&_clk_lock.lock); + if (cell->ops->get_phase) + { + res = cell->ops->get_phase(cell); + } + else + { + res = 0; + } + } + else + { + res = 0; } return res; } /** - * @brief Check if clock rate is in the minimum to maximun and get it + * @brief Check if the clock cell is prepared * - * @param clk point to clock - * @param rate rate will be checked + * @param cell Pointer to clock cell * - * @return rt_base_t get the correct rate - * @note if parameter rate less than the minimum or more than maximum, the - retrun rate will be set to minimum ormaximum value + * @return RT_TRUE if prepared, otherwise RT_FALSE */ -rt_base_t rt_clk_round_rate(struct rt_clk *clk, rt_ubase_t rate) +rt_bool_t rt_clk_cell_is_prepared(const struct rt_clk_cell *cell) { - rt_base_t res = -RT_EINVAL; + RT_ASSERT(cell != RT_NULL); - if (clk && clk->clk_np) + if (cell->ops->is_prepared) { - struct rt_clk_node *clk_np = clk->clk_np; + return cell->ops->is_prepared((struct rt_clk_cell *)cell); + } - if (clk_np->ops->round_rate) - { - rt_ubase_t best_parent_rate; + return RT_TRUE; +} - rt_hw_spin_lock(&_clk_lock.lock); +/** + * @brief Get or create clock handle for a clock cell + * + * @param cell Pointer to clock cell + * + * @return Pointer to clock handle, or RT_NULL on failure + * + * @note If the clock handle does not exist, it will be created automatically. + */ +static struct rt_clk *clk_cell_get_clk(struct rt_clk_cell *cell) +{ + if (cell->clk) + { + return cell->clk; + } - if (clk_np->min_rate && clk_np->max_rate) - { - rate = rt_clamp(rate, clk_np->min_rate, clk_np->max_rate); - } + cell->clk = clk_alloc(cell, RT_NULL, RT_NULL); - res = clk_np->ops->round_rate(clk, rate, &best_parent_rate); - (void)best_parent_rate; + return cell->clk; +} - rt_hw_spin_unlock(&_clk_lock.lock); +/** + * @brief Get or create clock handle for a clock cell + * + * @param cell Pointer to clock cell + * @param con_id Connection identifier for the clock cell + * + * @return Pointer to clock handle, or RT_NULL on failure + * + * @note If the clock handle does not exist, it will be created automatically. 
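Before the cell-level helpers below, a sketch of the rate callbacks for a simple power-of-two divider cell. The register model and the soc_div_clk container are hypothetical; the prototypes are inferred from the recalc_rate/round_rate/set_rate call sites in this file.

#include <rtthread.h>
#include <rtdevice.h>

struct soc_div_clk
{
    struct rt_clk_cell cell;
    volatile rt_uint32_t *div_reg; /* hypothetical: low 3 bits hold log2(divider) */
};

static rt_ubase_t soc_div_recalc_rate(struct rt_clk_cell *cell, rt_ubase_t parent_rate)
{
    struct soc_div_clk *dc = rt_container_of(cell, struct soc_div_clk, cell);

    return parent_rate >> (*dc->div_reg & 0x7);
}

static rt_base_t soc_div_round_rate(struct rt_clk_cell *cell, rt_ubase_t rate,
                                    rt_ubase_t *parent_rate)
{
    rt_uint32_t shift;

    /* pick the smallest power-of-two divider that does not overshoot the request */
    for (shift = 0; shift < 7; ++shift)
    {
        if ((*parent_rate >> shift) <= rate)
        {
            break;
        }
    }

    return *parent_rate >> shift;
}

static rt_err_t soc_div_set_rate(struct rt_clk_cell *cell, rt_ubase_t rate, rt_ubase_t parent_rate)
{
    struct soc_div_clk *dc = rt_container_of(cell, struct soc_div_clk, cell);
    rt_uint32_t shift = 0;

    while (shift < 7 && (parent_rate >> shift) > rate)
    {
        ++shift;
    }

    *dc->div_reg = (*dc->div_reg & ~0x7u) | shift;

    return RT_EOK;
}

static struct rt_clk_ops soc_div_ops =
{
    .recalc_rate = soc_div_recalc_rate,
    .round_rate  = soc_div_round_rate,
    .set_rate    = soc_div_set_rate,
};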
+ */ +struct rt_clk *rt_clk_cell_get_clk(const struct rt_clk_cell *cell, const char *con_id) +{ + struct rt_clk *clk; + + RT_ASSERT(cell != RT_NULL); + + if ((clk = clk_cell_get_clk((struct rt_clk_cell *)cell))) + { + if (!clk->con_id) + { + clk->con_id = con_id; } - else + } + + return clk; +} + +/** + * @brief Check if the clock cell is enabled + * + * @param cell Pointer to clock cell + * + * @return RT_TRUE if enabled, otherwise RT_FALSE + */ +rt_bool_t rt_clk_cell_is_enabled(const struct rt_clk_cell *cell) +{ + RT_ASSERT(cell != RT_NULL); + + if (cell->ops->is_enabled) + { + return cell->ops->is_enabled((struct rt_clk_cell *)cell); + } + + return RT_TRUE; +} + +/** + * @brief Get current rate of the clock cell + * + * @param cell Pointer to clock cell + * + * @return Current rate in Hz + */ +rt_ubase_t rt_clk_cell_get_rate(const struct rt_clk_cell *cell) +{ + struct rt_clk *clk; + + RT_ASSERT(cell != RT_NULL); + + clk = clk_cell_get_clk((struct rt_clk_cell *)cell); + + return clk_get_rate(clk); +} + +/** + * @brief Round a desired rate to the nearest supported rate + * + * @param cell Pointer to clock cell + * @param rate Desired frequency in Hz + * + * @return Closest supported frequency in Hz + */ +rt_ubase_t rt_clk_cell_round_rate(struct rt_clk_cell *cell, rt_ubase_t rate) +{ + struct rt_clk *clk; + + RT_ASSERT(cell != RT_NULL); + + clk = clk_cell_get_clk((struct rt_clk_cell *)cell); + + return clk_round_rate(clk, rate); +} + +/** + * @brief Get parent clock cell + * + * @param cell Pointer to clock cell + * + * @return Pointer to parent clock cell, or RT_NULL if none + */ +struct rt_clk_cell *rt_clk_cell_get_parent(const struct rt_clk_cell *cell) +{ + struct rt_clk *clk, *parent_clk; + + RT_ASSERT(cell != RT_NULL); + + clk = clk_cell_get_clk((struct rt_clk_cell *)cell); + + if ((parent_clk = clk_get_parent(clk))) + { + return parent_clk->cell; + } + + return RT_NULL; +} + +/** + * @brief Get parent clock cell by index + * + * @param cell Pointer to clock cell + * @param idx Parent index + * + * @return Pointer to parent clock cell, or RT_NULL if not found + */ +struct rt_clk_cell *rt_clk_cell_get_parent_by_index(const struct rt_clk_cell *cell, rt_uint8_t idx) +{ + const char *pname; + struct rt_clk_cell *parent_cell; + struct rt_clk_node *clk_np, *clk_np_raw; + + RT_ASSERT(cell != RT_NULL); + RT_ASSERT(idx != RT_UINT8_MAX); + + clk_np = cell->clk_np; + + if (cell->parents_nr > 1) + { + pname = cell->parent_names[idx]; + } + else if (idx == 0) + { + pname = cell->parent_name; + } + else + { + pname = RT_NULL; + goto _end; + } + + clk_np_raw = RT_NULL; + +_retry: + if (!rt_is_err_or_null(clk_np->parents_clk)) + { + struct rt_clk_array *parents_clk = clk_np->parents_clk; + + for (rt_uint8_t i = 0; i < parents_clk->count; ++i) { - if (rate < clk_np->min_rate) - { - res = clk_np->min_rate; - } - else if (rate > clk_np->max_rate) - { - res = clk_np->max_rate; - } - else + if (!rt_strcmp(pname, parents_clk->clks[i]->cell->name)) { - res = rate; + return parents_clk->clks[i]->cell; } } } - return res; + for (int i = 0; i < clk_np->cells_nr; ++i) + { + parent_cell = clk_np->cells[i]; + + if (!parent_cell) + { + continue; + } + + if (!rt_strcmp(parent_cell->name, pname)) + { + return (struct rt_clk_cell *)parent_cell; + } + } + + /* Find on the global list */ + if (clk_np_raw) + { + do { + clk_np = rt_list_entry(clk_np->parent.list.next, rt_typeof(*clk_np), parent.list); + } while (&clk_np->parent.list != &_clk_node_nodes && clk_np == clk_np_raw); + } + else + { + clk_np_raw = clk_np; + 
clk_np = rt_list_entry(_clk_node_nodes.next, rt_typeof(*clk_np), parent.list); + } + + if (&clk_np->parent.list != &_clk_node_nodes) + { + goto _retry; + } + +_end: + LOG_E("%s: Parent[%d] '%s' not found", cell->name, idx, pname); + + return RT_NULL; } /** - * @brief Set clock parent object + * @brief Get current parent index * - * @param clk point to clock - * @param clk_parent point to parent clock + * @param cell Pointer to clock cell * - * @return rt_err_t RT_EOK on set clock parent sucessfully, and other value is failed. + * @return Parent index on success, negative error code on failure */ -rt_err_t rt_clk_set_parent(struct rt_clk *clk, struct rt_clk *clk_parent) +rt_uint8_t rt_clk_cell_get_parent_index(struct rt_clk_cell *cell) { - rt_err_t err = RT_EOK; + RT_ASSERT(cell != RT_NULL); - if (clk && clk->clk_np && clk->clk_np->ops->set_parent) + if (cell->ops->get_parent) { - rt_hw_spin_lock(&_clk_lock.lock); - - err = clk->clk_np->ops->set_parent(clk, clk_parent); - - rt_hw_spin_unlock(&_clk_lock.lock); + return cell->ops->get_parent(cell); } - return err; + return RT_UINT8_MAX; } /** - * @brief Get parent clock pointer + * @brief Set new parent clock cell * - * @param clk child clock + * @param cell Pointer to clock cell + * @param parent Pointer to new parent clock cell * - * @return struct rt_clk* parent clock object pointer will be return, unless child - clock node havn't parent node instead return RT_NULL + * @return RT_EOK on success, or error code on failure */ -struct rt_clk *rt_clk_get_parent(struct rt_clk *clk) +rt_err_t rt_clk_cell_set_parent(struct rt_clk_cell *cell, struct rt_clk_cell *parent) { - struct rt_clk *parent = RT_NULL; + rt_err_t err; + struct rt_clk *clk, *parent_clk = RT_NULL; - if (clk) - { - struct rt_clk_node *clk_np = clk->clk_np; + RT_ASSERT(cell != RT_NULL); - rt_hw_spin_lock(&_clk_lock.lock); + clk = clk_cell_get_clk((struct rt_clk_cell *)cell); - parent = clk_np->parent ? 
clk_np->parent->clk : RT_NULL; + if (parent) + { + parent_clk = clk_cell_get_clk((struct rt_clk_cell *)parent); + } - rt_hw_spin_unlock(&_clk_lock.lock); + if ((err = clk_set_parent(clk, parent_clk))) + { + return err; } - return parent; + return RT_EOK; } /** @@ -1133,6 +1897,40 @@ struct rt_clk *rt_clk_get_by_name(struct rt_device *dev, const char *name) clk = rt_ofw_get_clk_by_name(dev->ofw_node, name); #endif + if (!clk && name) + { + struct rt_clk_node *clk_np; + struct rt_clk_cell *cell = RT_NULL; + + clk_lock(); + rt_list_for_each_entry(clk_np, &_clk_node_nodes, parent.list) + { + for (int i = 0; i < clk_np->cells_nr; ++i) + { + cell = clk_np->cells[i]; + + if (!cell) + { + continue; + } + + if (!rt_strcmp(cell->name, name)) + { + clk = clk_alloc(cell, rt_dm_dev_get_name(dev), RT_NULL); + + if (clk) + { + clk_cell_bind(cell, clk); + } + + goto _out_lock; + } + } + } + _out_lock: + clk_unlock(); + } + return clk; } @@ -1148,7 +1946,7 @@ void rt_clk_array_put(struct rt_clk_array *clk_arr) { for (int i = 0; i < clk_arr->count; ++i) { - if (clk_arr->clks[i]) + if (!rt_is_err_or_null(clk_arr->clks[i])) { rt_clk_put(clk_arr->clks[i]); } @@ -1170,100 +1968,122 @@ void rt_clk_array_put(struct rt_clk_array *clk_arr) */ void rt_clk_put(struct rt_clk *clk) { - if (clk) + if (clk && clk->cell->clk != clk) { - clk_put(clk->clk_np); - clk_free(clk); + rt_free(clk); } } #ifdef RT_USING_OFW +static struct rt_clk_array *ofw_get_clk_array(struct rt_ofw_node *np, + const char *basename, const char *propname); +static struct rt_clk *ofw_get_clk(struct rt_ofw_node *np, + const char *basename, int index, const char *name); + +/** + * @brief Retrieve a clock cell from a clock node using OFW (device tree) arguments. + * + * @details + * This helper function translates parsed device tree clock specifiers + * (from `clocks = <&phandle args...>;`) into an actual `rt_clk_cell` pointer + * belonging to the specified `clk_np` (clock node). + * + * Behavior: + * - If the clock node provides a custom parser (`clk_np->ofw_parse`), + * this function delegates the lookup to that callback. + * → This allows complex clock providers (e.g. multiplexers, dividers) + * to interpret multiple arguments or encoded indices. + * - Otherwise, it assumes the first argument (`args->args[0]`) + * is the cell index and directly returns `clk_np->cells[args->args[0]]`. + * + * This abstraction allows different clock providers to implement flexible + * device-tree bindings without changing the core clock framework. + * + * @param clk_np Pointer to the clock node containing clock cells. + * @param args Pointer to parsed OFW clock arguments (from device tree). + * + * @return Pointer to the resolved `rt_clk_cell` if found, or `RT_NULL` on failure. + * + * @note + * - The default indexing behavior assumes that the clock node’s `#clock-cells` + * property equals 1 (only one integer index). + * - Complex clock providers should implement their own `.ofw_parse()` callback + * to handle multiple argument cases. + * - This function is typically used during `rt_ofw_clk_get()` to + * resolve device clock references. 
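For providers whose #clock-cells encodes more than a flat index, the node can install its own ofw_parse hook instead of the default cells[args[0]] lookup handled by ofw_get_cell() below. A sketch assuming a two-cell <group index> binding and the hook prototype inferred from its single call site.

#include <rtthread.h>
#include <rtdevice.h>

#ifdef RT_USING_OFW

#define SOC_CLK_GROUP_SIZE 16 /* hypothetical: 16 cells per clock group */

/* binding sketch: consumers write `clocks = <&soc_clk GROUP INDEX>;` with #clock-cells = <2> */
static struct rt_clk_cell *soc_clk_ofw_parse(struct rt_clk_node *clk_np,
                                             struct rt_ofw_cell_args *args)
{
    rt_uint32_t cell_id;

    if (args->args_count < 2)
    {
        return RT_NULL;
    }

    cell_id = args->args[0] * SOC_CLK_GROUP_SIZE + args->args[1];

    if (cell_id >= clk_np->cells_nr)
    {
        return RT_NULL;
    }

    return clk_np->cells[cell_id];
}

/* installed in the provider's probe, before rt_clk_register():
 *     node->ofw_parse = soc_clk_ofw_parse;
 */
#endif /* RT_USING_OFW */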
+ */ +static struct rt_clk_cell *ofw_get_cell(struct rt_clk_node *clk_np, struct rt_ofw_cell_args *args) +{ + if (clk_np->ofw_parse) + { + return clk_np->ofw_parse(clk_np, args); + } + + return clk_np->cells[args->args[0]]; +} + /** - * @brief Get a clock object from a device tree node without acquiring a lock + * @brief Get clock array from ofw by name * - * @param np point to ofw node - * @param index index of clock in ofw - * @param name connection identifier for the clock - * @param locked lock flag for indicating whether the caller holds the lock + * @param np point to ofw node + * @param basename name of clocks base name + * @param propname name of clocks prop name * - * @return struct rt_clk* point to the newly created clock object, or an error pointer + * @return struct rt_clk_array* point to the newly created clock array, or an error pointer */ -static struct rt_clk *ofw_get_clk_no_lock(struct rt_ofw_node *np, int index, const char *name, rt_bool_t locked) +static struct rt_clk_array *ofw_get_clk_array(struct rt_ofw_node *np, + const char *basename, const char *propname) { - struct rt_clk *clk = RT_NULL; - struct rt_ofw_cell_args clk_args; + int count; + rt_bool_t has_name; + struct rt_clk_array *clk_arr; - if (!rt_ofw_parse_phandle_cells(np, "clocks", "#clock-cells", index, &clk_args)) + if ((count = rt_ofw_count_phandle_cells(np, basename, "#clock-cells")) <= 0) { - int count; - struct rt_object *obj; - struct rt_clk_node *clk_np = RT_NULL; - struct rt_ofw_node *clk_ofw_np = clk_args.data; - - if (!rt_ofw_data(clk_ofw_np)) + if (count) { - if (locked) - { - rt_hw_spin_unlock(&_clk_lock.lock); - } - - rt_platform_ofw_request(clk_ofw_np); - - if (locked) - { - rt_hw_spin_lock(&_clk_lock.lock); - } + return rt_err_ptr(count); } - if (rt_ofw_data(clk_ofw_np) && (obj = rt_ofw_parse_object(clk_ofw_np, - RT_CLK_NODE_OBJ_NAME, "#clock-cells"))) - { - clk_np = rt_container_of(obj, struct rt_clk_node, rt_parent); + return RT_NULL; + } - count = rt_ofw_count_of_clk(clk_ofw_np); - } + clk_arr = rt_calloc(1, sizeof(*clk_arr) + sizeof(clk_arr->clks[0]) * count); + + if (!clk_arr) + { + return rt_err_ptr(-RT_ENOMEM); + } - rt_ofw_node_put(clk_ofw_np); + clk_arr->count = count; + has_name = rt_ofw_prop_read_bool(np, propname); - if (clk_np) - { - if (count > 1) - { - /* args[0] must be the index of CLK */ - clk_np = &clk_np[clk_args.args[0]]; - } + clk_lock(); + for (int i = 0; i < count; ++i) + { + const char *name = RT_NULL; - clk = clk_create(clk_np, np->full_name, name, &clk_args, np); - } - else + if (has_name) { - clk = rt_err_ptr(-RT_ERROR); + rt_ofw_prop_read_string_index(np, "clock-names", i, &name); } - } - return clk; -} + clk_arr->clks[i] = ofw_get_clk(np, basename, i, name); -/** - * @brief Get clock from ofw with acquiring a spin lock - * - * @param np point to ofw node - * @param index index of clock in ofw - * @param name connection identifier for the clock - * - * @return struct rt_clk* point to the newly created clock object, or an error pointer - */ -static struct rt_clk *ofw_get_clk(struct rt_ofw_node *np, int index, const char *name) -{ - struct rt_clk *clk; + if (rt_is_err(clk_arr->clks[i])) + { + rt_err_t err = rt_ptr_err(clk_arr->clks[i]); - rt_hw_spin_lock(&_clk_lock.lock); + clk_unlock(); - clk = ofw_get_clk_no_lock(np, index, name, RT_TRUE); + rt_clk_array_put(clk_arr); - rt_hw_spin_unlock(&_clk_lock.lock); + return rt_err_ptr(err); + } + } + clk_unlock(); - return clk; + return clk_arr; } /** @@ -1275,59 +2095,115 @@ static struct rt_clk *ofw_get_clk(struct 
rt_ofw_node *np, int index, const char */ struct rt_clk_array *rt_ofw_get_clk_array(struct rt_ofw_node *np) { - int count; - struct rt_clk_array *clk_arr = RT_NULL; - if (!np) { return rt_err_ptr(-RT_EINVAL); } - if ((count = rt_ofw_count_phandle_cells(np, "clocks", "#clock-cells")) > 0) - { - clk_arr = rt_calloc(1, sizeof(*clk_arr) + sizeof(clk_arr->clks[0]) * count); + return ofw_get_clk_array(np, "clocks", "clock-names"); +} - if (clk_arr) - { - int i; - rt_err_t err = RT_EOK; - rt_bool_t has_name = rt_ofw_prop_read_bool(np, "clock-names"); +/** + * @brief Get clock from ofw + * + * @param np point to ofw node + * @param basename name of clocks base name + * @param index index of clock in ofw + * @param name connection identifier for the clock + * + * @return struct rt_clk* point to the newly created clock object, or an error pointer + */ +static struct rt_clk *ofw_get_clk(struct rt_ofw_node *np, + const char *basename, int index, const char *name) +{ + struct rt_object *obj; + struct rt_clk *clk; + struct rt_clk_cell *cell; + struct rt_clk_node *clk_np = RT_NULL; + struct rt_ofw_node *clk_ofw_np; + struct rt_ofw_cell_args clk_args; - clk_arr->count = count; + if (rt_ofw_parse_phandle_cells(np, basename, "#clock-cells", index, &clk_args)) + { + return RT_NULL; + } - rt_hw_spin_lock(&_clk_lock.lock); + clk_ofw_np = clk_args.data; - for (i = 0; i < count; ++i) - { - const char *name = RT_NULL; + if (!rt_ofw_data(clk_ofw_np)) + { + if (clk_ofw_np == np) + { + LOG_D("%s: No registration to the system yet", rt_ofw_node_full_name(clk_ofw_np)); + return RT_NULL; + } - if (has_name) - { - rt_ofw_prop_read_string_index(np, "clock-names", i, &name); - } + rt_platform_ofw_request(clk_ofw_np); + } - clk_arr->clks[i] = ofw_get_clk_no_lock(np, i, name, RT_FALSE); + if (rt_ofw_data(clk_ofw_np) && (obj = rt_ofw_parse_object(clk_ofw_np, + RT_CLK_NODE_OBJ_NAME, "#clock-cells"))) + { + clk_np = rt_container_of(obj, struct rt_clk_node, parent); + } - if (rt_is_err(clk_arr->clks[i])) - { - err = rt_ptr_err(clk_arr->clks[i]); + if (!clk_np) + { + clk = rt_err_ptr(-RT_EINVAL); + goto _end; + } - --i; - break; - } - } + if (!clk_args.args_count) + { + clk_args.args[0] = 0; + } + index = clk_args.args[0]; - rt_hw_spin_unlock(&_clk_lock.lock); + if (rt_ofw_prop_read_bool(clk_ofw_np, "clock-indices")) + { + const fdt32_t *val_raw; + rt_uint32_t val, indice = 0; + struct rt_ofw_prop *prop; - if (i > 0 && i < count) + rt_ofw_foreach_prop_u32(clk_ofw_np, "clock-indices", prop, val_raw, val) + { + if (index == val) { - rt_clk_array_put(clk_arr); - clk_arr = rt_err_ptr(err); + index = indice; + goto _goon; } + ++indice; } + + clk = rt_err_ptr(-RT_EINVAL); + goto _end; } +_goon: - return clk_arr; + rt_ofw_prop_read_string_index(clk_ofw_np, "clock-output-names", index, &name); + + if (!(cell = ofw_get_cell(clk_np, &clk_args))) + { + LOG_D("%s: CLK index = %d (%s) is not implemented", + rt_ofw_node_full_name(np), index, name); + return RT_NULL; + } + + clk = clk_alloc(cell, rt_ofw_node_full_name(np), name); + + if (clk) + { + clk_cell_bind(cell, clk); + } + else + { + clk = rt_err_ptr(-RT_ENOMEM); + } + +_end: + rt_ofw_node_put(clk_ofw_np); + + return clk; } /** @@ -1344,7 +2220,7 @@ struct rt_clk *rt_ofw_get_clk(struct rt_ofw_node *np, int index) if (np && index >= 0) { - clk = ofw_get_clk(np, index, RT_NULL); + clk = ofw_get_clk(np, "clocks", index, RT_NULL); } return clk; @@ -1368,7 +2244,7 @@ struct rt_clk *rt_ofw_get_clk_by_name(struct rt_ofw_node *np, const char *name) if (index >= 0) { - clk = ofw_get_clk(np, 
index, name); + clk = ofw_get_clk(np, "clocks", index, name); } } @@ -1431,6 +2307,10 @@ rt_ssize_t rt_ofw_count_of_clk(struct rt_ofw_node *clk_ofw_np) ++count; } } + if (!count) + { + count = 1; + } } else { @@ -1450,6 +2330,219 @@ rt_ssize_t rt_ofw_count_of_clk(struct rt_ofw_node *clk_ofw_np) return -RT_EINVAL; } +/** + * @brief Get parent clock name from device tree + * + * @param np Pointer to device tree node + * @param index Index within "clocks" property + * + * @return const char* Name of the parent clock, or NULL if not found + */ +const char *rt_ofw_clk_get_parent_name(struct rt_ofw_node *np, int index) +{ + const char *pname = RT_NULL; + struct rt_ofw_node *clk_ofw_np; + struct rt_ofw_cell_args clk_args; + + if (rt_ofw_parse_phandle_cells(np, "clocks", "#clock-cells", index, &clk_args)) + { + return RT_NULL; + } + + clk_ofw_np = clk_args.data; + + index = clk_args.args_count ? clk_args.args[0] : 0; + + if (rt_ofw_prop_read_bool(clk_ofw_np, "clock-indices")) + { + const fdt32_t *val_raw; + rt_uint32_t val, indice = 0; + struct rt_ofw_prop *prop; + + rt_ofw_foreach_prop_u32(clk_ofw_np, "clock-indices", prop, val_raw, val) + { + if (index == val) + { + index = indice; + goto _goon; + } + ++indice; + } + + goto _end; + } +_goon: + + if (rt_ofw_prop_read_string_index(clk_ofw_np, "clock-output-names", index, &pname)) + { + struct rt_clk *provider_clk = rt_ofw_get_clk(np, index); + + if (!rt_is_err_or_null(provider_clk)) + { + pname = provider_clk->cell->name; + + rt_clk_put(provider_clk); + } + } + +_end: + rt_ofw_node_put(clk_ofw_np); + + return pname; +} + +/** + * @brief Initialize clock from device tree (OFW) defaults. + * + * @details + * This function applies the device tree–specified clock defaults. + * It processes the following standard DT bindings in order: + * + * - **assigned-clocks**: list of clock phandles that must be configured + * before the device is probed. + * - **assigned-clock-parents**: optional list of corresponding parent + * clock phandles for each entry in *assigned-clocks*. + * - **assigned-clock-rates**: optional list of target rates (in Hz) + * to set for each clock in *assigned-clocks*. + * + * For each assigned clock, the function will: + * 1. Retrieve the referenced clock handle. + * 2. Set its parent if a corresponding entry in + * *assigned-clock-parents* exists. + * 3. Set its rate if a corresponding entry in + * *assigned-clock-rates* exists. + * + * This ensures that all clocks required by a device are configured + * according to the hardware design before the device driver runs. + * + * @param np Pointer to ofw node + * + * @return + * - RT_EOK : Successfully applied assigned-clocks settings. + * - -RT_EINVAL : Invalid or inconsistent device tree entries. + * - -RT_ENOSYS : Clock driver does not support required operation. + * - Other negative values : Underlying driver or hardware error. + * + * @note + * - This function should be called **after all clocks in the system + * have been registered**, ensuring that referenced parents exist. + * - Clocks not listed in *assigned-clocks* are left unchanged. + * - The function is typically invoked from + * `platform_probe()` when a platform device is probed.
+ */ +rt_err_t rt_ofw_clk_set_defaults(struct rt_ofw_node *np) +{ + struct rt_clk *clk; + struct rt_clk_array *clk_arr; + + if (!np) + { + return RT_EOK; + } + + clk_arr = ofw_get_clk_array(np, "assigned-clocks", RT_NULL); + + if (rt_is_err(clk_arr)) + { + return rt_ptr_err(clk_arr); + } + + if (clk_arr) + { + rt_uint32_t rate; + struct rt_clk_array *clk_parent_arr; + + clk_parent_arr = ofw_get_clk_array(np, "assigned-clock-parents", RT_NULL); + + if (rt_is_err(clk_parent_arr)) + { + rt_clk_array_put(clk_arr); + return rt_ptr_err(clk_parent_arr); + } + + for (int i = 0; i < clk_arr->count; ++i) + { + clk = clk_arr->clks[i]; + + if (clk_parent_arr && i < clk_parent_arr->count) + { + rt_clk_set_parent(clk, clk_parent_arr->clks[i]); + } + + if (!rt_ofw_prop_read_u32_index(np, "assigned-clock-rates", i, &rate)) + { + rt_clk_set_rate(clk, rate); + } + } + + rt_clk_array_put(clk_parent_arr); + rt_clk_array_put(clk_arr); + } + + return RT_EOK; +} #endif /* RT_USING_OFW */ +#if defined(RT_USING_CONSOLE) && defined(RT_USING_MSH) +static int list_clk(int argc, char**argv) +{ + struct rt_clk_node *clk_np; + struct rt_clk_cell *cell, *parent; + + rt_kprintf("%-*.s %-*.s %-*.s %-*.s %-*.s %-*.s Parent\n", + 32, "Name", + 12, "Enable Count", + 13, "Prepare Count", + 11, "Rate", + 32, "Device ID", + 32, "Connection ID"); + + clk_lock(); + rt_list_for_each_entry(clk_np, &_clk_node_nodes, parent.list) + { + for (int i = 0; i < clk_np->cells_nr; ++i) + { + rt_ubase_t rate; + const char *dev_id = "deviceless", *con_id = "no_connection_id"; + + cell = clk_np->cells[i]; + + if (!cell) + { + continue; + } + + rate = cell->rate ? : rt_clk_cell_get_rate(cell); + + if (cell->clk) + { + if (cell->clk->dev_id) + { + dev_id = cell->clk->dev_id; + } + if (cell->clk->con_id) + { + con_id = cell->clk->con_id; + } + } + + parent = rt_clk_cell_get_parent(cell); + + rt_kprintf("%-*.s %-12d %-13d %-11lu %-*.s %-*.s %s\n", + 32, cell->name, + cell->enable_count, + cell->prepare_count, + rate, + 32, dev_id, + 32, con_id, + parent ? 
parent->name : RT_NULL); + } + } + clk_unlock(); + + return 0; +} +MSH_CMD_EXPORT(list_clk, dump all of clk information); +#endif /* RT_USING_CONSOLE && RT_USING_MSH */ + /**@}*/ \ No newline at end of file diff --git a/components/drivers/core/platform.c b/components/drivers/core/platform.c index c3a141654a8..d308dd08065 100644 --- a/components/drivers/core/platform.c +++ b/components/drivers/core/platform.c @@ -15,6 +15,9 @@ #define DBG_LVL DBG_INFO #include +#ifdef RT_USING_CLK +#include +#endif #include #include #include @@ -119,6 +122,13 @@ static rt_err_t platform_probe(rt_device_t dev) struct rt_ofw_node *np = dev->ofw_node; #endif +#ifdef RT_USING_CLK + if ((err = rt_ofw_clk_set_defaults(dev->ofw_node))) + { + return err; + } +#endif + err = rt_dm_power_domain_attach(dev, RT_TRUE); if (err && err != -RT_EEMPTY) diff --git a/components/drivers/include/drivers/clk.h b/components/drivers/include/drivers/clk.h index 26da4d87200..dd74fdc85bf 100644 --- a/components/drivers/include/drivers/clk.h +++ b/components/drivers/include/drivers/clk.h @@ -7,6 +7,7 @@ * Date Author Notes * 2022-11-26 GuEe-GUI first version * 2025-01-24 wumingzi add doxygen comment + * 2024-05-01 GuEe-GUI make cell for hardware clock */ #ifndef __CLK_H__ @@ -30,83 +31,131 @@ #define RT_CLK_NODE_OBJ_NAME "CLKNP" +struct rt_clk; struct rt_clk_ops; -struct rt_reset_control_node; +struct rt_clk_cell; /** - * @brief Clk node, it is a pat of clk source or controller - * @note Defined as the array like this if the CLK have multi out clocks: - * @code{.c} - * struct XYZ_single_clk - * { - * struct rt_clk_node parent; - * ... - * }; + * @brief Clock provider node - represents a hardware clock controller. * - * struct XYZ_multi_clk - * { - * struct rt_clk_node parent[N]; - * ... - * }; - * @endcode - * We assume the 'N' is the max value of element in 'clock-indices' if OFW. + * A @ref rt_clk_node corresponds to one hardware clock provider in the + * system, such as a PLL controller, a clock multiplexer, or a composite + * clock block defined in the device tree. + * + * Each clock node may contain multiple hardware clock outputs, described + * as @ref rt_clk_cell structures, which represent individual leaf clocks. + * + * Members: + * - `parent` — embedded @ref rt_object header for RT-Thread object system. + * - `dev` — back-reference to the hardware device providing this clock domain. + * - `parents_clk` — optional array of parent clock handles. + * - `multi_clk` — number of clock outputs exported by this provider. + * - `cells_nr`/`cells` — count and list of @ref rt_clk_cell pointers representing each output. + * - `ofw_parse` — callback used to parse clock arguments from device tree + * (`#clock-cells`) and select the corresponding @ref rt_clk_cell. + * - `priv` — implementation-specific private data. + * + * Typical usage: + * 1. Define a @ref rt_clk_node describing the hardware clock controller. + * 2. Implement `ofw_parse()` to resolve device tree `phandle` arguments. + * 3. Register the node using @ref rt_clk_register().
*/ struct rt_clk_node { - struct rt_object rt_parent; + struct rt_object parent; - rt_list_t list; - rt_list_t children_nodes; + struct rt_device *dev; + struct rt_clk_array *parents_clk; + + rt_size_t multi_clk; + + rt_size_t cells_nr; + struct rt_clk_cell **cells; + + struct rt_clk_cell *(*ofw_parse)(struct rt_clk_node *clk_np, struct rt_ofw_cell_args *args); + + void *priv; +}; + +#define RT_CLK_F_SET_RATE_GATE RT_BIT(0) /**< Must be gated across rate change */ +#define RT_CLK_F_SET_PARENT_GATE RT_BIT(1) /**< Must be gated across re-parent */ +#define RT_CLK_F_SET_RATE_PARENT RT_BIT(2) /**< Propagate rate change up one level */ +#define RT_CLK_F_IGNORE_UNUSED RT_BIT(3) /**< Do not gate even if unused */ +#define RT_CLK_F_SET_RATE_UNGATE RT_BIT(4) /**< Clock needs to run to set rate */ +#define RT_CLK_F_IS_CRITICAL RT_BIT(5) /**< Do not gate, ever */ +#define RT_CLK_F_GET_RATE_NOCACHE RT_BIT(6) /**< Do not get rate by cache */ + +/** + * @brief Clock cell - represents a single hardware clock element. + * + * A clk_cell is the fundamental unit of a clock tree, such as a PLL, divider, + * mux, or gate. It maintains its relationship to parent clocks using pointers, + * not lists, for lightweight hierarchy management. + * + * The 'ops' field defines hardware-specific callbacks. The framework invokes + * these during enable, disable, and rate changes. + */ +struct rt_clk_cell +{ + struct rt_clk_node *clk_np; const char *name; const struct rt_clk_ops *ops; - struct rt_clk_node *parent; - struct rt_ref ref; + rt_uint8_t parents_nr; + union + { + const char *parent_name; /**< When parents_nr = 1 */ + const char *const *parent_names; + }; - rt_ubase_t rate; - rt_ubase_t min_rate; - rt_ubase_t max_rate; + rt_ubase_t rate; /**< Cached or fixed rate (not always accurate) */ + struct rt_clk *clk; + struct rt_clk *parent; - rt_size_t notifier_count; + int prepare_count; + int enable_count; - void *priv; + rt_uint32_t flags; - struct rt_clk *clk; - rt_size_t multi_clk; + void *priv; }; /** - * @brief Constant rate clk + * @brief Fixed-rate clock descriptor. + * + * Used for constant-frequency clocks without configurable parents or dividers. */ struct rt_clk_fixed_rate { - struct rt_clk_node clk; + struct rt_clk_cell cell; rt_ubase_t fixed_rate; rt_ubase_t fixed_accuracy; }; /** - * @brief Clk object, it can be clk source or controller + * @brief Clock handle - represents a consumer reference to a clock. + * + * Each consumer obtains an rt_clk instance bound to a specific clk_cell. + * The handle stores consumer-specific constraints such as min/max rate. */ struct rt_clk { - struct rt_clk_node *clk_np; + struct rt_clk_cell *cell; - const char *dev_id; - const char *con_id; + const char *dev_id; /**< Device identifier using this clock */ + const char *con_id; /**< Connection identifier (name) */ - rt_ubase_t rate; - int prepare_count; - int enable_count; - - void *fw_node; - void *priv; + rt_ubase_t min_rate; + rt_ubase_t max_rate; }; /** - * @brief Clk array + * @brief Clock array container. + * + * Represents a group of rt_clk handles, typically used for devices that + * require multiple clock inputs. 
*/ struct rt_clk_array { @@ -116,20 +165,23 @@ struct rt_clk_array struct rt_clk_ops { - rt_err_t (*init)(struct rt_clk *, void *fw_data); - rt_err_t (*finit)(struct rt_clk *); - /* API */ - rt_err_t (*prepare)(struct rt_clk *); - void (*unprepare)(struct rt_clk *); - rt_bool_t (*is_prepared)(struct rt_clk *); - rt_err_t (*enable)(struct rt_clk *); - void (*disable)(struct rt_clk *); - rt_bool_t (*is_enabled)(struct rt_clk *); - rt_err_t (*set_rate)(struct rt_clk *, rt_ubase_t rate, rt_ubase_t parent_rate); - rt_err_t (*set_parent)(struct rt_clk *, struct rt_clk *parent); - rt_err_t (*set_phase)(struct rt_clk *, int degrees); - rt_base_t (*get_phase)(struct rt_clk *); - rt_base_t (*round_rate)(struct rt_clk *, rt_ubase_t drate, rt_ubase_t *prate); + rt_err_t (*prepare)(struct rt_clk_cell *cell); + void (*unprepare)(struct rt_clk_cell *cell); + rt_bool_t (*is_prepared)(struct rt_clk_cell *cell); + + rt_err_t (*enable)(struct rt_clk_cell *cell); + void (*disable)(struct rt_clk_cell *cell); + rt_bool_t (*is_enabled)(struct rt_clk_cell *cell); + + rt_ubase_t (*recalc_rate)(struct rt_clk_cell *cell, rt_ubase_t parent_rate); + rt_base_t (*round_rate)(struct rt_clk_cell *cell, rt_ubase_t drate, rt_ubase_t *prate); + rt_err_t (*set_rate)(struct rt_clk_cell *cell, rt_ubase_t rate, rt_ubase_t parent_rate); + + rt_err_t (*set_parent)(struct rt_clk_cell *cell, rt_uint8_t idx); + rt_uint8_t (*get_parent)(struct rt_clk_cell *cell); + + rt_err_t (*set_phase)(struct rt_clk_cell *cell, int degrees); + rt_base_t (*get_phase)(struct rt_clk_cell *cell); }; struct rt_clk_notifier; @@ -142,7 +194,10 @@ typedef rt_err_t (*rt_clk_notifier_callback)(struct rt_clk_notifier *notifier, rt_ubase_t msg, rt_ubase_t old_rate, rt_ubase_t new_rate); /** - * @brief Clock notifier, it containers of clock list and callback function + * @brief Clock notifier descriptor. + * + * Used to register callbacks for clock events (rate change, abort, etc). + * Each notifier is linked to a specific clock and triggered on rate changes. 
*/ struct rt_clk_notifier { @@ -153,16 +208,14 @@ struct rt_clk_notifier void *priv; }; -rt_err_t rt_clk_register(struct rt_clk_node *clk_np, struct rt_clk_node *parent_np); +rt_err_t rt_clk_register(struct rt_clk_node *clk_np); rt_err_t rt_clk_unregister(struct rt_clk_node *clk_np); rt_err_t rt_clk_notifier_register(struct rt_clk *clk, struct rt_clk_notifier *notifier); rt_err_t rt_clk_notifier_unregister(struct rt_clk *clk, struct rt_clk_notifier *notifier); -rt_err_t rt_clk_set_parent(struct rt_clk *clk, struct rt_clk *clk_parent); - rt_err_t rt_clk_prepare(struct rt_clk *clk); -rt_err_t rt_clk_unprepare(struct rt_clk *clk); +void rt_clk_unprepare(struct rt_clk *clk); rt_err_t rt_clk_enable(struct rt_clk *clk); void rt_clk_disable(struct rt_clk *clk); @@ -171,7 +224,7 @@ rt_err_t rt_clk_prepare_enable(struct rt_clk *clk); void rt_clk_disable_unprepare(struct rt_clk *clk); rt_err_t rt_clk_array_prepare(struct rt_clk_array *clk_arr); -rt_err_t rt_clk_array_unprepare(struct rt_clk_array *clk_arr); +void rt_clk_array_unprepare(struct rt_clk_array *clk_arr); rt_err_t rt_clk_array_enable(struct rt_clk_array *clk_arr); void rt_clk_array_disable(struct rt_clk_array *clk_arr); @@ -185,12 +238,28 @@ rt_err_t rt_clk_set_max_rate(struct rt_clk *clk, rt_ubase_t rate); rt_err_t rt_clk_set_rate(struct rt_clk *clk, rt_ubase_t rate); rt_ubase_t rt_clk_get_rate(struct rt_clk *clk); +rt_base_t rt_clk_round_rate(struct rt_clk *clk, rt_ubase_t rate); + +rt_err_t rt_clk_set_parent(struct rt_clk *clk, struct rt_clk *clk_parent); +struct rt_clk *rt_clk_get_parent(struct rt_clk *clk); + rt_err_t rt_clk_set_phase(struct rt_clk *clk, int degrees); rt_base_t rt_clk_get_phase(struct rt_clk *clk); -rt_base_t rt_clk_round_rate(struct rt_clk *clk, rt_ubase_t rate); +struct rt_clk *rt_clk_cell_get_clk(const struct rt_clk_cell *cell, const char *con_id); -struct rt_clk *rt_clk_get_parent(struct rt_clk *clk); +rt_bool_t rt_clk_cell_is_prepared(const struct rt_clk_cell *cell); + +rt_bool_t rt_clk_cell_is_enabled(const struct rt_clk_cell *cell); + +rt_ubase_t rt_clk_cell_get_rate(const struct rt_clk_cell *cell); + +rt_ubase_t rt_clk_cell_round_rate(struct rt_clk_cell *cell, rt_ubase_t rate); + +struct rt_clk_cell *rt_clk_cell_get_parent(const struct rt_clk_cell *cell); +struct rt_clk_cell *rt_clk_cell_get_parent_by_index(const struct rt_clk_cell *cell, rt_uint8_t idx); +rt_uint8_t rt_clk_cell_get_parent_index(struct rt_clk_cell *cell); +rt_err_t rt_clk_cell_set_parent(struct rt_clk_cell *cell, struct rt_clk_cell *parent); struct rt_clk_array *rt_clk_get_array(struct rt_device *dev); struct rt_clk *rt_clk_get_by_index(struct rt_device *dev, int index); @@ -203,6 +272,8 @@ struct rt_clk_array *rt_ofw_get_clk_array(struct rt_ofw_node *np); struct rt_clk *rt_ofw_get_clk(struct rt_ofw_node *np, int index); struct rt_clk *rt_ofw_get_clk_by_name(struct rt_ofw_node *np, const char *name); rt_ssize_t rt_ofw_count_of_clk(struct rt_ofw_node *clk_ofw_np); +const char *rt_ofw_clk_get_parent_name(struct rt_ofw_node *np, int index); +rt_err_t rt_ofw_clk_set_defaults(struct rt_ofw_node *np); #else rt_inline struct rt_clk *rt_ofw_get_clk(struct rt_ofw_node *np, int index) { @@ -216,6 +287,14 @@ rt_inline rt_ssize_t rt_ofw_count_of_clk(struct rt_ofw_node *clk_ofw_np) { return 0; } +rt_inline const char *rt_ofw_clk_get_parent_name(struct rt_ofw_node *np, int index) +{ + return RT_NULL; +} +rt_inline rt_err_t rt_ofw_clk_set_defaults(struct rt_ofw_node *np) +{ + return RT_EOK; +} #endif /* RT_USING_OFW */ /*! @}*/
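
A few hedged usage sketches for the reworked API follow; they are not part of the patch, and every device name, DT fragment, and helper function in them is invented for illustration. First, the consumer side of rt_clk_get_array() resolving a named clocks/clock-names pair, with include paths assumed to match the RT-Thread DM tree:

#include <rtthread.h>
#include <drivers/clk.h>

/*
 * Assumed DT fragment (illustrative only):
 *
 *     mmc0: mmc@ff000000 {
 *         clocks = <&cru 12>, <&cru 13>;
 *         clock-names = "biu", "ciu";
 *     };
 */
static rt_err_t sample_mmc_enable_clks(struct rt_device *dev)
{
    rt_err_t err;
    struct rt_clk_array *clks = rt_clk_get_array(dev);

    if (rt_is_err_or_null(clks))
    {
        /* No "clocks" property at all is reported as RT_NULL */
        return clks ? rt_ptr_err(clks) : -RT_EEMPTY;
    }

    if ((err = rt_clk_array_prepare(clks)))
    {
        rt_clk_array_put(clks);
        return err;
    }

    if ((err = rt_clk_array_enable(clks)))
    {
        rt_clk_array_unprepare(clks);
        rt_clk_array_put(clks);
        return err;
    }

    /* A single member can still be fetched by connection id if needed: */
    /* struct rt_clk *ciu = rt_ofw_get_clk_by_name(dev->ofw_node, "ciu"); */

    return RT_EOK;
}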
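
The rt_ofw_clk_set_defaults() documentation above lists the assigned-clocks bindings; a minimal sketch of a matching device tree fragment, with invented phandles and rates. The platform_probe() hunk in this patch applies the defaults automatically, so an explicit call is only needed off the platform bus:

#include <drivers/clk.h>
#include <drivers/ofw.h>

/*
 * Assumed DT fragment (values are invented): pin output 37 of "cru" to
 * 400 MHz and re-parent it to "pll_gpll" before the consumer probes.
 *
 *     uart2: serial@ff120000 {
 *         clocks = <&cru 37>;
 *         assigned-clocks = <&cru 37>;
 *         assigned-clock-parents = <&pll_gpll>;
 *         assigned-clock-rates = <400000000>;
 *     };
 */
static rt_err_t sample_apply_clk_defaults(struct rt_ofw_node *np)
{
    /* For platform devices this already happens in platform_probe(). */
    return rt_ofw_clk_set_defaults(np);
}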
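
The "Typical usage" steps in the rt_clk_node comment, end to end: a minimal provider sketch assuming a hypothetical two-output controller (demo_clk, the fixed /2 divider, and all names are invented; only recalc_rate is implemented, and a real driver would add enable/set_rate callbacks and read real registers):

#include <rtthread.h>
#include <drivers/clk.h>
#include <drivers/ofw.h>

#define DEMO_NR_OUTPUTS 2

/* Hypothetical controller exporting two clock cells. */
struct demo_clk
{
    struct rt_clk_node parent;

    struct rt_clk_cell cell[DEMO_NR_OUTPUTS];
    struct rt_clk_cell *cells[DEMO_NR_OUTPUTS];

    void *base; /* MMIO base, unused in this sketch */
};

static rt_ubase_t demo_clk_recalc_rate(struct rt_clk_cell *cell, rt_ubase_t parent_rate)
{
    /* Fixed /2 divider for the sketch; real hardware would read a register. */
    return parent_rate / 2;
}

static const struct rt_clk_ops demo_clk_ops =
{
    .recalc_rate = demo_clk_recalc_rate,
};

/* Resolve "clocks = <&demo N>" to the N-th cell (one #clock-cells argument). */
static struct rt_clk_cell *demo_clk_ofw_parse(struct rt_clk_node *clk_np,
        struct rt_ofw_cell_args *args)
{
    rt_uint32_t idx = args->args_count ? args->args[0] : 0;

    return idx < clk_np->cells_nr ? clk_np->cells[idx] : RT_NULL;
}

static rt_err_t demo_clk_register(struct rt_device *dev)
{
    rt_err_t err;
    static const char * const names[DEMO_NR_OUTPUTS] = { "demo_ahb", "demo_apb" };
    struct demo_clk *dc = rt_calloc(1, sizeof(*dc));

    if (!dc)
    {
        return -RT_ENOMEM;
    }

    for (int i = 0; i < DEMO_NR_OUTPUTS; ++i)
    {
        dc->cell[i].name = names[i];
        dc->cell[i].ops = &demo_clk_ops;
        /* e.g. dc->cell[i].flags = RT_CLK_F_IS_CRITICAL; for always-on outputs */
        dc->cells[i] = &dc->cell[i];
    }

    dc->parent.dev = dev;
    dc->parent.cells_nr = DEMO_NR_OUTPUTS;
    dc->parent.cells = dc->cells;
    dc->parent.ofw_parse = demo_clk_ofw_parse;

    if ((err = rt_clk_register(&dc->parent)))
    {
        rt_free(dc);
    }

    return err;
}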
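
On the consumer side, the rt_clk handle keeps the familiar prepare/enable flow while rate state now lives in the shared rt_clk_cell. A minimal sketch, assuming a hypothetical serial driver and a 48 MHz target rate:

#include <rtthread.h>
#include <drivers/clk.h>

/* Hypothetical consumer: take clock 0 of the device and run it at 48 MHz. */
static rt_err_t sample_serial_start_clk(struct rt_device *dev)
{
    rt_err_t err;
    struct rt_clk *clk = rt_clk_get_by_index(dev, 0);

    if (rt_is_err_or_null(clk))
    {
        return clk ? rt_ptr_err(clk) : -RT_EEMPTY;
    }

    if ((err = rt_clk_prepare_enable(clk)))
    {
        rt_clk_put(clk);
        return err;
    }

    rt_clk_set_rate(clk, 48000000);
    rt_kprintf("serial clk at %lu Hz\n", (unsigned long)rt_clk_get_rate(clk));

    /* Teardown (e.g. in the remove path): */
    /* rt_clk_disable_unprepare(clk); */
    /* rt_clk_put(clk); */

    return RT_EOK;
}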
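
rt_clk_round_rate(), rt_clk_set_parent() and rt_clk_get_parent() now operate on the consumer handle. A small sketch, assuming a negative return from rt_clk_round_rate() may be treated as an error code, as the rt_base_t signature suggests (the function name and rate are invented):

#include <rtthread.h>
#include <drivers/clk.h>

/* Ask the framework what rate it would grant, then re-parent and apply it. */
static rt_err_t demo_tune_clk(struct rt_clk *clk, struct rt_clk *new_parent,
        rt_ubase_t wanted_rate)
{
    rt_err_t err;
    rt_base_t rounded = rt_clk_round_rate(clk, wanted_rate);

    if (rounded < 0)
    {
        return (rt_err_t)rounded;
    }

    if ((err = rt_clk_set_parent(clk, new_parent)))
    {
        return err;
    }

    return rt_clk_set_rate(clk, (rt_ubase_t)rounded);
}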
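
Finally, rate-change notifiers keep the callback signature shown above. A registration sketch, assuming the callback (and priv) members of struct rt_clk_notifier are unchanged from the previous API, since only priv is visible in this hunk:

#include <rtthread.h>
#include <drivers/clk.h>

/* Invoked by the framework around rate changes of the watched clock. */
static rt_err_t demo_rate_notify(struct rt_clk_notifier *notifier,
        rt_ubase_t msg, rt_ubase_t old_rate, rt_ubase_t new_rate)
{
    rt_kprintf("clk rate change: %lu -> %lu (msg %lu)\n",
            (unsigned long)old_rate, (unsigned long)new_rate, (unsigned long)msg);

    return RT_EOK;
}

static struct rt_clk_notifier demo_notifier =
{
    /* `callback` (and `priv`) member names are assumed from the previous API. */
    .callback = demo_rate_notify,
};

static rt_err_t demo_watch_clk(struct rt_clk *clk)
{
    return rt_clk_notifier_register(clk, &demo_notifier);
}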