From 644c7c76071d5c1bb17c78cde8b75a0c3f4e8767 Mon Sep 17 00:00:00 2001 From: Chris Burr Date: Fri, 7 Feb 2025 10:52:05 +0100 Subject: [PATCH] Support local running of "conda-forge-tick --dry-run auto-tick" --- README.md | 4 ++- conda_forge_tick/auto_tick.py | 56 ++++++++++++++++++++---------- conda_forge_tick/cli.py | 12 +++++-- conda_forge_tick/make_migrators.py | 22 ++++++++++-- 4 files changed, 70 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index 2f2963961..cd78b25f8 100644 --- a/README.md +++ b/README.md @@ -281,9 +281,10 @@ pip install -e . Then you can use the CLI like this: ```bash -conda-forge-tick --help +conda-forge-tick --dry-run ... ``` +See `--help` for a list of the commands available. For debugging, use the `--debug` flag. This enables debug logging and disables multiprocessing. Note that the bot expects the [conda-forge dependency graph](https://github.com/regro/cf-graph-countyfair) to be @@ -298,6 +299,7 @@ The local debugging functionality is still work in progress and might not work f Currently, the following commands are supported and tested: - `update-upstream-versions` +- `auto-tick`: To ease debugging, pass `--filter-pattern=` to give a regex of which migrators to enable. 
### Structure of the Bot's Jobs diff --git a/conda_forge_tick/auto_tick.py b/conda_forge_tick/auto_tick.py index 09dbc6c1a..4dfaa6d13 100644 --- a/conda_forge_tick/auto_tick.py +++ b/conda_forge_tick/auto_tick.py @@ -436,6 +436,7 @@ def run_with_tmpdir( git_backend: GitPlatformBackend, rerender: bool = True, base_branch: str = "main", + dry_run: bool = False, **kwargs: typing.Any, ) -> tuple[MigrationUidTypedDict, dict] | tuple[Literal[False], Literal[False]]: """ @@ -454,11 +455,12 @@ def run_with_tmpdir( git_backend=git_backend, rerender=rerender, base_branch=base_branch, + dry_run=dry_run, **kwargs, ) -def _make_and_sync_pr_lazy_json(pr_data) -> LazyJson: +def _make_and_sync_pr_lazy_json(pr_data, dry_run) -> LazyJson: if pr_data: pr_lazy_json = LazyJson( os.path.join("pr_json", f"{pr_data.id}.json"), @@ -466,7 +468,7 @@ def _make_and_sync_pr_lazy_json(pr_data) -> LazyJson: with pr_lazy_json as __edit_pr_lazy_json: __edit_pr_lazy_json.update(**pr_data.model_dump(mode="json")) - if "id" in pr_lazy_json: + if "id" in pr_lazy_json and not dry_run: sync_lazy_json_object(pr_lazy_json, "file", ["github_api"]) else: @@ -481,6 +483,7 @@ def run( git_backend: GitPlatformBackend, rerender: bool = True, base_branch: str = "main", + dry_run: bool = False, **kwargs: typing.Any, ) -> tuple[MigrationUidTypedDict, dict] | tuple[Literal[False], Literal[False]]: """For a given feedstock and migration run the migration @@ -557,7 +560,7 @@ def run( # spoof this so it looks like the package is done pr_data = get_spoofed_closed_pr_info() - pr_lazy_json = _make_and_sync_pr_lazy_json(pr_data) + pr_lazy_json = _make_and_sync_pr_lazy_json(pr_data, dry_run) _reset_pre_pr_migrator_fields( context.attrs, migrator_name, is_version=is_version_migration ) @@ -652,7 +655,7 @@ def run( comment=rerender_info.rerender_comment, ) - pr_lazy_json = _make_and_sync_pr_lazy_json(pr_data) + pr_lazy_json = _make_and_sync_pr_lazy_json(pr_data, dry_run) # If we've gotten this far then the node is good with 
context.attrs["pr_info"] as pri: @@ -731,6 +734,7 @@ def _run_migrator_on_feedstock_branch( mctx, migrator_name, good_prs, + dry_run, ): break_loop = False sync_pr_info = False @@ -748,6 +752,7 @@ def _run_migrator_on_feedstock_branch( rerender=migrator.rerender, base_branch=base_branch, hash_type=attrs.get("hash_type", "sha256"), + dry_run=dry_run, ) finally: fctx.attrs.pop("new_version", None) @@ -901,19 +906,22 @@ def _run_migrator_on_feedstock_branch( if sync_pr_info: with attrs["pr_info"] as pri: pass - sync_lazy_json_object(pri, "file", ["github_api"]) + if not dry_run: + sync_lazy_json_object(pri, "file", ["github_api"]) if sync_version_pr_info: with attrs["version_pr_info"] as vpri: pass - sync_lazy_json_object(vpri, "file", ["github_api"]) + if not dry_run: + sync_lazy_json_object(vpri, "file", ["github_api"]) return good_prs, break_loop -def _is_migrator_done(_mg_start, good_prs, time_per, pr_limit, tried_prs): +def _is_migrator_done( + _mg_start, good_prs, time_per, pr_limit, tried_prs, backend: GitPlatformBackend +): curr_time = time.time() - backend = github_backend() api_req = backend.get_api_requests_left() if curr_time - START_TIME > TIMEOUT: @@ -957,7 +965,9 @@ def _is_migrator_done(_mg_start, good_prs, time_per, pr_limit, tried_prs): return False -def _run_migrator(migrator, mctx, temp, time_per, git_backend: GitPlatformBackend): +def _run_migrator( + migrator, mctx, temp, time_per, git_backend: GitPlatformBackend, dry_run +): _mg_start = time.time() migrator_name = get_migrator_name(migrator) @@ -1013,7 +1023,7 @@ def _run_migrator(migrator, mctx, temp, time_per, git_backend: GitPlatformBacken ) if _is_migrator_done( - _mg_start, good_prs, time_per, migrator.pr_limit, tried_prs + _mg_start, good_prs, time_per, migrator.pr_limit, tried_prs, git_backend ): return 0 @@ -1032,7 +1042,7 @@ def _run_migrator(migrator, mctx, temp, time_per, git_backend: GitPlatformBacken # Don't let CI timeout, break ahead of the timeout so we make certain # to write to 
the repo if _is_migrator_done( - _mg_start, good_prs, time_per, migrator.pr_limit, tried_prs + _mg_start, good_prs, time_per, migrator.pr_limit, tried_prs, git_backend ): break @@ -1089,6 +1099,7 @@ def _run_migrator(migrator, mctx, temp, time_per, git_backend: GitPlatformBacken mctx=mctx, migrator_name=migrator_name, good_prs=good_prs, + dry_run=dry_run, ) if break_loop: break @@ -1278,15 +1289,16 @@ def _update_graph_with_pr_info(): dump_graph(gx) -def main(ctx: CliContext) -> None: +def main(ctx: CliContext, no_update_graph: bool, filter_pattern: str | None) -> None: global START_TIME START_TIME = time.time() _setup_limits() - with fold_log_lines("updating graph with PR info"): - _update_graph_with_pr_info() - deploy(ctx, dirs_to_deploy=["version_pr_info", "pr_json", "pr_info"]) + if not no_update_graph: + with fold_log_lines("updating graph with PR info"): + _update_graph_with_pr_info() + deploy(ctx, dirs_to_deploy=["version_pr_info", "pr_json", "pr_info"]) # record tmp dir so we can be sure to clean it later temp = glob.glob("/tmp/*") @@ -1305,7 +1317,7 @@ def main(ctx: CliContext) -> None: smithy_version=smithy_version, pinning_version=pinning_version, ) - migrators = load_migrators() + migrators = load_migrators(pattern=filter_pattern) # compute the time per migrator with fold_log_lines("computing migrator run times"): @@ -1339,7 +1351,15 @@ def main(ctx: CliContext) -> None: git_backend = github_backend() if not ctx.dry_run else DryRunBackend() for mg_ind, migrator in enumerate(migrators): - _run_migrator(migrator, mctx, temp, time_per_migrator[mg_ind], git_backend) + _run_migrator( + migrator, + mctx, + temp, + time_per_migrator[mg_ind], + git_backend, + dry_run=ctx.dry_run, + ) - logger.info("API Calls Remaining: %d", github_backend().get_api_requests_left()) + if not ctx.dry_run: + logger.info("API Calls Remaining: %d", git_backend.get_api_requests_left()) logger.info("Done") diff --git a/conda_forge_tick/cli.py b/conda_forge_tick/cli.py index 
370448b69..8887afce6 100644 --- a/conda_forge_tick/cli.py +++ b/conda_forge_tick/cli.py @@ -160,11 +160,19 @@ def update_upstream_versions( @main.command(name="auto-tick") +@click.option( + "--no-update-graph", is_flag=True, help="Don't update the graph with PR info" +) +@click.option( + "--filter-pattern", default=None, help="Only run migrators matching this pattern" +) @pass_context -def auto_tick(ctx: CliContext) -> None: +def auto_tick( + ctx: CliContext, no_update_graph: bool, filter_pattern: Optional[str] = None +) -> None: from . import auto_tick - auto_tick.main(ctx) + auto_tick.main(ctx, no_update_graph, filter_pattern) @main.command(name="make-status-report") diff --git a/conda_forge_tick/make_migrators.py b/conda_forge_tick/make_migrators.py index e4fc13978..50015b78f 100644 --- a/conda_forge_tick/make_migrators.py +++ b/conda_forge_tick/make_migrators.py @@ -872,13 +872,17 @@ def _load(name): return make_from_lazy_json_data(lzj.data) -def load_migrators(skip_paused: bool = True) -> MutableSequence[Migrator]: +def load_migrators( + skip_paused: bool = True, pattern: str | None = None +) -> MutableSequence[Migrator]: """Loads all current migrators. Parameters ---------- skip_paused : bool, optional Whether to skip paused migrators, defaults to True. + pattern : str, optional + A regular expression pattern to filter migrators, defaults to None. Returns ------- @@ -890,6 +894,15 @@ def load_migrators(skip_paused: bool = True) -> MutableSequence[Migrator]: pinning_migrators = [] longterm_migrators = [] all_names = get_all_keys_for_hashmap("migrators") + if pattern is not None: + original_all_names = all_names + all_names = [n for n in all_names if re.fullmatch(pattern, n)] + if not all_names: + raise ValueError( + f"No migrators found matching pattern {pattern}. 
" + f"Available migrators: {original_all_names}" + ) + print(f"Reduced migrators from {len(original_all_names)} to {len(all_names)}") with executor("process", 4) as pool: futs = [pool.submit(_load, name) for name in all_names] @@ -914,11 +927,14 @@ def load_migrators(skip_paused: bool = True) -> MutableSequence[Migrator]: migrators.append(migrator) if version_migrator is None: - raise RuntimeError("No version migrator found in the migrators directory!") + if pattern is None: + raise RuntimeError("No version migrator found in the migrators directory!") + else: + migrators.insert(0, version_migrator) RNG.shuffle(pinning_migrators) RNG.shuffle(longterm_migrators) - migrators = [version_migrator] + migrators + pinning_migrators + longterm_migrators + migrators += pinning_migrators + longterm_migrators return migrators