Skip to content

Commit 0ad3aec

Browse files
committed
Create fake-migrator nodes to debug inter-migration limitations.
1 parent 5849947 commit 0ad3aec

File tree

2 files changed

+193
-3
lines changed

2 files changed

+193
-3
lines changed

conda_forge_tick/make_migrators.py

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1004,6 +1004,11 @@ def load_migrators(skip_paused: bool = True) -> MutableSequence[Migrator]:
10041004
pinning_migrators = []
10051005
longterm_migrators = []
10061006
all_names = get_all_keys_for_hashmap("migrators")
1007+
# Only load python314 and python314t migrators - filter BEFORE submitting to pool
1008+
allowed_migrators = {"python314", "python314t"}
1009+
all_names = [name for name in all_names if name in allowed_migrators]
1010+
print(f"Loading only: {all_names}", flush=True)
1011+
10071012
with executor("process", 2) as pool:
10081013
futs = [pool.submit(_load, name) for name in all_names]
10091014

@@ -1027,11 +1032,13 @@ def load_migrators(skip_paused: bool = True) -> MutableSequence[Migrator]:
10271032
else:
10281033
migrators.append(migrator)
10291034

1030-
version_migrator = _make_version_migrator(load_existing_graph())
1035+
# Commented out - version migrator is slow
1036+
# version_migrator = _make_version_migrator(load_existing_graph())
10311037

10321038
RNG.shuffle(pinning_migrators)
10331039
RNG.shuffle(longterm_migrators)
1034-
migrators = [version_migrator] + migrators + pinning_migrators + longterm_migrators
1040+
# migrators = [version_migrator] + migrators + pinning_migrators + longterm_migrators
1041+
migrators = migrators + pinning_migrators + longterm_migrators
10351042

10361043
return migrators
10371044

conda_forge_tick/status_report.py

Lines changed: 184 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424
ArchRebuild,
2525
GraphMigrator,
2626
MatplotlibBase,
27+
MigrationYaml,
2728
MigrationYamlCreator,
2829
Migrator,
2930
OSXArm,
@@ -52,6 +53,21 @@
5253
"unstable",
5354
]
5455

56+
# Lazily-populated cache mapping report_name -> Migrator instance.
_migrators_by_name_cache: Dict[str, Migrator] | None = None


def _get_migrators_by_name() -> Dict[str, Migrator]:
    """Return a mapping from migrator ``report_name`` to its instance.

    The mapping is built once via ``load_migrators(skip_paused=False)`` and
    memoized in a module-level cache so repeated calls do not reload the
    migrators.
    """
    global _migrators_by_name_cache
    if _migrators_by_name_cache is not None:
        return _migrators_by_name_cache
    loaded = load_migrators(skip_paused=False)
    _migrators_by_name_cache = {mig.report_name: mig for mig in loaded}
    return _migrators_by_name_cache
5571

5672
def _sorted_set_json(obj: Any) -> Any:
5773
"""If obj is a set, return sorted(obj). Else, raise TypeError.
@@ -164,6 +180,40 @@ def write_version_migrator_status(migrator, mctx):
164180
)
165181

166182

183+
def _get_waiting_migrators(migrator: Migrator, attrs: dict) -> list[str]:
    """Get the list of migrators that this package is still waiting for.

    Parameters
    ----------
    migrator
        The migrator currently being processed. Only ``MigrationYaml``
        migrators can declare ``wait_for_migrators`` in their
        ``__migrator`` payload; anything else yields an empty list.
    attrs
        The node's payload attributes; ``attrs["pr_info"]["PRed"]`` is
        scanned for migrations belonging to the awaited migrators.

    Returns
    -------
    list[str]
        The full ``wait_for_migrators`` list if at least one awaited
        migration is not yet closed, or has no recorded migration on this
        node at all; otherwise an empty list (not waiting).
    """
    if not isinstance(migrator, MigrationYaml):
        return []

    migrator_payload = migrator.loaded_yaml.get("__migrator", {})
    wait_for_migrators = migrator_payload.get("wait_for_migrators", [])

    if not wait_for_migrators:
        return []

    # Build the set once so membership tests in the scan loop are O(1)
    # instead of O(len(wait_for_migrators)) per PRed entry.
    wanted = set(wait_for_migrators)

    # Check if we're actually waiting (i.e., migrators not all closed)
    found_migrators = set()
    for migration in attrs.get("pr_info", {}).get("PRed", []):
        name = migration.get("data", {}).get("name", "")
        if not name or name not in wanted:
            continue
        found_migrators.add(name)
        state = migration.get("PR", {}).get("state", "")
        if state != "closed":
            # Still waiting for this one
            return list(wait_for_migrators)

    # An awaited migrator with no migration recorded on this node also
    # counts as waiting.
    if wanted - found_migrators:
        return list(wait_for_migrators)

    return []
167217
def graph_migrator_status(
168218
migrator: Migrator,
169219
gx: nx.DiGraph,
@@ -200,6 +250,7 @@ def graph_migrator_status(
200250

201251
for node, node_attrs in gx2.nodes.items():
202252
attrs = node_attrs["payload"]
253+
203254
# remove archived from status
204255
if attrs.get("archived", False):
205256
continue
@@ -344,20 +395,133 @@ def graph_migrator_status(
344395
)
345396
node_metadata["pr_status"] = pr_json["PR"].get("mergeable_state", "")
346397

398+
# Collect waiting migrators info for creating fake nodes
399+
waiting_migrators_map: Dict[str, list[str]] = {}
400+
for node, node_attrs in gx2.nodes.items():
401+
attrs = node_attrs["payload"]
402+
403+
# remove archived from status
404+
if attrs.get("archived", False):
405+
continue
406+
407+
# Check if waiting for other migrators
408+
waiting_migrators = _get_waiting_migrators(migrator, attrs)
409+
if waiting_migrators:
410+
waiting_migrators_map[node] = waiting_migrators
411+
412+
# Add fake migrator nodes and edges after processing regular nodes
413+
# Create one fake node per waiting migrator PR: migrator_{migrator_name}_{node}_{pr_number}
414+
# This represents the PR for the waiting migrator on the node itself (not its predecessors)
415+
fake_migrator_nodes: Dict[str, Dict] = {}
416+
417+
for node, migrator_names in waiting_migrators_map.items():
418+
node_attrs = gx2.nodes[node]
419+
attrs = node_attrs["payload"]
420+
421+
for migrator_name in migrator_names:
422+
# Look up the correct migrator instance for the waiting migrator
423+
# (not the current migrator being processed)
424+
migrators_by_name = _get_migrators_by_name()
425+
if migrator_name not in migrators_by_name:
426+
# Migrator not found, skip
427+
continue
428+
waiting_migrator = migrators_by_name[migrator_name]
429+
430+
# Check if this node has a PR for the waiting migrator
431+
nuid = waiting_migrator.migrator_uid(attrs)
432+
433+
nuid_data = frozen_to_json_friendly(nuid)["data"]
434+
435+
# Look for a matching PR in this node's PRed list
436+
# No need to copy since we're not modifying the PR data
437+
matching_pr_json = None
438+
for pr_json in attrs.get("pr_info", {}).get("PRed", []):
439+
if pr_json and pr_json.get("data") == nuid_data:
440+
matching_pr_json = pr_json
441+
break
442+
443+
if matching_pr_json is None:
444+
# Node doesn't have a PR for this migrator yet - skip
445+
continue
446+
447+
# Get PR data
448+
pr_data = matching_pr_json.get("PR", {})
449+
pr_number = pr_data.get("number")
450+
if pr_number is None:
451+
continue
452+
453+
# Create fake node name: migrator_{migrator_name}_{node}_{pr_number}
454+
# I'm not sure we could have 2 PRs for the same package in another migration,
455+
# but just to be safe
456+
fake_parent = f"migrator_{migrator_name}_{node}_{pr_number}"
457+
458+
# Add fake node to graph if it doesn't exist
459+
if fake_parent not in gx2.nodes():
460+
gx2.add_node(fake_parent, payload={})
461+
462+
pr_url = pr_data.get("html_url", "")
463+
pr_status = pr_data.get("state", "")
464+
465+
if not pr_url:
466+
feedstock_name = attrs.get("feedstock_name", node)
467+
pr_url = f"https://github.com/conda-forge/{feedstock_name}-feedstock/pull/{pr_number}"
468+
469+
fake_migrator_nodes[fake_parent] = {
470+
"pre_pr_migrator_status": "",
471+
"pr_url": pr_url,
472+
"pr_status": pr_status,
473+
}
474+
feedstock_metadata[fake_parent] = fake_migrator_nodes[fake_parent]
475+
476+
# Set status based on PR state
477+
if pr_status == "closed":
478+
# PR is closed but package is still waiting - this is an error!
479+
out["bot-error"].add(fake_parent)
480+
print(
481+
f"Package '{node}' waiting for migrator '{migrator_name}' but PR #{pr_number} is already closed. "
482+
f"Waiting logic may be incorrect.",
483+
flush=True,
484+
)
485+
else:
486+
# PR is open or in progress
487+
out["in-pr"].add(fake_parent)
488+
489+
# Add edge from fake migrator node to waiting package
490+
# (migrator blocks package, so migrator -> package)
491+
if node in gx2.nodes() and not gx2.has_edge(fake_parent, node):
492+
gx2.add_edge(fake_parent, node)
493+
494+
# Populate descendants and children for fake migrator nodes (must be done after all edges are added)
495+
for node_name, node_metadata in fake_migrator_nodes.items():
496+
node_metadata["num_descendants"] = len(nx.descendants(gx2, node_name))
497+
node_metadata["immediate_children"] = [
498+
k
499+
for k in sorted(gx2.successors(node_name))
500+
if not gx2[k].get("payload", {}).get("archived", False)
501+
]
502+
347503
out2: Dict = {}
348504
for k in out.keys():
505+
# Include all items, even if not in build_sequence (like fake migrator nodes)
349506
out2[k] = list(
350507
sorted(
351508
out[k],
352509
key=lambda x: (
353-
build_sequence.index(x) if x in build_sequence else -1,
510+
build_sequence.index(x)
511+
if x in build_sequence
512+
else len(build_sequence),
354513
x,
355514
),
356515
),
357516
)
358517

359518
out2["_feedstock_status"] = feedstock_metadata
519+
360520
for (e0, e1), edge_attrs in gx2.edges.items():
521+
# Skip edges involving fake migrator nodes - handle separately
522+
if e0.startswith("migrator_") or e1.startswith("migrator_"):
523+
continue
524+
361525
if (
362526
e0 not in out["done"]
363527
and e1 not in out["done"]
@@ -366,6 +530,25 @@ def graph_migrator_status(
366530
):
367531
gv.edge(e0, e1)
368532

533+
# Add nodes and edges for fake migrator parents in visualization
534+
# (Metadata and awaiting-parents status already added above, before sorting)
535+
for node_name in gx2.nodes():
536+
if node_name.startswith("migrator_"):
537+
migrator_display_name = node_name.replace("migrator_", "")
538+
539+
# Style migrator nodes differently (awaiting-parents color is #fde725)
540+
gv.node(
541+
node_name,
542+
label=_clean_text(migrator_display_name),
543+
fillcolor="#fde725", # Same color as awaiting-parents
544+
style="filled,dashed",
545+
fontcolor="black",
546+
)
547+
# Add edges from migrator to waiting packages with dashed style
548+
for successor in gx2.successors(node_name):
549+
if successor not in out["done"]:
550+
gv.edge(node_name, successor, style="dashed", color="orange")
551+
369552
print(" len(gv):", num_viz, flush=True)
370553
out2["_num_viz"] = num_viz
371554

0 commit comments

Comments
 (0)