diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/counter_map.rs b/turbopack/crates/turbo-tasks-backend/src/backend/counter_map.rs
index 4dd4444f955c5..108825abbf77e 100644
--- a/turbopack/crates/turbo-tasks-backend/src/backend/counter_map.rs
+++ b/turbopack/crates/turbo-tasks-backend/src/backend/counter_map.rs
@@ -106,15 +106,7 @@ impl CounterMap {
     {
         self.0.get(key)
     }
-    // TODO(lukesandberg): this is just here for the CachedDataItem adaptor layer, can be removed
-    // once that is gone.
-    #[doc(hidden)]
-    pub fn get_mut(&mut self, _key: &K) -> Option<&mut V>
-    where
-        K: Eq + Hash,
-    {
-        unreachable!("This should never be called, please insert instead")
-    }
+    pub fn iter(&self) -> impl Iterator<Item = (&K, &V)> {
         self.0.iter()
     }
diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/mod.rs b/turbopack/crates/turbo-tasks-backend/src/backend/mod.rs
index 6e1bba6de53b8..bc7af478d8952 100644
--- a/turbopack/crates/turbo-tasks-backend/src/backend/mod.rs
+++ b/turbopack/crates/turbo-tasks-backend/src/backend/mod.rs
@@ -26,8 +26,8 @@ use tokio::time::{Duration, Instant};
 use tracing::{Span, trace_span};
 use turbo_tasks::{
     CellId, FxDashMap, RawVc, ReadCellOptions, ReadCellTracking, ReadConsistency,
-    ReadOutputOptions, ReadTracking, TRANSIENT_TASK_BIT, TaskExecutionReason, TaskId, TaskPriority,
-    TraitTypeId, TurboTasksBackendApi, ValueTypeId,
+    ReadOutputOptions, ReadTracking, SharedReference, TRANSIENT_TASK_BIT, TaskExecutionReason,
+    TaskId, TaskPriority, TraitTypeId, TurboTasksBackendApi, ValueTypeId,
     backend::{
         Backend, CachedTaskType, CellContent, TaskExecutionSpec, TransientTaskRoot,
         TransientTaskType, TurboTasksExecutionError, TypedCellContent, VerificationMode,
@@ -57,16 +57,13 @@ use crate::{
         TaskGuard, connect_children, get_aggregation_number, get_uppers, is_root_node,
         make_task_dirty_internal, prepare_new_children,
     },
-        storage::{
-            Storage, count, get, get_many, get_mut, get_mut_or_insert_with, iter_many, remove,
-        },
+        storage::Storage,
         storage_schema::{TaskStorage, TaskStorageAccessors},
     },
     backing_storage::BackingStorage,
     data::{
-        ActivenessState, AggregationNumber, CachedDataItem, CachedDataItemKey, CachedDataItemType,
-        CachedDataItemValueRef, CellRef, CollectibleRef, CollectiblesRef, Dirtyness,
-        InProgressCellState, InProgressState, InProgressStateInner, OutputValue, RootType,
+        ActivenessState, CellRef, CollectibleRef, CollectiblesRef, Dirtyness, InProgressCellState,
+        InProgressState, InProgressStateInner, OutputValue, RootType,
     },
     utils::{
         bi_map::BiMap, chunked_vec::ChunkedVec, dash_map_drop_contents::drop_contents,
@@ -522,7 +519,7 @@ impl TurboTasksBackendInner {
         ctx: &impl ExecuteContext<'_>,
     ) -> Option<Result<Result<RawVc, EventListener>, anyhow::Error>> {
-        match get!(task, InProgress) {
+        match task.get_in_progress() {
             Some(InProgressState::Scheduled { done_event, .. }) => Some(Ok(Err(
                 listen_to_done_event(this, reader, tracking, done_event),
             ))),
@@ -577,7 +574,7 @@ impl TurboTasksBackendInner {
         // Check the dirty count of the root node
         let has_dirty_containers = task.has_dirty_containers();
         if has_dirty_containers || is_dirty.is_some() {
-            let activeness = get_mut!(task, Activeness);
+            let activeness = task.get_activeness_mut();
             let mut task_ids_to_schedule: Vec<_> = Vec::new();
             // When there are dirty task, subscribe to the all_clean_event
             let activeness = if let Some(activeness) = activeness {
@@ -590,14 +587,15 @@ impl TurboTasksBackendInner {
                 // If we don't have a root state, add one. This also makes sure all tasks stay
                 // active and this task won't stale. active_until_clean
                 // is automatically removed when this task is clean.
-                get_mut_or_insert_with!(task, Activeness, || ActivenessState::new(task_id))
-                    .set_active_until_clean();
                 if ctx.should_track_activeness() {
                     // A newly added Activeness need to make sure to schedule the tasks
                     task_ids_to_schedule = task.dirty_containers().collect();
                     task_ids_to_schedule.push(task_id);
                 }
-                get!(task, Activeness).unwrap()
+                let activeness =
+                    task.get_activeness_mut_or_insert_with(|| ActivenessState::new(task_id));
+                activeness.set_active_until_clean();
+                activeness
             };
             let listener = activeness.all_clean_event.listen_with_note(move || {
                 let this = self.clone();
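// Sketch (not part of the patch): the `get_activeness_mut_or_insert_with` accessor
// used above can be understood as a plain get-or-insert over an Option field. `State`
// and `Task` are stand-ins here; the real accessors are generated by `storage_schema`
// and are not shown in this diff.

#[derive(Default)]
struct State {
    active_until_clean: bool,
}

struct Task {
    activeness: Option<State>,
}

impl Task {
    // Return the existing state, or insert one built by `f`, then hand out `&mut`.
    fn get_activeness_mut_or_insert_with(&mut self, f: impl FnOnce() -> State) -> &mut State {
        self.activeness.get_or_insert_with(f)
    }
}

fn main() {
    let mut task = Task { activeness: None };
    task.get_activeness_mut_or_insert_with(State::default)
        .active_until_clean = true;
    assert!(task.activeness.as_ref().is_some_and(|s| s.active_until_clean));
}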
@@ -620,12 +618,13 @@ impl TurboTasksBackendInner {
         let task = ctx.task(task_id, TaskDataCategory::All);
         let is_dirty = task.is_dirty();
         let in_progress =
-            get!(task, InProgress).map_or("not in progress", |p| match p {
-                InProgressState::InProgress(_) => "in progress",
-                InProgressState::Scheduled { .. } => "scheduled",
-                InProgressState::Canceled => "canceled",
-            });
-        let activeness = get!(task, Activeness).map_or_else(
+            task.get_in_progress()
+                .map_or("not in progress", |p| match p {
+                    InProgressState::InProgress(_) => "in progress",
+                    InProgressState::Scheduled { .. } => "scheduled",
+                    InProgressState::Canceled => "canceled",
+                });
+        let activeness = task.get_activeness().map_or_else(
             || "not active".to_string(),
             |activeness| format!("{activeness:?}"),
         );
@@ -719,7 +718,7 @@ impl TurboTasksBackendInner {
             return value;
         }
 
-        if let Some(output) = get!(task, Output) {
+        if let Some(output) = task.get_output() {
             let result = match output {
                 OutputValue::Cell(cell) => Ok(Ok(RawVc::TaskCell(cell.task, cell.cell))),
                 OutputValue::Output(task) => Ok(Ok(RawVc::TaskOutput(*task))),
@@ -729,7 +728,7 @@ impl TurboTasksBackendInner {
             };
             if let Some(mut reader_task) = reader_task
                 && options.tracking.should_track(result.is_err())
-                && (!task.is_immutable() || cfg!(feature = "verify_immutable"))
+                && (!task.immutable() || cfg!(feature = "verify_immutable"))
             {
                 #[cfg(feature = "trace_task_output_dependencies")]
                 let _span = tracing::trace_span!(
@@ -740,14 +739,11 @@ impl TurboTasksBackendInner {
                 .entered();
                 let mut queue = LeafDistanceUpdateQueue::new();
                 let reader = reader.unwrap();
-                if task.add(CachedDataItem::OutputDependent {
-                    task: reader,
-                    value: (),
-                }) {
+                if task.add_output_dependent(reader) {
                     // Ensure that dependent leaf distance is strictly monotonic increasing
-                    let leaf_distance = get!(task, LeafDistance).copied().unwrap_or_default();
+                    let leaf_distance = task.get_leaf_distance().copied().unwrap_or_default();
                     let reader_leaf_distance =
-                        get!(reader_task, LeafDistance).copied().unwrap_or_default();
+                        reader_task.get_leaf_distance().copied().unwrap_or_default();
                     if reader_leaf_distance.distance <= leaf_distance.distance {
                         queue.push(
                             reader,
@@ -764,14 +760,8 @@ impl TurboTasksBackendInner {
                 // between grabbing the locks. If that happened, and if the task is "outdated" or
                 // doesn't have the dependency edge yet, the invalidation would be lost.
 
-                if reader_task
-                    .remove(&CachedDataItemKey::OutdatedOutputDependency { target: task_id })
-                    .is_none()
-                {
-                    let _ = reader_task.add(CachedDataItem::OutputDependency {
-                        target: task_id,
-                        value: (),
-                    });
+                if !reader_task.remove_outdated_output_dependencies(&task_id) {
+                    let _ = reader_task.add_output_dependencies(task_id);
                 }
                 drop(reader_task);
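// Sketch (not part of the patch): the "remove outdated, else add" shape above is an
// edge-refresh idiom. If the edge is still marked outdated, unmarking it is enough,
// because the active edge was only *copied* into the outdated snapshot; otherwise a
// brand-new active edge is recorded. `TaskId` here is a stand-in type.
use std::collections::HashSet;

type TaskId = u32;

struct Reader {
    outdated: HashSet<TaskId>,
    active: HashSet<TaskId>,
}

impl Reader {
    fn refresh_output_dependency(&mut self, target: TaskId) {
        // Edge was snapshotted as outdated: the active edge already exists, so just
        // clear the outdated marker. Otherwise this is a fresh dependency.
        if !self.outdated.remove(&target) {
            self.active.insert(target);
        }
    }
}

fn main() {
    let mut r = Reader { outdated: HashSet::from([1]), active: HashSet::from([1]) };
    r.refresh_output_dependency(1); // snapshotted edge: only unmarked
    r.refresh_output_dependency(2); // brand-new edge
    assert!(r.outdated.is_empty() && r.active.contains(&2));
}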
@@ -794,14 +784,15 @@ impl TurboTasksBackendInner {
         };
 
         // Output doesn't exist. We need to schedule the task to compute it.
-        let (item, listener) = CachedDataItem::new_scheduled_with_listener(
+        let (in_progress_state, listener) = InProgressState::new_scheduled_with_listener(
             TaskExecutionReason::OutputNotAvailable,
             || self.get_task_desc_fn(task_id),
             note,
         );
         // It's not possible that the task is InProgress at this point. If it is InProgress {
         // done: true } it must have Output and would early return.
-        task.add_new(item);
+        let old = task.set_in_progress(in_progress_state);
+        debug_assert!(old.is_none(), "InProgress already exists");
         ctx.schedule_task(task, TaskPriority::Initial);
 
         Ok(Err(listener))
@@ -826,15 +817,10 @@ impl TurboTasksBackendInner {
         key: Option,
     ) {
         if let Some(mut reader_task) = reader_task
-            && (!task.is_immutable() || cfg!(feature = "verify_immutable"))
+            && (!task.immutable() || cfg!(feature = "verify_immutable"))
         {
             let reader = reader.unwrap();
-            let _ = task.add(CachedDataItem::CellDependent {
-                cell,
-                key,
-                task: reader,
-                value: (),
-            });
+            let _ = task.add_cell_dependents((cell, key, reader));
             drop(task);
 
             // Note: We use `task_pair` earlier to lock the task and its reader at the same
@@ -846,15 +832,8 @@ impl TurboTasksBackendInner {
                 task: task_id,
                 cell,
             };
-            if reader_task
-                .remove(&CachedDataItemKey::OutdatedCellDependency { target, key })
-                .is_none()
-            {
-                let _ = reader_task.add(CachedDataItem::CellDependency {
-                    target,
-                    key,
-                    value: (),
-                });
+            if !reader_task.remove_outdated_cell_dependencies(&(target, key)) {
+                let _ = reader_task.add_cell_dependencies((target, key));
             }
             drop(reader_task);
         }
@@ -896,7 +875,7 @@ impl TurboTasksBackendInner {
             )));
         }
 
-        let in_progress = get!(task, InProgress);
+        let in_progress = task.get_in_progress();
         if matches!(
             in_progress,
             Some(InProgressState::InProgress(..) | InProgressState::Scheduled { .. })
@@ -906,13 +885,7 @@ impl TurboTasksBackendInner {
         let is_cancelled = matches!(in_progress, Some(InProgressState::Canceled));
 
         // Check cell index range (cell might not exist at all)
-        let max_id = get!(
-            task,
-            CellTypeMaxIndex {
-                cell_type: cell.type_id
-            }
-        )
-        .copied();
+        let max_id = task.get_cell_type_max_index(&cell.type_id).copied();
         let Some(max_id) = max_id else {
             if tracking.should_track(true) {
                 add_cell_dependency(task_id, task, reader, reader_task, cell, tracking.key());
@@ -953,11 +926,10 @@ impl TurboTasksBackendInner {
         if is_cancelled {
             bail!("{} was canceled", ctx.get_task_description(task_id));
         }
-        task.add_new(CachedDataItem::new_scheduled(
-            TaskExecutionReason::CellNotAvailable,
-            || self.get_task_desc_fn(task_id),
-        ));
-        ctx.schedule_task(task, TaskPriority::Initial);
+        let _ = task.add_scheduled(TaskExecutionReason::CellNotAvailable, || {
+            self.get_task_desc_fn(task_id)
+        });
+        ctx.schedule_task(task, TaskPriority::initial());
 
         Ok(Err(listener))
     }
@@ -979,17 +951,15 @@ impl TurboTasksBackendInner {
                 }
             }
         };
-        if let Some(in_progress) = get!(task, InProgressCell { cell }) {
+        if let Some(in_progress) = task.get_in_progress_cells(&cell) {
             // Someone else is already computing the cell
             let listener = in_progress.event.listen_with_note(note);
             return (listener, false);
         }
         let in_progress = InProgressCellState::new(task_id, cell);
         let listener = in_progress.event.listen_with_note(note);
-        task.add_new(CachedDataItem::InProgressCell {
-            cell,
-            value: in_progress,
-        });
+        let old = task.insert_in_progress_cells(cell, in_progress);
+        debug_assert!(old.is_none(), "InProgressCell already exists");
         (listener, true)
     }
@@ -1642,7 +1612,7 @@ impl TurboTasksBackendInner {
     ) {
         let mut ctx = self.execute_context(turbo_tasks);
         let mut task = ctx.task(task_id, TaskDataCategory::Data);
-        if let Some(in_progress) = remove!(task, InProgress) {
+        if let Some(in_progress) = task.take_in_progress() {
             match in_progress {
                 InProgressState::Scheduled {
                     done_event,
@@ -1654,9 +1624,8 @@ impl TurboTasksBackendInner {
                 InProgressState::Canceled => {}
             }
         }
-        task.add_new(CachedDataItem::InProgress {
-            value: InProgressState::Canceled,
-        });
+        let old = task.set_in_progress(InProgressState::Canceled);
+        debug_assert!(old.is_none(), "InProgress already exists");
     }
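// Sketch (not part of the patch): the `set_in_progress` / `take_in_progress` contract
// relied on throughout this file. The setter returns the previous value, so the old
// `add_new` ("slot must be vacant") invariant becomes a debug_assert at the call site.
// `InProgress` and `Task` are stand-ins, not the generated storage_schema accessors.
struct InProgress;

struct Task {
    in_progress: Option<InProgress>,
}

impl Task {
    fn set_in_progress(&mut self, value: InProgress) -> Option<InProgress> {
        self.in_progress.replace(value)
    }
    fn take_in_progress(&mut self) -> Option<InProgress> {
        self.in_progress.take()
    }
}

fn main() {
    let mut task = Task { in_progress: None };
    let old = task.set_in_progress(InProgress);
    debug_assert!(old.is_none(), "InProgress already exists");
    assert!(task.take_in_progress().is_some());
}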
 
     fn try_start_task_execution(
@@ -1688,40 +1657,49 @@ impl TurboTasksBackendInner {
             ctx.prepare_tasks(tasks);
             task = ctx.task(task_id, TaskDataCategory::All);
         }
-        let in_progress = remove!(task, InProgress)?;
+        let in_progress = task.take_in_progress()?;
         let InProgressState::Scheduled { done_event, reason } = in_progress else {
-            task.add_new(CachedDataItem::InProgress { value: in_progress });
+            let old = task.set_in_progress(in_progress);
+            debug_assert!(old.is_none(), "InProgress already exists");
             return None;
         };
         execution_reason = reason;
-        task.add_new(CachedDataItem::InProgress {
-            value: InProgressState::InProgress(Box::new(InProgressStateInner {
+        let old = task.set_in_progress(InProgressState::InProgress(Box::new(
+            InProgressStateInner {
                 stale: false,
                 once_task,
                 done_event,
                 session_dependent: false,
                 marked_as_completed: false,
                 new_children: Default::default(),
-            })),
-        });
+            },
+        )));
+        debug_assert!(old.is_none(), "InProgress already exists");
 
         // Make all current collectibles outdated (remove left-over outdated collectibles)
         enum Collectible {
             Current(CollectibleRef, i32),
             Outdated(CollectibleRef),
         }
-        let collectibles = iter_many!(task, Collectible { collectible } value => Collectible::Current(collectible, *value))
-            .chain(iter_many!(task, OutdatedCollectible { collectible } => Collectible::Outdated(collectible)))
-            .collect::<Vec<_>>();
+        let collectibles = task
+            .iter_collectibles()
+            .map(|(&collectible, &value)| Collectible::Current(collectible, value))
+            .chain(
+                task.iter_outdated_collectibles()
+                    .map(|(collectible, _count)| Collectible::Outdated(*collectible)),
+            )
+            .collect::<Vec<_>>();
         for collectible in collectibles {
             match collectible {
                 Collectible::Current(collectible, value) => {
-                    let _ =
-                        task.insert(CachedDataItem::OutdatedCollectible { collectible, value });
+                    let _ = task.insert_outdated_collectible(collectible, value);
                 }
                 Collectible::Outdated(collectible) => {
-                    if !task.has_key(&CachedDataItemKey::Collectible { collectible }) {
-                        task.remove(&CachedDataItemKey::OutdatedCollectible { collectible });
+                    if task
+                        .collectibles()
+                        .is_none_or(|m| m.get(&collectible).is_none())
+                    {
+                        task.remove_outdated_collectibles(&collectible);
                     }
                 }
             }
         }
@@ -1729,50 +1707,11 @@ impl TurboTasksBackendInner {
 
         if self.should_track_dependencies() {
             // Make all dependencies outdated
-            let outdated_cell_dependencies_to_add =
-                iter_many!(task, CellDependency { target, key } => (target, key))
-                    .collect::<Vec<_>>();
-            let outdated_cell_dependencies_to_remove =
-                iter_many!(task, OutdatedCellDependency { target, key } => (target, key))
-                    .filter(|&(target, key)| {
-                        !task.has_key(&CachedDataItemKey::CellDependency { target, key })
-                    })
-                    .collect::<Vec<_>>();
-            task.extend(
-                CachedDataItemType::OutdatedCellDependency,
-                outdated_cell_dependencies_to_add
-                    .into_iter()
-                    .map(|(target, key)| CachedDataItem::OutdatedCellDependency {
-                        target,
-                        key,
-                        value: (),
-                    }),
-            );
-            for (target, key) in outdated_cell_dependencies_to_remove {
-                task.remove(&CachedDataItemKey::OutdatedCellDependency { target, key });
-            }
+            let cell_dependencies = task.iter_cell_dependencies().collect();
+            task.set_outdated_cell_dependencies(cell_dependencies);
 
-            let outdated_output_dependencies_to_add =
-                iter_many!(task, OutputDependency { target } => target)
-                    .collect::<Vec<_>>();
-            let outdated_output_dependencies_to_remove =
-                iter_many!(task, OutdatedOutputDependency { target } => target)
-                    .filter(|&target| {
-                        !task.has_key(&CachedDataItemKey::OutputDependency { target })
-                    })
-                    .collect::<Vec<_>>();
-            task.extend(
-                CachedDataItemType::OutdatedOutputDependency,
-                outdated_output_dependencies_to_add
-                    .into_iter()
-                    .map(|target| CachedDataItem::OutdatedOutputDependency {
-                        target,
-                        value: (),
-                    }),
-            );
-            for target in outdated_output_dependencies_to_remove {
-                task.remove(&CachedDataItemKey::OutdatedOutputDependency { target });
-            }
+            let outdated_output_dependencies = task.iter_output_dependencies().collect();
+            task.set_outdated_output_dependencies(outdated_output_dependencies);
         }
     }
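// Sketch (not part of the patch): the snapshot step that `set_outdated_*_dependencies`
// performs above, reduced to its essence. At execution start the active edges are
// copied into an "outdated" snapshot; edges re-read during execution are cleared from
// the snapshot; whatever remains at completion is truly stale. Types are stand-ins.
use std::collections::HashSet;

#[derive(Default)]
struct Deps {
    active: HashSet<u32>,
    outdated: HashSet<u32>,
}

impl Deps {
    // Start of execution: snapshot all active edges as potentially outdated.
    fn snapshot(&mut self) {
        self.outdated = self.active.iter().copied().collect();
    }
    // A dependency read during execution keeps the edge alive.
    fn on_read(&mut self, target: u32) {
        if !self.outdated.remove(&target) {
            self.active.insert(target);
        }
    }
    // Completion: everything still in the snapshot was never re-read.
    fn stale_edges(&mut self) -> Vec<u32> {
        self.outdated.drain().collect()
    }
}

fn main() {
    let mut deps = Deps::default();
    deps.active.insert(1);
    deps.active.insert(2);
    deps.snapshot();
    deps.on_read(1); // re-read: stays active
    assert_eq!(deps.stale_edges(), vec![2]); // 2 was never re-read
}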
@@ -1904,31 +1843,27 @@ impl TurboTasksBackendInner {
             return true;
         }
 
-        let mut removed_data = Vec::new();
-        if self.task_execution_completed_finish(
+        let (stale, in_progress_cells) = self.task_execution_completed_finish(
             &mut ctx,
             task_id,
             #[cfg(feature = "verify_determinism")]
             no_output_set,
             new_output,
-            &mut removed_data,
             is_now_immutable,
-        ) {
+        );
+        if stale {
             // Task was stale and has been rescheduled
             #[cfg(feature = "trace_task_details")]
             span.record("stale", "finish");
             return true;
         }
 
-        self.task_execution_completed_cleanup(
-            &mut ctx,
-            task_id,
-            cell_counters,
-            is_error,
-            &mut removed_data,
-        );
+        let removed_data =
+            self.task_execution_completed_cleanup(&mut ctx, task_id, cell_counters, is_error);
 
         // Drop data outside of critical sections
         drop(removed_data);
+        drop(in_progress_cells);
 
         false
     }
@@ -1943,7 +1878,7 @@ impl TurboTasksBackendInner {
         has_invalidator: bool,
     ) -> Option {
         let mut task = ctx.task(task_id, TaskDataCategory::All);
-        let Some(in_progress) = get_mut!(task, InProgress) else {
+        let Some(in_progress) = task.get_in_progress_mut() else {
            panic!("Task execution completed, but task is not in progress: {task:#?}");
        };
        if matches!(in_progress, InProgressState::Canceled) {
@@ -1973,19 +1908,18 @@ impl TurboTasksBackendInner {
             done_event,
             mut new_children,
             ..
-        })) = remove!(task, InProgress)
+        })) = task.take_in_progress()
         else {
             unreachable!();
         };
-        task.add_new(CachedDataItem::InProgress {
-            value: InProgressState::Scheduled {
-                done_event,
-                reason: TaskExecutionReason::Stale,
-            },
+        let old = task.set_in_progress(InProgressState::Scheduled {
+            done_event,
+            reason: TaskExecutionReason::Stale,
         });
+        debug_assert!(old.is_none(), "InProgress already exists");
         // Remove old children from new_children to leave only the children that had their
         // active count increased
-        for task in iter_many!(task, Child { task } => task) {
+        for task in task.iter_children() {
             new_children.remove(&task);
         }
         drop(task);
@@ -2006,36 +1940,27 @@ impl TurboTasksBackendInner {
 
         // handle has_invalidator
         if has_invalidator {
-            let _ = task.add(CachedDataItem::HasInvalidator { value: () });
+            task.set_invalidator(true);
         }
 
         // handle cell counters: update max index and remove cells that are no longer used
-        let old_counters: FxHashMap<_, _> =
-            get_many!(task, CellTypeMaxIndex { cell_type } max_index => (cell_type, *max_index));
+        let old_counters: FxHashMap<_, _> = task
+            .iter_cell_type_max_index()
+            .map(|(&k, &v)| (k, v))
+            .collect();
         let mut counters_to_remove = old_counters.clone();
-        task.extend(
-            CachedDataItemType::CellTypeMaxIndex,
-            cell_counters.iter().filter_map(|(&cell_type, &max_index)| {
-                if let Some(old_max_index) = counters_to_remove.remove(&cell_type) {
-                    if old_max_index != max_index {
-                        Some(CachedDataItem::CellTypeMaxIndex {
-                            cell_type,
-                            value: max_index,
-                        })
-                    } else {
-                        None
-                    }
-                } else {
-                    Some(CachedDataItem::CellTypeMaxIndex {
-                        cell_type,
-                        value: max_index,
-                    })
+        for (&cell_type, &max_index) in cell_counters.iter() {
+            if let Some(old_max_index) = counters_to_remove.remove(&cell_type) {
+                if old_max_index != max_index {
+                    task.insert_cell_type_max_index(cell_type, max_index);
                 }
-            }),
-        );
+            } else {
+                task.insert_cell_type_max_index(cell_type, max_index);
+            }
+        }
         for (cell_type, _) in counters_to_remove {
-            task.remove(&CachedDataItemKey::CellTypeMaxIndex { cell_type });
+            task.remove_cell_type_max_index(&cell_type);
         }
 
         let mut queue = AggregationUpdateQueue::new();
@@ -2043,21 +1968,21 @@ impl TurboTasksBackendInner {
         let mut old_edges = Vec::new();
 
         let has_children = !new_children.is_empty();
-        let is_immutable = task.is_immutable();
+        let is_immutable = task.immutable();
         let task_dependencies_for_immutable =
             // Task was previously marked as immutable
             if !is_immutable
                 // Task is not session dependent (session dependent tasks can change between sessions)
                 && !session_dependent
                 // Task has no invalidator
-                && !task.has_key(&CachedDataItemKey::HasInvalidator {})
+                && !task.invalidator()
                 // Task has no dependencies on collectibles
-                && count!(task, CollectiblesDependency) == 0
+                && task.is_collectibles_dependencies_empty()
             {
                 Some(
                     // Collect all dependencies on tasks to check if all dependencies are immutable
-                    iter_many!(task, OutputDependency { target } => target)
-                        .chain(iter_many!(task, CellDependency { target, key: _ } => target.task))
+                    task.iter_output_dependencies()
+                        .chain(task.iter_cell_dependencies().map(|(target, _key)| target.task))
                         .collect::<Vec<_>>(),
                 )
             } else {
@@ -2070,32 +1995,38 @@ impl TurboTasksBackendInner {
 
         if has_children {
             // Filter actual new children
             old_edges.extend(
-                iter_many!(task, Child { task } => task)
+                task.iter_children()
                     .filter(|task| !new_children.remove(task))
                     .map(OutdatedEdge::Child),
             );
         } else {
-            old_edges.extend(iter_many!(task, Child { task } => task).map(OutdatedEdge::Child));
+            old_edges.extend(task.iter_children().map(OutdatedEdge::Child));
         }
         old_edges.extend(
-            task.iter(CachedDataItemType::OutdatedCollectible)
-                .filter_map(|(key, value)| match (key, value) {
-                    (
-                        CachedDataItemKey::OutdatedCollectible { collectible },
-                        CachedDataItemValueRef::OutdatedCollectible { value },
-                    ) => Some(OutdatedEdge::Collectible(collectible, *value)),
-                    _ => None,
-                }),
+            task.iter_outdated_collectibles()
+                .map(|(&collectible, &count)| OutdatedEdge::Collectible(collectible, count)),
         );
         if self.should_track_dependencies() {
-            old_edges.extend(iter_many!(task, OutdatedCellDependency { target, key } => OutdatedEdge::CellDependency(target, key)));
-            old_edges.extend(iter_many!(task, OutdatedOutputDependency { target } => OutdatedEdge::OutputDependency(target)));
+            // IMPORTANT: Use iter_outdated_* here, NOT iter_* (active dependencies).
+            // At execution start, active deps are copied to outdated as a "before" snapshot.
+            // During execution, new deps are added to active.
+            // Here at completion, we clean up only the OUTDATED deps (the "before" snapshot).
+            // Using iter_* (active) instead would incorrectly clean up deps that are still valid,
+            // breaking dependency tracking.
+            old_edges.extend(
+                task.iter_outdated_cell_dependencies()
+                    .map(|(target, key)| OutdatedEdge::CellDependency(target, key)),
+            );
+            old_edges.extend(
+                task.iter_outdated_output_dependencies()
+                    .map(OutdatedEdge::OutputDependency),
+            );
         }
 
         // Check if output need to be updated
-        let current_output = get!(task, Output);
+        let current_output = task.get_output();
         #[cfg(feature = "verify_determinism")]
         let no_output_set = current_output.is_none();
         let new_output = match result {
@@ -2140,7 +2071,7 @@ impl TurboTasksBackendInner {
         let mut output_dependent_tasks = SmallVec::<[_; 4]>::new();
         // When output has changed, grab the dependent tasks
         if new_output.is_some() && ctx.should_track_dependencies() {
-            output_dependent_tasks = get_many!(task, OutputDependent { task } => task);
+            output_dependent_tasks = task.iter_output_dependent().collect();
         }
         drop(task);
@@ -2150,7 +2081,7 @@ impl TurboTasksBackendInner {
         if let Some(dependencies) = task_dependencies_for_immutable
             && dependencies
                 .iter()
-                .all(|&task_id| ctx.task(task_id, TaskDataCategory::Meta).is_immutable())
+                .all(|&task_id| ctx.task(task_id, TaskDataCategory::Meta).immutable())
         {
             is_now_immutable = true;
         }
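// Sketch (not part of the patch): the immutability check above, boiled down. A task can
// be frozen as immutable once it is not session dependent, has no invalidator, and every
// task it depends on is itself immutable; `is_immutable` stands in for the per-task
// lookup done via `ctx.task(..).immutable()`.
fn can_become_immutable(
    session_dependent: bool,
    has_invalidator: bool,
    dependencies: &[u32],
    is_immutable: impl Fn(u32) -> bool,
) -> bool {
    !session_dependent && !has_invalidator && dependencies.iter().all(|&id| is_immutable(id))
}

fn main() {
    let immutable = |id: u32| id != 3;
    assert!(can_become_immutable(false, false, &[1, 2], immutable));
    assert!(!can_become_immutable(false, false, &[1, 3], immutable));
}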
@@ -2214,7 +2145,7 @@ impl TurboTasksBackendInner {
         }
         let mut make_stale = true;
         let dependent = ctx.task(dependent_task_id, TaskDataCategory::All);
-        if dependent.has_key(&CachedDataItemKey::OutdatedOutputDependency { target: task_id }) {
+        if dependent.outdated_output_dependencies_contains(&task_id) {
             #[cfg(feature = "trace_task_output_dependencies")]
             span.record("result", "outdated dependency");
             // output dependency is outdated, so it hasn't read the output yet
             // But importantly we still need to make the task dirty as it should no longer
             // be considered as "recomputation".
             make_stale = false;
-        } else if !dependent.has_key(&CachedDataItemKey::OutputDependency { target: task_id }) {
+        } else if !dependent.output_dependencies_contains(&task_id) {
             // output dependency has been removed, so the task doesn't depend on the
             // output anymore and doesn't need to be invalidated
             #[cfg(feature = "trace_task_output_dependencies")]
@@ -2281,7 +2212,7 @@ impl TurboTasksBackendInner {
 
         let mut queue = AggregationUpdateQueue::new();
         ctx.for_each_task_meta(new_children.iter().copied(), |child_task, ctx| {
-            if !child_task.has_key(&CachedDataItemKey::Output {}) {
+            if !child_task.has_output() {
                 let child_id = child_task.id();
                 make_task_dirty_internal(
                     child_task,
@@ -2307,7 +2238,7 @@ impl TurboTasksBackendInner {
         debug_assert!(!new_children.is_empty());
 
         let mut task = ctx.task(task_id, TaskDataCategory::All);
-        let Some(in_progress) = get!(task, InProgress) else {
+        let Some(in_progress) = task.get_in_progress() else {
             panic!("Task execution completed, but task is not in progress: {task:#?}");
         };
         if matches!(in_progress, InProgressState::Canceled) {
@@ -2327,16 +2258,15 @@ impl TurboTasksBackendInner {
         #[cfg(not(feature = "no_fast_stale"))]
         if *stale {
             let Some(InProgressState::InProgress(box InProgressStateInner { done_event, .. })) =
-                remove!(task, InProgress)
+                task.take_in_progress()
             else {
                 unreachable!();
             };
-            task.add_new(CachedDataItem::InProgress {
-                value: InProgressState::Scheduled {
-                    done_event,
-                    reason: TaskExecutionReason::Stale,
-                },
+            let old = task.set_in_progress(InProgressState::Scheduled {
+                done_event,
+                reason: TaskExecutionReason::Stale,
            });
+            debug_assert!(old.is_none(), "InProgress already exists");
             drop(task);
 
             // All `new_children` are currently hold active with an active count and we need to undo
@@ -2351,7 +2281,9 @@ impl TurboTasksBackendInner {
         }
 
         let has_active_count = ctx.should_track_activeness()
-            && get!(task, Activeness).map_or(false, |activeness| activeness.active_counter > 0);
+            && task
+                .get_activeness()
+                .is_some_and(|activeness| activeness.active_counter > 0);
         connect_children(
             ctx,
             task_id,
@@ -2370,16 +2302,20 @@ impl TurboTasksBackendInner {
         task_id: TaskId,
         #[cfg(feature = "verify_determinism")] no_output_set: bool,
         new_output: Option<OutputValue>,
-        removed_data: &mut Vec,
         is_now_immutable: bool,
-    ) -> bool {
+    ) -> (
+        bool,
+        Option<
+            auto_hash_map::AutoMap, 1>,
+        >,
+    ) {
         let mut task = ctx.task(task_id, TaskDataCategory::All);
-        let Some(in_progress) = remove!(task, InProgress) else {
+        let Some(in_progress) = task.take_in_progress() else {
             panic!("Task execution completed, but task is not in progress: {task:#?}");
         };
         if matches!(in_progress, InProgressState::Canceled) {
             // Task was canceled in the meantime, so we don't finish it
-            return false;
+            return (false, None);
         }
         let InProgressState::InProgress(box InProgressStateInner {
             done_event,
@@ -2396,49 +2332,41 @@ impl TurboTasksBackendInner {
 
         // If the task is stale, reschedule it
         if stale {
-            task.add_new(CachedDataItem::InProgress {
-                value: InProgressState::Scheduled {
-                    done_event,
-                    reason: TaskExecutionReason::Stale,
-                },
+            let old = task.set_in_progress(InProgressState::Scheduled {
+                done_event,
+                reason: TaskExecutionReason::Stale,
             });
-            return true;
+            debug_assert!(old.is_none(), "InProgress already exists");
+            return (true, None);
         }
 
         // Set the output if it has changed
         let mut old_content = None;
         if let Some(value) = new_output {
-            old_content = task.insert(CachedDataItem::Output { value });
+            old_content = task.set_output(value);
         }
         // If the task has no invalidator and has no mutable dependencies, it does not have a way
         // to be invalidated and we can mark it as immutable.
         if is_now_immutable {
-            let _ = task.add(CachedDataItem::Immutable { value: () });
+            task.set_immutable(true);
         }
 
-        // Notify in progress cells
-        removed_data.extend(task.extract_if(
-            CachedDataItemType::InProgressCell,
-            |key, value| match (key, value) {
-                (
-                    CachedDataItemKey::InProgressCell { .. },
-                    CachedDataItemValueRef::InProgressCell { value },
-                ) => {
-                    value.event.notify(usize::MAX);
-                    true
-                }
-                _ => false,
-            },
-        ));
+        // Notify in progress cells and remove all of them
+        let in_progress_cells = task.take_in_progress_cells();
+        if let Some(ref cells) = in_progress_cells {
+            for state in cells.values() {
+                state.event.notify(usize::MAX);
+            }
+        }
 
         // Grab the old dirty state
-        let old_dirtyness = get!(task, Dirty).cloned();
+        let old_dirtyness = task.get_dirty().cloned();
         let (old_self_dirty, old_current_session_self_clean) = match old_dirtyness {
             None => (false, false),
             Some(Dirtyness::Dirty(_)) => (true, false),
             Some(Dirtyness::SessionDependent) => {
-                let clean_in_current_session = get!(task, CurrentSessionClean).is_some();
+                let clean_in_current_session = task.current_session_clean();
                 (true, clean_in_current_session)
             }
         };
@@ -2453,16 +2381,16 @@ impl TurboTasksBackendInner {
         // Update the dirty state
         if old_dirtyness != new_dirtyness {
             if let Some(value) = new_dirtyness {
-                task.insert(CachedDataItem::Dirty { value });
+                task.set_dirty(value);
             } else if old_dirtyness.is_some() {
-                task.remove(&CachedDataItemKey::Dirty {});
+                task.take_dirty();
             }
         }
         if old_current_session_self_clean != new_current_session_self_clean {
             if new_current_session_self_clean {
-                task.insert(CachedDataItem::CurrentSessionClean { value: () });
+                task.set_current_session_clean(true);
             } else if old_current_session_self_clean {
-                task.remove(&CachedDataItemKey::CurrentSessionClean {});
+                task.set_current_session_clean(false);
             }
         }
@@ -2470,13 +2398,14 @@ impl TurboTasksBackendInner {
         let data_update = if old_self_dirty != new_self_dirty
             || old_current_session_self_clean != new_current_session_self_clean
         {
-            let dirty_container_count = get!(task, AggregatedDirtyContainerCount)
+            let dirty_container_count = task
+                .get_aggregated_dirty_container_count()
                 .cloned()
                 .unwrap_or_default();
-            let current_session_clean_container_count =
-                get!(task, AggregatedCurrentSessionCleanContainerCount)
-                    .copied()
-                    .unwrap_or_default();
+            let current_session_clean_container_count = task
+                .get_aggregated_current_session_clean_container_count()
+                .copied()
+                .unwrap_or_default();
             let result = ComputeDirtyAndCleanUpdate {
                 old_dirty_container_count: dirty_container_count,
                 new_dirty_container_count: dirty_container_count,
@@ -2490,11 +2419,11 @@ impl TurboTasksBackendInner {
             .compute();
             if result.dirty_count_update - result.current_session_clean_update < 0 {
                 // The task is clean now
-                if let Some(activeness_state) = get_mut!(task, Activeness) {
+                if let Some(activeness_state) = task.get_activeness_mut() {
                     activeness_state.all_clean_event.notify(usize::MAX);
                     activeness_state.unset_active_until_clean();
                     if activeness_state.is_empty() {
-                        task.remove(&CachedDataItemKey::Activeness {});
+                        task.take_activeness();
                    }
                }
            }
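// Sketch (not part of the patch): why `in_progress_cells` is handed back to the caller
// instead of being dropped here. Destructors can be expensive, so owned data is moved
// out while the lock is held and dropped only after the critical section ends; names
// below are illustrative, not the backend's real types.
use std::sync::Mutex;

fn take_and_drop_outside(lock: &Mutex<Vec<String>>) {
    let removed = {
        let mut guard = lock.lock().unwrap();
        std::mem::take(&mut *guard) // move the data out while holding the lock
    }; // the critical section ends here when `guard` is dropped
    drop(removed); // the potentially slow drop no longer blocks other threads
}

fn main() {
    let lock = Mutex::new(vec!["cell".to_string()]);
    take_and_drop_outside(&lock);
    assert!(lock.lock().unwrap().is_empty());
}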
@@ -2512,12 +2441,11 @@ impl TurboTasksBackendInner {
         #[cfg(not(feature = "verify_determinism"))]
         let reschedule = false;
         if reschedule {
-            task.add_new(CachedDataItem::InProgress {
-                value: InProgressState::Scheduled {
-                    done_event,
-                    reason: TaskExecutionReason::Stale,
-                },
+            let old = task.set_in_progress(InProgressState::Scheduled {
+                done_event,
+                reason: TaskExecutionReason::Stale,
             });
+            debug_assert!(old.is_none(), "InProgress already exists");
             drop(task);
         } else {
             drop(task);
@@ -2532,7 +2460,8 @@ impl TurboTasksBackendInner {
             AggregationUpdateQueue::run(data_update, ctx);
         }
 
-        reschedule
+        // We return so the data can be dropped outside of critical sections
+        (reschedule, in_progress_cells)
     }
 
     fn task_execution_completed_cleanup(
@@ -2541,47 +2470,54 @@ impl TurboTasksBackendInner {
         task_id: TaskId,
         cell_counters: &AutoMap, 8>,
         is_error: bool,
-        removed_data: &mut Vec,
-    ) {
+    ) -> Vec<SharedReference> {
         let mut task = ctx.task(task_id, TaskDataCategory::All);
-
+        let mut removed_cell_data = Vec::new();
         // An error is potentially caused by a eventual consistency, so we avoid updating cells
         // after an error as it is likely transient and we want to keep the dependent tasks
         // clean to avoid re-executions.
         if !is_error {
             // Remove no longer existing cells and
             // find all outdated data items (removed cells, outdated edges)
-            // Note: For persistent tasks we only want to call extract_if when there are actual
-            // cells to remove to avoid tracking that as modification.
             // Note: We do not mark the tasks as dirty here, as these tasks are unused or stale
             // anyway and we want to avoid needless re-executions. When the cells become
             // used again, they are invalidated from the update cell operation.
-            let is_cell_unused = |cell: CellId| {
-                cell_counters
-                    .get(&cell.type_id)
-                    .is_none_or(|start_index| cell.index >= *start_index)
-            };
-            if task_id.is_transient()
-                || iter_many!(task, CellData { cell } => cell).any(is_cell_unused)
-            {
-                removed_data.extend(task.extract_if(CachedDataItemType::CellData, |key, _| {
-                    matches!(key, CachedDataItemKey::CellData { cell } if cell_counters.get(&cell.type_id).is_none_or(|start_index| cell.index >= *start_index))
-                }));
+            // Remove cell data for cells that no longer exist
+            let to_remove_persistent: Vec<_> = task
+                .iter_persistent_cell_data()
+                .filter_map(|(cell, _)| {
+                    cell_counters
+                        .get(&cell.type_id)
+                        .is_none_or(|start_index| cell.index >= *start_index)
+                        .then_some(*cell)
+                })
+                .collect();
+
+            // Remove transient cell data for cells that no longer exist
+            let to_remove_transient: Vec<_> = task
+                .iter_transient_cell_data()
+                .filter_map(|(cell, _)| {
+                    cell_counters
+                        .get(&cell.type_id)
+                        .is_none_or(|start_index| cell.index >= *start_index)
+                        .then_some(*cell)
+                })
+                .collect();
+            removed_cell_data.reserve_exact(to_remove_persistent.len() + to_remove_transient.len());
+            for cell in to_remove_persistent {
+                if let Some(data) = task.remove_persistent_cell_data(&cell) {
+                    removed_cell_data.push(data.into_untyped());
+                }
             }
-            if task_id.is_transient()
-                || iter_many!(task, TransientCellData { cell } => cell).any(is_cell_unused)
-            {
-                removed_data.extend(task.extract_if(
-                    CachedDataItemType::TransientCellData,
-                    |key, _| {
-                        matches!(key, CachedDataItemKey::TransientCellData { cell } if cell_counters.get(&cell.type_id).is_none_or(|start_index| cell.index >= *start_index))
-                    },
-                ));
+            for cell in to_remove_transient {
+                if let Some(data) = task.remove_transient_cell_data(&cell) {
+                    removed_cell_data.push(data);
+                }
             }
         }
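// Sketch (not part of the patch): the two-pass removal used above. The keys of dead
// cells are collected while iterating immutably, then removed in a second pass, since
// removing during iteration would require a second (mutable) borrow of the storage.
use std::collections::HashMap;

fn remove_unused(cells: &mut HashMap<u32, String>, live: impl Fn(u32) -> bool) -> Vec<String> {
    // Pass 1: collect keys to drop (immutable borrow only).
    let to_remove: Vec<u32> = cells.keys().copied().filter(|&c| !live(c)).collect();
    // Pass 2: remove them, keeping the extracted values so the caller can drop them later.
    to_remove.into_iter().filter_map(|c| cells.remove(&c)).collect()
}

fn main() {
    let mut cells = HashMap::from([(0, "a".to_string()), (5, "b".to_string())]);
    let removed = remove_unused(&mut cells, |c| c < 4);
    assert_eq!(removed, vec!["b".to_string()]);
    assert_eq!(cells.len(), 1);
}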
 
         // Shrink memory usage for collections that are only mutated during/after execution
-        task.shrink_cell_data();
+        task.shrink_persistent_cell_data();
         task.shrink_transient_cell_data();
         task.shrink_cell_type_max_index();
         task.shrink_cell_dependencies();
@@ -2596,6 +2532,9 @@ impl TurboTasksBackendInner {
         task.shrink_in_progress_cells();
 
         drop(task);
+
+        // Return so we can drop outside of critical sections
+        removed_cell_data
     }
 
     fn run_backend_job<'a>(
@@ -2732,36 +2671,28 @@ impl TurboTasksBackendInner {
             );
             task = ctx.task(task_id, TaskDataCategory::All);
         }
-        for collectible in iter_many!(
-            task,
-            AggregatedCollectible {
-                collectible
-            } count if collectible.collectible_type == collectible_type && *count > 0 => {
-                collectible.cell
+        for (collectible, count) in task.iter_aggregated_collectibles() {
+            if *count > 0 && collectible.collectible_type == collectible_type {
+                *collectibles
+                    .entry(RawVc::TaskCell(
+                        collectible.cell.task,
+                        collectible.cell.cell,
+                    ))
+                    .or_insert(0) += 1;
             }
-        ) {
-            *collectibles
-                .entry(RawVc::TaskCell(collectible.task, collectible.cell))
-                .or_insert(0) += 1;
-        }
-        for (collectible, count) in iter_many!(
-            task,
-            Collectible {
-                collectible
-            } count if collectible.collectible_type == collectible_type => {
-                (collectible.cell, *count)
+        }
+        for (&collectible, &count) in task.iter_collectibles() {
+            if collectible.collectible_type == collectible_type {
+                *collectibles
+                    .entry(RawVc::TaskCell(
+                        collectible.cell.task,
+                        collectible.cell.cell,
+                    ))
+                    .or_insert(0) += count;
             }
-        ) {
-            *collectibles
-                .entry(RawVc::TaskCell(collectible.task, collectible.cell))
-                .or_insert(0) += count;
         }
         if let Some(reader_id) = reader_id {
-            let _ = task.add(CachedDataItem::CollectiblesDependent {
-                collectible_type,
-                task: reader_id,
-                value: (),
-            });
+            let _ = task.add_collectibles_dependents((collectible_type, reader_id));
         }
     }
     if let Some(reader_id) = reader_id {
@@ -2770,11 +2701,8 @@ impl TurboTasksBackendInner {
             task: task_id,
             collectible_type,
         };
-        if reader
-            .remove(&CachedDataItemKey::OutdatedCollectiblesDependency { target })
-            .is_none()
-        {
-            let _ = reader.add(CachedDataItem::CollectiblesDependency { target, value: () });
+        if !reader.remove_outdated_collectibles_dependencies(&target) {
+            let _ = reader.add_collectibles_dependencies(target);
        }
    }
    collectibles
@@ -2886,7 +2814,7 @@ impl TurboTasksBackendInner {
         if let Some(InProgressState::InProgress(box InProgressStateInner {
             session_dependent,
             ..
-        })) = get_mut!(task, InProgress)
+        })) = task.get_in_progress_mut()
         {
             *session_dependent = true;
         }
@@ -2902,7 +2830,7 @@ impl TurboTasksBackendInner {
         if let Some(InProgressState::InProgress(box InProgressStateInner {
             marked_as_completed,
             ..
-        })) = get_mut!(task, InProgress)
+        })) = task.get_in_progress_mut()
         {
             *marked_as_completed = true;
             // TODO this should remove the dirty state (also check session_dependent)
@@ -2954,27 +2882,7 @@ impl TurboTasksBackendInner {
         );
         {
             let mut task = self.storage.access_mut(task_id);
-            task.add(CachedDataItem::AggregationNumber {
-                value: AggregationNumber {
-                    base: u32::MAX,
-                    distance: 0,
-                    effective: u32::MAX,
-                },
-            });
-            if self.should_track_activeness() {
-                task.add(CachedDataItem::Activeness {
-                    value: ActivenessState::new_root(root_type, task_id),
-                });
-            }
-            task.add(CachedDataItem::new_scheduled(
-                TaskExecutionReason::Initial,
-                move || {
-                    move || match root_type {
-                        RootType::RootTask => "Root Task".to_string(),
-                        RootType::OnceTask => "Once Task".to_string(),
-                    }
-                },
-            ));
+            (*task).init_transient_task(task_id, root_type, self.should_track_activeness());
         }
         #[cfg(feature = "verify_aggregation_graph")]
         self.root_tasks.lock().insert(task_id);
@@ -2994,12 +2902,12 @@ impl TurboTasksBackendInner {
         let is_dirty = task.is_dirty();
         let has_dirty_containers = task.has_dirty_containers();
         if is_dirty.is_some() || has_dirty_containers {
-            if let Some(activeness_state) = get_mut!(task, Activeness) {
+            if let Some(activeness_state) = task.get_activeness_mut() {
                 // We will finish the task, but it would be removed after the task is done
                 activeness_state.unset_root_type();
                 activeness_state.set_active_until_clean();
             };
-        } else if let Some(activeness_state) = remove!(task, Activeness) {
+        } else if let Some(activeness_state) = task.take_activeness() {
             // Technically nobody should be listening to this event, but just in case
             // we notify it anyway
             activeness_state.all_clean_event.notify(usize::MAX);
@@ -3060,7 +2968,10 @@ impl TurboTasksBackendInner {
             );
         }
 
-        let aggregated_collectibles: Vec<_> = get_many!(task, AggregatedCollectible { collectible } value if *value > 0 => {collectible});
+        let aggregated_collectibles: Vec<_> = task
+            .iter_aggregated_collectibles()
+            .map(|(collectible, _)| collectible)
+            .collect();
         for collectible in aggregated_collectibles {
             collectibles
                 .entry(collectible)
                 .push(task_id);
         }
 
-        let own_collectibles: Vec<_> = get_many!(task, Collectible { collectible } value if *value > 0 => {collectible});
+        let own_collectibles: Vec<_> = task
+            .iter_collectibles_entries()
+            .filter_map(|(&collectible, &value)| (value > 0).then_some(collectible))
+            .collect::<Vec<_>>();
         for collectible in own_collectibles {
             if let Some((flag, _)) = collectibles.get_mut(&collectible) {
                 *flag = true
@@ -3081,7 +2995,7 @@ impl TurboTasksBackendInner {
             }
         }
 
-        let is_dirty = get!(task, Dirty).is_some();
+        let is_dirty = task.has_dirty();
         let has_dirty_container = task.has_dirty_containers();
         let should_be_in_upper = is_dirty || has_dirty_container;
@@ -3095,7 +3009,7 @@ impl TurboTasksBackendInner {
         //     uppers
         // );
 
-        for child_id in iter_many!(task, Child { task } => task) {
+        for child_id in task.iter_children() {
             // println!("{task_id}: child -> {child_id}");
             if visited.insert(child_id) {
                 queue.push_back(child_id);
@@ -3106,10 +3020,14 @@ impl TurboTasksBackendInner {
         if should_be_in_upper {
             for upper_id in uppers {
                 let task = ctx.task(upper_id, TaskDataCategory::All);
-                let in_upper = get!(task, AggregatedDirtyContainer { task: task_id })
+                let in_upper = task
+                    .get_aggregated_dirty_containers(&task_id)
                     .is_some_and(|&dirty| dirty > 0);
                 if !in_upper {
-                    let containers: Vec<_> = get_many!(task, AggregatedDirtyContainer { task: task_id } value => (task_id, *value));
+                    let containers: Vec<_> = task
+                        .iter_aggregated_dirty_containers_entries()
+                        .map(|(&k, &v)| (k, v))
+                        .collect();
                     panic!(
                         "Task {} ({}) is dirty, but is not listed in the upper task {} \
                          ({})\nThese dirty containers are present:\n{:#?}",
@@ -3153,10 +3071,10 @@ impl TurboTasksBackendInner {
         while let Some(task_id) = queue.pop() {
             let desc = ctx.get_task_description(task_id);
             let task = ctx.task(task_id, TaskDataCategory::All);
-            let aggregated_collectible =
-                get!(task, AggregatedCollectible { collectible })
-                    .copied()
-                    .unwrap_or_default();
+            let aggregated_collectible = task
+                .get_aggregated_collectibles(&collectible)
+                .copied()
+                .unwrap_or_default();
             let uppers = get_uppers(&task);
             drop(task);
             writeln!(
diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/operation/aggregation_update.rs b/turbopack/crates/turbo-tasks-backend/src/backend/operation/aggregation_update.rs
index 4c22991727330..c5311761f04a6 100644
--- a/turbopack/crates/turbo-tasks-backend/src/backend/operation/aggregation_update.rs
+++ b/turbopack/crates/turbo-tasks-backend/src/backend/operation/aggregation_update.rs
@@ -28,17 +28,11 @@ use turbo_tasks::{FxIndexMap, TaskExecutionReason, TaskId};
 use crate::backend::operation::invalidate::TaskDirtyCause;
 use crate::{
     backend::{
-        TaskDataCategory, get_mut, get_mut_or_insert_with,
+        TaskDataCategory,
         operation::{ExecuteContext, Operation, TaskGuard, invalidate::make_task_dirty},
-        storage::{
-            count, get, get_many, iter_many, remove, update, update_count, update_count_and_get,
-        },
         storage_schema::TaskStorageAccessors,
     },
-    data::{
-        ActivenessState, AggregationNumber, CachedDataItem, CachedDataItemKey, CachedDataItemType,
-        CollectibleRef,
-    },
+    data::{ActivenessState, AggregationNumber, CollectibleRef},
     utils::swap_retain,
 };
@@ -67,9 +61,9 @@ fn get_followers_with_aggregation_number(
     aggregation_number: u32,
 ) -> TaskIdVec {
     if is_aggregating_node(aggregation_number) {
-        get_many!(task, Follower { task } count if *count > 0 => task)
+        task.iter_followers().map(|(&id, _count)| id).collect()
     } else {
-        get_many!(task, Child { task } => task)
+        task.iter_children().collect()
     }
 }
@@ -82,17 +76,12 @@ fn get_followers(task: &impl TaskGuard) -> TaskIdVec {
 
 /// Returns a list of tasks that are considered as "upper" tasks of the task. The upper tasks are
 /// aggregating over the task.
 pub fn get_uppers(task: &impl TaskGuard) -> TaskIdVec {
-    get_many!(task, Upper { task } count if *count > 0 => task)
-}
-
-/// Returns an iterator of tasks that are considered as "upper" tasks of the task. See `get_uppers`
-fn iter_uppers<'a>(task: &'a (impl TaskGuard + 'a)) -> impl Iterator<Item = TaskId> + 'a {
-    iter_many!(task, Upper { task } count if *count > 0 => task)
+    task.iter_upper().map(|(&id, _count)| id).collect()
 }
 
 /// Returns the aggregation number of the task.
 pub fn get_aggregation_number(task: &impl TaskGuard) -> u32 {
-    get!(task, AggregationNumber)
+    task.get_aggregation_number()
         .map(|a| a.effective)
         .unwrap_or_default()
 }
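// Sketch (not part of the patch): upper/follower edges above are reference *counts*,
// not booleans. An edge "exists" only while its count is positive, and counts may dip
// negative transiently while concurrent updates race. A minimal counted-edge map (the
// real storage is the CounterMap from the first hunk of this diff) might look like:
use std::collections::HashMap;

#[derive(Default)]
struct Edges(HashMap<u32, i32>);

impl Edges {
    // Returns true when the edge transitions from absent (<= 0) to present (> 0).
    fn update_count(&mut self, id: u32, by: i32) -> bool {
        let count = self.0.entry(id).or_insert(0);
        let was_present = *count > 0;
        *count += by;
        let is_present = *count > 0;
        if *count == 0 {
            self.0.remove(&id);
        }
        !was_present && is_present
    }
}

fn main() {
    let mut edges = Edges::default();
    assert!(edges.update_count(7, 1)); // 0 -> 1: edge appears
    assert!(!edges.update_count(7, 1)); // 1 -> 2: already present
    assert!(!edges.update_count(7, -2)); // 2 -> 0: removed, not "new"
    assert!(!edges.update_count(7, -1)); // 0 -> -1: transiently negative, still absent
}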
@@ -363,27 +352,23 @@ impl AggregatedDataUpdate {
         let mut dirty_count = 0;
         let mut current_session_clean_count = 0;
         let mut collectibles_update: Vec<_> =
-            get_many!(task, Collectible { collectible } count => (collectible, *count));
+            task.iter_collectibles().map(|(&k, &v)| (k, v)).collect();
         if is_aggregating_node(aggregation) {
-            dirty_count = get!(task, AggregatedDirtyContainerCount)
+            dirty_count = task
+                .get_aggregated_dirty_container_count()
                 .copied()
                 .unwrap_or_default();
-            current_session_clean_count = get!(task, AggregatedCurrentSessionCleanContainerCount)
+            current_session_clean_count = task
+                .get_aggregated_current_session_clean_container_count()
                 .copied()
                 .unwrap_or_default();
-            let collectibles = iter_many!(
-                task,
-                AggregatedCollectible {
-                    collectible
-                } count if *count > 0 => {
-                    collectible
+            for (collectible, count) in task.iter_aggregated_collectibles() {
+                if *count > 0 {
+                    collectibles_update.push((*collectible, 1));
                 }
-            );
-            for collectible in collectibles {
-                collectibles_update.push((collectible, 1));
             }
         }
-        let (dirty, current_session_clean) = task.dirty();
+        let (dirty, current_session_clean) = task.dirty_state();
         if dirty {
             dirty_count += 1;
         }
@@ -450,7 +435,7 @@ impl AggregatedDataUpdate {
                 // When a dirty container count is increased and the task is considered as active
                 // we need to schedule the dirty tasks in the new dirty container
                 let current_session_update = count - *current_session_clean_update;
-                if current_session_update > 0 && task.has_key(&CachedDataItemKey::Activeness {}) {
+                if current_session_update > 0 && task.has_activeness() {
                     queue.push_find_and_schedule_dirty(dirty_container_id)
                 }
             }
@@ -460,27 +445,18 @@ impl AggregatedDataUpdate {
             let old_dirty_single_container_count;
             let new_dirty_single_container_count;
             if count != 0 {
-                new_dirty_single_container_count = update_count_and_get!(
-                    task,
-                    AggregatedDirtyContainer {
-                        task: dirty_container_id
-                    },
-                    count
-                );
+                new_dirty_single_container_count =
+                    task.update_and_get_aggregated_dirty_containers(dirty_container_id, count);
                 old_dirty_single_container_count = new_dirty_single_container_count - count;
                 dirty_container_count_update = before_after_to_diff_value(
                     old_dirty_single_container_count > 0,
                     new_dirty_single_container_count > 0,
                 );
             } else {
-                new_dirty_single_container_count = get!(
-                    task,
-                    AggregatedDirtyContainer {
-                        task: dirty_container_id
-                    }
-                )
-                .copied()
-                .unwrap_or_default();
+                new_dirty_single_container_count = task
+                    .get_aggregated_dirty_containers(&dirty_container_id)
+                    .copied()
+                    .unwrap_or_default();
                 old_dirty_single_container_count = new_dirty_single_container_count;
             }
@@ -488,25 +464,19 @@ impl AggregatedDataUpdate {
             let old_single_container_current_session_clean_count;
             let new_single_container_current_session_clean_count;
             if *current_session_clean_update != 0 {
-                new_single_container_current_session_clean_count = update_count_and_get!(
-                    task,
-                    AggregatedCurrentSessionCleanContainer {
-                        task: dirty_container_id,
-                    },
-                    *current_session_clean_update
-                );
+                new_single_container_current_session_clean_count = task
+                    .update_and_get_aggregated_current_session_clean_containers(
+                        dirty_container_id,
+                        *current_session_clean_update,
+                    );
                 old_single_container_current_session_clean_count =
                     new_single_container_current_session_clean_count
                         - *current_session_clean_update;
             } else {
-                new_single_container_current_session_clean_count = get!(
-                    task,
-                    AggregatedCurrentSessionCleanContainer {
-                        task: dirty_container_id,
-                    }
-                )
-                .copied()
-                .unwrap_or_default();
+                new_single_container_current_session_clean_count = task
+                    .get_aggregated_current_session_clean_containers(&dirty_container_id)
+                    .copied()
+                    .unwrap_or_default();
                 old_single_container_current_session_clean_count =
                     new_single_container_current_session_clean_count;
             }
@@ -522,7 +492,7 @@ impl AggregatedDataUpdate {
                 before_after_to_diff_value(was_single_container_clean, is_single_container_clean);
 
             if dirty_container_count_update != 0 || current_session_clean_update != 0 {
-                let (is_self_dirty, current_session_self_clean) = task.dirty();
+                let (is_self_dirty, current_session_self_clean) = task.dirty_state();
 
                 let task_id = task.id();
@@ -530,15 +500,15 @@ impl AggregatedDataUpdate {
                 let old_dirty_container_count;
                 let new_dirty_container_count;
                 if dirty_container_count_update != 0 {
-                    new_dirty_container_count = update_count_and_get!(
-                        task,
-                        AggregatedDirtyContainerCount,
-                        dirty_container_count_update
-                    );
+                    new_dirty_container_count = task
+                        .update_and_get_aggregated_dirty_container_count(
+                            dirty_container_count_update,
+                        );
                     old_dirty_container_count =
                         new_dirty_container_count - dirty_container_count_update;
                 } else {
-                    new_dirty_container_count = get!(task, AggregatedDirtyContainerCount)
+                    new_dirty_container_count = task
+                        .get_aggregated_dirty_container_count()
                         .copied()
                         .unwrap_or_default();
                     old_dirty_container_count = new_dirty_container_count;
@@ -548,18 +518,17 @@ impl AggregatedDataUpdate {
                 let new_current_session_clean_container_count;
                 let old_current_session_clean_container_count;
                 if current_session_clean_update != 0 {
-                    new_current_session_clean_container_count = update_count_and_get!(
-                        task,
-                        AggregatedCurrentSessionCleanContainerCount,
-                        current_session_clean_update
-                    );
+                    new_current_session_clean_container_count = task
+                        .update_and_get_aggregated_current_session_clean_container_count(
+                            current_session_clean_update,
+                        );
                     old_current_session_clean_container_count =
                         new_current_session_clean_container_count - current_session_clean_update;
                 } else {
-                    new_current_session_clean_container_count =
-                        get!(task, AggregatedCurrentSessionCleanContainerCount)
-                            .copied()
-                            .unwrap_or_default();
+                    new_current_session_clean_container_count = task
+                        .get_aggregated_current_session_clean_container_count()
+                        .copied()
+                        .unwrap_or_default();
                     old_current_session_clean_container_count =
                         new_current_session_clean_container_count;
                 };
@@ -584,11 +553,11 @@ impl AggregatedDataUpdate {
                 {
                     // When the current task is no longer dirty, we need to fire the
                     // aggregate root events and do some cleanup
-                    if let Some(activeness_state) = get_mut!(task, Activeness) {
+                    if let Some(activeness_state) = task.get_activeness_mut() {
                         activeness_state.all_clean_event.notify(usize::MAX);
                         activeness_state.unset_active_until_clean();
                         if activeness_state.is_empty() {
-                            task.remove(&CachedDataItemKey::Activeness {});
+                            task.take_activeness();
                        }
                    }
                }
@@ -598,33 +567,22 @@ impl AggregatedDataUpdate {
         for (collectible, count) in collectibles_update {
             let mut added = false;
             let mut removed = false;
-            update!(
-                task,
-                AggregatedCollectible {
-                    collectible: *collectible
-                },
-                |old: Option<i32>| {
-                    let old = old.unwrap_or(0);
-                    let new = old + *count;
-                    if old <= 0 && new > 0 {
-                        added = true;
-                    } else if old > 0 && new <= 0 {
-                        removed = true;
-                    }
-                    (new != 0).then_some(new)
+            task.update_aggregated_collectibles(*collectible, |old: Option<i32>| {
+                let old = old.unwrap_or(0);
+                let new = old + *count;
+                if old <= 0 && new > 0 {
+                    added = true;
+                } else if old > 0 && new <= 0 {
+                    removed = true;
                }
-            );
+                (new != 0).then_some(new)
+            });
             if added || removed {
                 let ty = collectible.collectible_type;
-                let dependent: TaskIdVec = get_many!(
-                    task,
-                    CollectiblesDependent {
-                        collectible_type,
-                        task,
-                    } if collectible_type == ty => {
-                        task
-                    }
-                );
+                let dependent: TaskIdVec = task
+                    .iter_collectibles_dependents()
+                    .filter_map(|(collectible_type, task)| (collectible_type == ty).then_some(task))
+                    .collect();
                 if !dependent.is_empty() {
                     queue.push(AggregationUpdateJob::InvalidateDueToCollectiblesChange {
                         task_ids: dependent,
@@ -1011,14 +969,14 @@ impl AggregationUpdateQueue {
     /// Runs the job and all dependent jobs until it's done. It can persist the operation, so
     /// following code might not be executed when persisted.
-    pub fn run(job: AggregationUpdateJob, ctx: &mut impl ExecuteContext) {
+    pub fn run(job: AggregationUpdateJob, ctx: &mut impl ExecuteContext<'_>) {
         let mut queue = Self::new();
         queue.push(job);
         queue.execute(ctx);
     }
 
     /// Executes a single step of the queue. Returns true, when the queue is empty.
-    pub fn process(&mut self, ctx: &mut impl ExecuteContext) -> bool {
+    pub fn process(&mut self, ctx: &mut impl ExecuteContext<'_>) -> bool {
         if let Some(job) = self.jobs.pop_front() {
             let job: AggregationUpdateJobGuard = job.entered();
             match job.job {
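// Sketch (not part of the patch): the closure-based updater pattern behind
// `update_aggregated_collectibles` above. The closure receives the current value as an
// Option and returns the new one; returning None deletes the entry. Hypothetical
// free-function form over a plain HashMap:
use std::collections::HashMap;

fn update<K: std::hash::Hash + Eq + Copy>(
    map: &mut HashMap<K, i32>,
    key: K,
    f: impl FnOnce(Option<i32>) -> Option<i32>,
) {
    match f(map.get(&key).copied()) {
        Some(new) => {
            map.insert(key, new);
        }
        None => {
            map.remove(&key);
        }
    }
}

fn main() {
    let mut counts: HashMap<&str, i32> = HashMap::new();
    update(&mut counts, "c", |old| {
        let new = old.unwrap_or(0) + 1;
        (new != 0).then_some(new) // returning None would delete the entry
    });
    assert_eq!(counts["c"], 1);
}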
@@ -1320,25 +1278,22 @@ impl AggregationUpdateQueue {
             if should_be_inner {
                 // remove all follower edges
-                let count = remove!(upper, Follower { task: task_id }).unwrap_or_default();
+                let count = upper.remove_followers(&task_id).unwrap_or_default();
                 match count.cmp(&0) {
-                    std::cmp::Ordering::Less => upper.add_new(CachedDataItem::Follower {
-                        task: task_id,
-                        value: count,
-                    }),
+                    std::cmp::Ordering::Less => upper.add_followers(task_id, count),
                     std::cmp::Ordering::Greater => {
                         #[cfg(feature = "trace_aggregation_update")]
                         let _span = trace_span!("make inner").entered();
 
-                        if count!(upper, Follower).is_power_of_two() {
+                        if upper.followers_len().is_power_of_two() {
                             self.push_optimize_task(upper_id);
                         }
 
                         let upper_ids = get_uppers(&upper);
 
                         // Add the same amount of upper edges
-                        if update_count!(task, Upper { task: upper_id }, count) {
-                            if count!(task, Upper).is_power_of_two() {
+                        if task.update_upper_count(upper_id, count) {
+                            if task.upper_len().is_power_of_two() {
                                 self.push_optimize_task(task_id);
                             }
                             // When this is a new inner node, update aggregated data and
@@ -1364,9 +1319,7 @@ impl AggregationUpdateQueue {
                             });
                         }
 
-                        if ctx.should_track_activeness()
-                            && upper.has_key(&CachedDataItemKey::Activeness {})
-                        {
+                        if ctx.should_track_activeness() && upper.has_activeness() {
                             // If the upper node is has `Activeness` we need to schedule the
                             // dirty tasks in the new dirty container
                             self.push_find_and_schedule_dirty(task_id);
@@ -1385,7 +1338,7 @@ impl AggregationUpdateQueue {
                     if ctx.should_track_activeness() {
                         // Follower was removed, we might need to update the active count
                         let has_active_count =
-                            get!(upper, Activeness).is_some_and(|a| a.active_counter > 0);
+                            upper.get_activeness().is_some_and(|a| a.active_counter > 0);
                         if has_active_count {
                             // TODO combine both operations to avoid the clone
                             self.push(AggregationUpdateJob::DecreaseActiveCount { task: task_id })
@@ -1396,12 +1349,9 @@ impl AggregationUpdateQueue {
             } else if should_be_follower {
                 // Remove the upper edge
-                let count = remove!(task, Upper { task: upper_id }).unwrap_or_default();
+                let count = task.remove_upper(&upper_id).unwrap_or_default();
                 match count.cmp(&0) {
-                    Ordering::Less => task.add_new(CachedDataItem::Upper {
-                        task: upper_id,
-                        value: count,
-                    }),
+                    Ordering::Less => task.add_upper(upper_id, count),
                     Ordering::Greater => {
                         #[cfg(feature = "trace_aggregation_update")]
                         let _span = trace_span!("make follower").entered();
@@ -1409,15 +1359,15 @@ impl AggregationUpdateQueue {
                         let upper_ids = get_uppers(&upper);
 
                         // Add the same amount of follower edges
-                        if update_count!(upper, Follower { task: task_id }, count) {
+                        if upper.update_followers_count(task_id, count) {
                             // May optimize the task
-                            if count!(upper, Follower).is_power_of_two() {
+                            if upper.followers_len().is_power_of_two() {
                                 self.push_optimize_task(upper_id);
                             }
                             if ctx.should_track_activeness() {
                                 // update active count
                                 let has_active_count =
-                                    get!(upper, Activeness).is_some_and(|a| a.active_counter > 0);
+                                    upper.get_activeness().is_some_and(|a| a.active_counter > 0);
                                 if has_active_count {
                                     self.push(AggregationUpdateJob::IncreaseActiveCount {
                                         task: task_id,
@@ -1465,7 +1415,7 @@ impl AggregationUpdateQueue {
             // both nodes have the same aggregation number
             // We need to change the aggregation number of the task
-            let current = get!(task, AggregationNumber).copied().unwrap_or_default();
+            let current = task.get_aggregation_number().copied().unwrap_or_default();
             self.push(AggregationUpdateJob::UpdateAggregationNumber {
                 task_id,
                 base_aggregation_number: current.base + 1,
@@ -1503,7 +1453,7 @@ impl AggregationUpdateQueue {
         let dirty = task.is_dirty();
         let should_schedule = if let Some(parent_priority) = dirty {
             Some((TaskExecutionReason::ActivateDirty, parent_priority))
-        } else if !task.has_key(&CachedDataItemKey::Output {}) {
+        } else if !task.has_output() {
             Some((
                 TaskExecutionReason::ActivateInitial,
                 ctx.get_current_task_priority(),
@@ -1514,7 +1464,7 @@ impl AggregationUpdateQueue {
 
         // if it has `Activeness` we can skip visiting the nested nodes since
         // this would already be scheduled by the `Activeness`
-        let is_active_until_clean = get!(task, Activeness).is_some_and(|a| a.active_until_clean);
+        let is_active_until_clean = task.get_activeness().is_some_and(|a| a.active_until_clean);
         if !is_active_until_clean {
             let mut dirty_containers = task.dirty_containers().peekable();
             let is_empty = dirty_containers.peek().is_none();
@@ -1522,23 +1472,22 @@ impl AggregationUpdateQueue {
                 self.extend_find_and_schedule_dirty(dirty_containers);
 
                 let activeness_state =
-                    get_mut_or_insert_with!(task, Activeness, || ActivenessState::new(task_id));
+                    task.get_activeness_mut_or_insert_with(|| ActivenessState::new(task_id));
                 activeness_state.set_active_until_clean();
             }
         }
-        if let Some((reason, parent_priority)) = should_schedule {
-            let description = || ctx.get_task_desc_fn(task_id);
-            if task.add(CachedDataItem::new_scheduled(reason, description)) {
-                drop(task);
-                ctx.schedule(task_id, parent_priority);
-            }
+        if let Some((reason, parent_priority)) = should_schedule
+            && task.add_scheduled(reason, || ctx.get_task_desc_fn(task_id))
+        {
+            drop(task);
+            ctx.schedule(task_id, parent_priority);
         }
     }
 
     fn aggregated_data_update(
         &mut self,
         upper_ids: TaskIdVec,
-        ctx: &mut impl ExecuteContext,
+        ctx: &mut impl ExecuteContext<'_>,
         update: AggregatedDataUpdate,
     ) {
         // For performance reasons this should stay `Meta` and not `All`
@@ -1561,7 +1510,7 @@ impl AggregationUpdateQueue {
 
     fn inner_of_uppers_lost_follower(
         &mut self,
-        ctx: &mut impl ExecuteContext,
+        ctx: &mut impl ExecuteContext<'_>,
         lost_follower_id: TaskId,
         mut upper_ids: TaskIdVec,
         mut retry: u16,
@@ -1581,7 +1530,7 @@ impl AggregationUpdateQueue {
             let mut keep_upper = false;
             let mut follower_in_upper = false;
-            update!(follower, Upper { task: upper_id }, |old| {
+            follower.update_upper(upper_id, |old| {
                 let Some(old) = old else {
                     follower_in_upper = true;
                     return None;
@@ -1641,31 +1590,25 @@ impl AggregationUpdateQueue {
             );
             let mut inner_in_upper = false;
             let mut removed_follower = false;
-            update!(
-                upper,
-                Follower {
-                    task: lost_follower_id
-                },
-                |old| {
-                    let Some(old) = old else {
-                        inner_in_upper = true;
-                        return None;
-                    };
-                    if old == 1 {
-                        removed_follower = true;
-                        return None;
-                    }
-                    Some(old - 1)
+            upper.update_followers(lost_follower_id, |old| {
+                let Some(old) = old else {
+                    inner_in_upper = true;
+                    return None;
+                };
+                if old == 1 {
+                    removed_follower = true;
+                    return None;
                }
-            );
+                Some(old - 1)
+            });
             if removed_follower {
                 // May optimize the task
-                if count!(upper, Follower).is_power_of_two() {
+                if upper.followers_len().is_power_of_two() {
                     self.push_optimize_task(upper_id);
                 }
 
                 let has_active_count = ctx.should_track_activeness()
-                    && get!(upper, Activeness).is_some_and(|a| a.active_counter > 0);
+                    && upper.get_activeness().is_some_and(|a| a.active_counter > 0);
                 let upper_ids = get_uppers(&upper);
                 drop(upper);
                 // update active count
@@ -1737,7 +1680,7 @@ impl AggregationUpdateQueue {
             );
             let mut remove_upper = false;
             let mut follower_in_upper = false;
-            update!(follower, Upper { task: upper_id }, |old| {
+            follower.update_upper(upper_id, |old| {
                 let Some(old) = old else {
                     follower_in_upper = true;
                     return None;
@@ -1795,32 +1738,26 @@ impl AggregationUpdateQueue {
             );
             let mut inner_in_upper = false;
             let mut removed_follower = false;
-            update!(
-                upper,
-                Follower {
-                    task: lost_follower_id
-                },
-                |old| {
-                    let Some(old) = old else {
-                        inner_in_upper = true;
-                        return None;
-                    };
-                    if old == 1 {
-                        removed_follower = true;
-                        return None;
-                    }
-                    Some(old - 1)
+            upper.update_followers(lost_follower_id, |old| {
+                let Some(old) = old else {
+                    inner_in_upper = true;
+                    return None;
+                };
+                if old == 1 {
+                    removed_follower = true;
+                    return None;
                }
-            );
+                Some(old - 1)
+            });
             if removed_follower {
                 // May optimize the task
-                if count!(upper, Follower).is_power_of_two() {
+                if upper.followers_len().is_power_of_two() {
                     self.push_optimize_task(upper_id);
                 }
 
                 let upper_ids = get_uppers(&upper);
                 let has_active_count =
-                    get!(upper, Activeness).is_some_and(|a| a.active_counter > 0);
+                    upper.get_activeness().is_some_and(|a| a.active_counter > 0);
                 drop(upper);
                 // update active count
                 if has_active_count {
@@ -1902,28 +1839,22 @@ impl AggregationUpdateQueue {
                 && upper_aggregation_number <= follower_aggregation_number
             {
                 // It's a follower of the upper node
-                if update_count!(
-                    upper,
-                    Follower {
-                        task: new_follower_id
-                    },
-                    count
-                ) {
+                if upper.update_followers_count(new_follower_id, count) {
                     // May optimize the task
-                    if count!(upper, Follower).is_power_of_two() {
+                    if upper.followers_len().is_power_of_two() {
                         self.push_optimize_task(upper_id);
                     }
                     if ctx.should_track_activeness() {
                         // update active count
                         let has_active_count =
-                            get!(upper, Activeness).is_some_and(|a| a.active_counter > 0);
+                            upper.get_activeness().is_some_and(|a| a.active_counter > 0);
                        if has_active_count {
                            tasks_for_which_increment_active_count.push(new_follower_id);
                        }
                    }
                    // notify uppers about new follower
-                    for upper_id in iter_uppers(&upper) {
+                    for (&upper_id, _) in upper.iter_upper() {
                        *upper_upper_ids_with_new_follower
                            .entry(upper_id)
                            .or_insert(0) += 1;
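// Sketch (not part of the patch): the recurring `followers_len().is_power_of_two()`
// checks above are an amortization trick. Re-running the (comparatively expensive)
// optimize pass only when a collection's size crosses a power of two keeps the number
// of optimize runs logarithmic in the final size instead of linear.
fn should_optimize(len: usize) -> bool {
    len.is_power_of_two() // fires at sizes 1, 2, 4, 8, ...
}

fn main() {
    let triggers: Vec<usize> = (1usize..=64).filter(|&n| should_optimize(n)).collect();
    assert_eq!(triggers, [1, 2, 4, 8, 16, 32, 64]);
}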
false } else { // It's an inner node, continue with the list - if ctx.should_track_activeness() && upper.has_key(&CachedDataItemKey::Activeness {}) - { + if ctx.should_track_activeness() && upper.has_activeness() { is_active = true; } true @@ -1958,13 +1888,13 @@ impl AggregationUpdateQueue { ); let mut uppers_count: Option = None; let mut persistent_uppers = 0; + swap_retain(&mut upper_ids, |upper_item| { let (upper_id, count) = upper_item.task_id_and_count(); - if update_count!(follower, Upper { task: upper_id }, count) { + if follower.update_upper_count(upper_id, count) { // It's a new upper let uppers_count = uppers_count.get_or_insert_with(|| { - let count = - iter_many!(follower, Upper { .. } count if *count > 0 => ()).count(); + let count = follower.upper_len(); count - 1 }); *uppers_count += 1; @@ -1981,7 +1911,7 @@ impl AggregationUpdateQueue { #[cfg(feature = "trace_aggregation_update")] let _span = trace_span!("new inner").entered(); if !upper_ids.is_empty() { - let new_count = count!(follower, Upper); + let new_count = follower.upper_len(); if (new_count - persistent_uppers).next_power_of_two() != new_count.next_power_of_two() { @@ -2013,10 +1943,11 @@ impl AggregationUpdateQueue { ) } } + if !is_active { // We need to check this again, since this might have changed in the // meantime due to race conditions - if upper.has_key(&CachedDataItemKey::Activeness {}) { + if upper.has_activeness() { is_active = true; } } @@ -2092,7 +2023,7 @@ impl AggregationUpdateQueue { TaskDataCategory::Meta, ); if ctx.should_track_activeness() { - let activeness_state = get!(upper, Activeness); + let activeness_state = upper.get_activeness(); is_active = activeness_state.is_some(); has_active_count = activeness_state.is_some_and(|a| a.active_counter > 0); } @@ -2107,9 +2038,9 @@ impl AggregationUpdateQueue { return true; } // It's a follower of the upper node - if update_count!(upper, Follower { task: *follower_id }, *count) { + if upper.update_followers_count(*follower_id, *count) { // May optimize the task - if count!(upper, Follower).is_power_of_two() { + if upper.followers_len().is_power_of_two() { self.push_optimize_task(upper_id); } @@ -2149,8 +2080,8 @@ impl AggregationUpdateQueue { // For performance reasons this should stay `Meta` and not `All` TaskDataCategory::Meta, ); - if update_count!(inner, Upper { task: upper_id }, count) { - if count!(inner, Upper).is_power_of_two() { + if inner.update_upper_count(upper_id, count) { + if inner.upper_len().is_power_of_two() { self.push_optimize_task(inner_id); } @@ -2239,7 +2170,7 @@ impl AggregationUpdateQueue { // For performance reasons this should stay `Meta` and not `All` TaskDataCategory::Meta, ); - is_active = upper.has_key(&CachedDataItemKey::Activeness {}); + is_active = upper.has_activeness(); } if is_active { self.extend_find_and_schedule_dirty( @@ -2275,7 +2206,7 @@ impl AggregationUpdateQueue { fn inner_of_upper_has_new_follower( &mut self, - ctx: &mut impl ExecuteContext, + ctx: &mut impl ExecuteContext<'_>, new_follower_id: TaskId, upper_id: TaskId, count: u32, @@ -2307,20 +2238,14 @@ impl AggregationUpdateQueue { let _span = trace_span!("new follower").entered(); // It's a follower of the upper node - if update_count!( - upper, - Follower { - task: new_follower_id - }, - count - ) { + if upper.update_followers_count(new_follower_id, count) { // May optimize the task - if count!(upper, Follower).is_power_of_two() { + if upper.followers_len().is_power_of_two() { self.push_optimize_task(upper_id); } let has_active_count = 
ctx.should_track_activeness() - && get!(upper, Activeness).is_some_and(|a| a.active_counter > 0); + && upper.get_activeness().is_some_and(|a| a.active_counter > 0); let upper_ids = get_uppers(&upper); drop(upper); // update active count @@ -2352,7 +2277,7 @@ impl AggregationUpdateQueue { let _span = trace_span!("new inner").entered(); // It's an inner node, continue with the list - let mut is_active = upper.has_key(&CachedDataItemKey::Activeness {}); + let mut is_active = upper.has_activeness(); drop(upper); let mut inner = ctx.task( @@ -2360,8 +2285,8 @@ impl AggregationUpdateQueue { // For performance reasons this should stay `Meta` and not `All` TaskDataCategory::Meta, ); - if update_count!(inner, Upper { task: upper_id }, count) { - if count!(inner, Upper).is_power_of_two() { + if inner.update_upper_count(upper_id, count) { + if inner.upper_len().is_power_of_two() { self.push_optimize_task(new_follower_id); } // It's a new upper @@ -2400,7 +2325,7 @@ impl AggregationUpdateQueue { // For performance reasons this should stay `Meta` and not `All` TaskDataCategory::Meta, ); - is_active = upper.has_key(&CachedDataItemKey::Activeness {}); + is_active = upper.has_activeness(); } if is_active { self.push_find_and_schedule_dirty(new_follower_id); @@ -2421,12 +2346,12 @@ impl AggregationUpdateQueue { // For performance reasons this should stay `Meta` and not `All` TaskDataCategory::Meta, ); - let state = get_mut_or_insert_with!(task, Activeness, || ActivenessState::new(task_id)); + let state = task.get_activeness_mut_or_insert_with(|| ActivenessState::new(task_id)); let is_new = state.is_empty(); let is_zero = state.decrement_active_counter(); let is_empty = state.is_empty(); if is_empty { - task.remove(&CachedDataItemKey::Activeness {}); + task.take_activeness(); } debug_assert!( !(is_new && is_zero), @@ -2465,13 +2390,13 @@ impl AggregationUpdateQueue { // For performance reasons this should stay `Meta` and not `All` TaskDataCategory::Meta, ); - let state = get_mut_or_insert_with!(task, Activeness, || ActivenessState::new(task_id)); + let state = task.get_activeness_mut_or_insert_with(|| ActivenessState::new(task_id)); let is_new = state.is_empty(); let is_positive_now = state.increment_active_counter(); let is_empty = state.is_empty(); // This can happen if active count was negative before if is_empty { - task.remove(&CachedDataItemKey::Activeness {}); + task.take_activeness(); } debug_assert!( !is_new || is_positive_now, @@ -2512,7 +2437,7 @@ impl AggregationUpdateQueue { // For performance reasons this should stay `Meta` and not `All` TaskDataCategory::Meta, ); - let current = get!(task, AggregationNumber).copied().unwrap_or_default(); + let current = task.get_aggregation_number().copied().unwrap_or_default(); let old = current.effective; // The base aggregation number can only increase let mut base_aggregation_number = max(current.base, base_aggregation_number); @@ -2532,12 +2457,10 @@ impl AggregationUpdateQueue { }; if old >= aggregation_number { if base_aggregation_number != current.base && distance != current.distance { - task.insert(CachedDataItem::AggregationNumber { - value: AggregationNumber { - base: base_aggregation_number, - distance, - effective: old, - }, + task.set_aggregation_number(AggregationNumber { + base: base_aggregation_number, + distance, + effective: old, }); } } else { @@ -2549,43 +2472,34 @@ impl AggregationUpdateQueue { aggregation_number ) .entered(); - task.insert(CachedDataItem::AggregationNumber { - value: AggregationNumber { - base: base_aggregation_number, 
- distance, - effective: aggregation_number, - }, + task.set_aggregation_number(AggregationNumber { + base: base_aggregation_number, + distance, + effective: aggregation_number, }); if !is_aggregating_node(old) && is_aggregating_node(aggregation_number) { // When converted from leaf to aggregating node, all children become // followers - let children: Vec<_> = get_many!(task, Child { task } => task); - task.extend_new( - CachedDataItemType::Follower, - children - .iter() - .map(|&task| CachedDataItem::Follower { task, value: 1 }), - ); + let children: Vec<_> = task.iter_children().collect(); + task.update_followers_counts(children.into_iter(), 1); } if is_aggregating_node(aggregation_number) { // followers might become inner nodes when the aggregation number is // increased - let followers = iter_many!(task, Follower { task } count if *count > 0 => task); - for follower_id in followers { + for (&follower_id, _) in task.iter_followers() { self.push(AggregationUpdateJob::BalanceEdge { upper_id: task_id, task_id: follower_id, }); } - let uppers = iter_uppers(&task); - for upper_id in uppers { + let uppers = task.iter_upper(); + for (&upper_id, _count) in uppers { self.push(AggregationUpdateJob::BalanceEdge { upper_id, task_id }); } } else { - let children = iter_many!(task, Child { task } => task); - for child_id in children { + for child_id in task.iter_children() { self.push(AggregationUpdateJob::UpdateAggregationNumber { task_id: child_id, base_aggregation_number: aggregation_number + 1, @@ -2609,24 +2523,24 @@ impl AggregationUpdateQueue { // For performance reasons this should stay `Meta` and not `All` TaskDataCategory::Meta, ); - let aggregation_number = get!(task, AggregationNumber).copied().unwrap_or_default(); + let aggregation_number = task.get_aggregation_number().copied().unwrap_or_default(); if is_root_node(aggregation_number.effective) { return; } let follower_count = if is_aggregating_node(aggregation_number.effective) { - let follower_count = count!(task, Follower); + let follower_count = task.followers_len(); if follower_count == 0 { return; } follower_count } else { - let children_count = count!(task, Child); + let children_count = task.children_len(); if children_count == 0 { return; } children_count }; - let upper_count = count!(task, Upper); + let upper_count = task.upper_len(); if upper_count <= 1 || upper_count.saturating_sub(1) * follower_count <= max( @@ -2729,7 +2643,7 @@ impl AggregationUpdateQueue { } impl Operation for AggregationUpdateQueue { - fn execute(mut self, ctx: &mut impl ExecuteContext) { + fn execute(mut self, ctx: &mut impl ExecuteContext<'_>) { loop { ctx.operation_suspend_point(&self); if self.process(ctx) { diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/operation/cleanup_old_edges.rs b/turbopack/crates/turbo-tasks-backend/src/backend/operation/cleanup_old_edges.rs index 8d27912df4f33..4acffba837e33 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/operation/cleanup_old_edges.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/operation/cleanup_old_edges.rs @@ -7,7 +7,7 @@ use turbo_tasks::TaskId; use crate::{ backend::{ - TaskDataCategory, get, get_many, + TaskDataCategory, operation::{ AggregatedDataUpdate, ExecuteContext, Operation, aggregation_update::{ @@ -15,10 +15,9 @@ use crate::{ get_aggregation_number, get_uppers, is_aggregating_node, }, }, - storage::update_count, storage_schema::TaskStorageAccessors, }, - data::{CachedDataItemKey, CellRef, CollectibleRef, CollectiblesRef}, + data::{CellRef, CollectibleRef, 
CollectiblesRef}, }; #[derive(Encode, Decode, Clone, Default)] @@ -50,7 +49,7 @@ impl CleanupOldEdgesOperation { task_id: TaskId, outdated: Vec, queue: AggregationUpdateQueue, - ctx: &mut impl ExecuteContext, + ctx: &mut impl ExecuteContext<'_>, ) { CleanupOldEdgesOperation::RemoveEdges { task_id, @@ -62,7 +61,7 @@ impl CleanupOldEdgesOperation { } impl Operation for CleanupOldEdgesOperation { - fn execute(mut self, ctx: &mut impl ExecuteContext) { + fn execute(mut self, ctx: &mut impl ExecuteContext<'_>) { loop { ctx.operation_suspend_point(&self); match self { @@ -84,8 +83,8 @@ impl Operation for CleanupOldEdgesOperation { _ => true, }); let mut task = ctx.task(task_id, TaskDataCategory::All); - for &child_id in children.iter() { - task.remove(&CachedDataItemKey::Child { task: child_id }); + for task_id in children.iter() { + task.remove_children(task_id); } if is_aggregating_node(get_aggregation_number(&task)) { queue.push(AggregationUpdateJob::InnerOfUpperLostFollowers { @@ -96,7 +95,8 @@ impl Operation for CleanupOldEdgesOperation { } else { let upper_ids = get_uppers(&task); let has_active_count = ctx.should_track_activeness() - && get!(task, Activeness) + && task + .get_activeness() .is_some_and(|a| a.active_counter > 0); drop(task); if has_active_count { @@ -127,19 +127,20 @@ impl Operation for CleanupOldEdgesOperation { let mut task = ctx.task(task_id, TaskDataCategory::All); let mut emptied_collectables = FxHashSet::default(); for (collectible, count) in collectibles.iter_mut() { - if update_count!( - task, - Collectible { - collectible: *collectible - }, - *count - ) { + if task + .update_collectibles_positive_crossing(*collectible, *count) + { emptied_collectables.insert(collectible.collectible_type); } } for ty in emptied_collectables { - let task_ids = get_many!(task, CollectiblesDependent { collectible_type, task } if collectible_type == ty => { task }); + let task_ids: SmallVec<[_; 4]> = task + .iter_collectibles_dependents() + .filter_map(|(collectible_type, task)| { + (collectible_type == ty).then_some(task) + }) + .collect(); queue.push( AggregationUpdateJob::InvalidateDueToCollectiblesChange { task_ids, @@ -162,21 +163,17 @@ impl Operation for CleanupOldEdgesOperation { ) => { { let mut task = ctx.task(cell_task_id, TaskDataCategory::Data); - task.remove(&CachedDataItemKey::CellDependent { - cell, - key, - task: task_id, - }); + task.remove_cell_dependents(&(cell, key, task_id)); } { let mut task = ctx.task(task_id, TaskDataCategory::Data); - task.remove(&CachedDataItemKey::CellDependency { - target: CellRef { + task.remove_cell_dependencies(&( + CellRef { task: cell_task_id, cell, }, key, - }); + )); } } OutdatedEdge::OutputDependency(output_task_id) => { @@ -189,15 +186,11 @@ impl Operation for CleanupOldEdgesOperation { .entered(); { let mut task = ctx.task(output_task_id, TaskDataCategory::Data); - task.remove(&CachedDataItemKey::OutputDependent { - task: task_id, - }); + task.remove_output_dependent(&task_id); } { let mut task = ctx.task(task_id, TaskDataCategory::Data); - task.remove(&CachedDataItemKey::OutputDependency { - target: output_task_id, - }); + task.remove_output_dependencies(&output_task_id); } } OutdatedEdge::CollectiblesDependency(CollectiblesRef { @@ -207,18 +200,16 @@ impl Operation for CleanupOldEdgesOperation { { let mut task = ctx.task(dependent_task_id, TaskDataCategory::Data); - task.remove(&CachedDataItemKey::CollectiblesDependent { + task.remove_collectibles_dependents(&( collectible_type, - task: task_id, - }); + task_id, + )); } { let mut 
task = ctx.task(task_id, TaskDataCategory::Data); - task.remove(&CachedDataItemKey::CollectiblesDependency { - target: CollectiblesRef { - collectible_type, - task: dependent_task_id, - }, + task.remove_collectibles_dependencies(&CollectiblesRef { + collectible_type, + task: dependent_task_id, }); } } diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/operation/connect_child.rs b/turbopack/crates/turbo-tasks-backend/src/backend/operation/connect_child.rs index 3e14326df6a4c..16424953d73fe 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/operation/connect_child.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/operation/connect_child.rs @@ -3,14 +3,14 @@ use turbo_tasks::{TaskExecutionReason, TaskId}; use crate::{ backend::{ - TaskDataCategory, get_mut, + TaskDataCategory, operation::{ - ExecuteContext, Operation, + ExecuteContext, Operation, TaskGuard, aggregation_update::{AggregationUpdateJob, AggregationUpdateQueue}, }, storage_schema::TaskStorageAccessors, }, - data::{CachedDataItem, CachedDataItemKey, InProgressState, InProgressStateInner}, + data::{InProgressState, InProgressStateInner}, }; #[derive(Encode, Decode, Clone, Default)] @@ -33,7 +33,7 @@ impl ConnectChildOperation { let mut parent_task = ctx.task(parent_task_id, TaskDataCategory::All); let Some(InProgressState::InProgress(box InProgressStateInner { new_children, .. - })) = get_mut!(parent_task, InProgress) + })) = parent_task.get_in_progress_mut() else { panic!("Task is not in progress while calling another task: {parent_task:?}"); }; @@ -43,9 +43,7 @@ impl ConnectChildOperation { return; } - if parent_task.has_key(&CachedDataItemKey::Child { - task: child_task_id, - }) { + if parent_task.children_contains(&child_task_id) { // It is already connected, we can skip the rest return; } @@ -69,11 +67,10 @@ impl ConnectChildOperation { } else { let mut child_task = ctx.task(child_task_id, TaskDataCategory::All); - if !child_task.has_key(&CachedDataItemKey::Output {}) - && child_task.add(CachedDataItem::new_scheduled( - TaskExecutionReason::Connect, - || ctx.get_task_desc_fn(child_task_id), - )) + if !child_task.has_output() + && child_task.add_scheduled(TaskExecutionReason::Connect, || { + ctx.get_task_desc_fn(child_task_id) + }) { ctx.schedule_task(child_task, ctx.get_current_task_priority()); } @@ -87,7 +84,7 @@ impl ConnectChildOperation { } impl Operation for ConnectChildOperation { - fn execute(mut self, ctx: &mut impl ExecuteContext) { + fn execute(mut self, ctx: &mut impl ExecuteContext<'_>) { loop { ctx.operation_suspend_point(&self); match self { diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/operation/connect_children.rs b/turbopack/crates/turbo-tasks-backend/src/backend/operation/connect_children.rs index 3009a06f29bd2..c4db6a67d360c 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/operation/connect_children.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/operation/connect_children.rs @@ -6,13 +6,10 @@ use turbo_tasks::{ util::{good_chunk_size, into_chunks}, }; -use crate::{ - backend::operation::{ - AggregationUpdateJob, AggregationUpdateQueue, ChildExecuteContext, ExecuteContext, - Operation, TaskGuard, aggregation_update::InnerOfUppersHasNewFollowersJob, - get_aggregation_number, get_uppers, is_aggregating_node, - }, - data::{CachedDataItem, CachedDataItemType}, +use crate::backend::operation::{ + AggregationUpdateJob, AggregationUpdateQueue, ChildExecuteContext, ExecuteContext, Operation, + TaskGuard, 
aggregation_update::InnerOfUppersHasNewFollowersJob, get_aggregation_number, + get_uppers, is_aggregating_node, }; pub fn connect_children( @@ -27,12 +24,13 @@ pub fn connect_children( let parent_aggregation = get_aggregation_number(&parent_task); - parent_task.extend_new( - CachedDataItemType::Child, - new_children.iter().map(|&new_child| CachedDataItem::Child { - task: new_child, - value: (), - }), + let old_children = parent_task.children_len(); + parent_task.extend_children(new_children.iter().copied()); + debug_assert!( + old_children + new_children.len() == parent_task.children_len(), + "Attempted to connect {len} new children, but some of them were already present in \ + {parent_task_id}", + len = new_children.len() ); let new_follower_ids: SmallVec<_> = new_children.into_iter().collect(); diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/operation/invalidate.rs b/turbopack/crates/turbo-tasks-backend/src/backend/operation/invalidate.rs index fd0be0958fd42..72c3876fcb877 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/operation/invalidate.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/operation/invalidate.rs @@ -11,9 +11,8 @@ use crate::{ AggregationUpdateJob, AggregationUpdateQueue, ComputeDirtyAndCleanUpdate, }, }, - storage::{get, get_mut, remove}, }, - data::{CachedDataItem, CachedDataItemKey, Dirtyness, InProgressState, InProgressStateInner}, + data::{Dirtyness, InProgressState, InProgressStateInner}, }; #[derive(Encode, Decode, Clone, Default)] @@ -35,7 +34,7 @@ impl InvalidateOperation { pub fn run( task_ids: SmallVec<[TaskId; 4]>, #[cfg(feature = "trace_task_dirty")] cause: TaskDirtyCause, - mut ctx: impl ExecuteContext, + mut ctx: impl ExecuteContext<'_>, ) { InvalidateOperation::MakeDirty { task_ids, @@ -47,7 +46,7 @@ impl InvalidateOperation { } impl Operation for InvalidateOperation { - fn execute(mut self, ctx: &mut impl ExecuteContext) { + fn execute(mut self, ctx: &mut impl ExecuteContext<'_>) { loop { ctx.operation_suspend_point(&self); match self { @@ -183,7 +182,7 @@ pub fn make_task_dirty( task_id: TaskId, #[cfg(feature = "trace_task_dirty")] cause: TaskDirtyCause, queue: &mut AggregationUpdateQueue, - ctx: &mut impl ExecuteContext, + ctx: &mut impl ExecuteContext<'_>, ) { if ctx.is_once_task(task_id) { return; @@ -208,12 +207,12 @@ pub fn make_task_dirty_internal( make_stale: bool, #[cfg(feature = "trace_task_dirty")] cause: TaskDirtyCause, queue: &mut AggregationUpdateQueue, - ctx: &mut impl ExecuteContext, + ctx: &mut impl ExecuteContext<'_>, ) { // There must be no way to invalidate immutable tasks. If there would be a way the task is not // immutable. #[cfg(any(debug_assertions, feature = "verify_immutable"))] - if task.is_immutable() { + if task.immutable() { #[cfg(feature = "trace_task_dirty")] let extra_info = format!( " Invalidation cause: {}", @@ -231,7 +230,7 @@ pub fn make_task_dirty_internal( if make_stale && let Some(InProgressState::InProgress(box InProgressStateInner { stale, .. 
})) = - get_mut!(task, InProgress) + task.get_in_progress_mut() && !*stale { #[cfg(feature = "trace_task_dirty")] @@ -244,7 +243,7 @@ pub fn make_task_dirty_internal( .entered(); *stale = true; } - let current = get!(task, Dirty); + let current = task.get_dirty(); let (old_self_dirty, old_current_session_self_clean, parent_priority) = match current { Some(Dirtyness::Dirty(current_priority)) => { #[cfg(feature = "trace_task_dirty")] @@ -257,22 +256,19 @@ pub fn make_task_dirty_internal( .entered(); // already dirty let parent_priority = ctx.get_current_task_priority(); - if current_priority >= &parent_priority { + if *current_priority >= parent_priority { // Update the priority to be the lower one - task.insert(CachedDataItem::Dirty { - value: Dirtyness::Dirty(parent_priority), - }); + task.set_dirty(Dirtyness::Dirty(parent_priority)); } return; } Some(Dirtyness::SessionDependent) => { let parent_priority = ctx.get_current_task_priority(); - task.insert(CachedDataItem::Dirty { - value: Dirtyness::Dirty(parent_priority), - }); + task.set_dirty(Dirtyness::Dirty(parent_priority)); // It was a session-dependent dirty before, so we need to remove that clean count - let was_current_session_clean = remove!(task, CurrentSessionClean).is_some(); + let was_current_session_clean = task.current_session_clean(); if was_current_session_clean { + task.set_current_session_clean(false); // There was a clean count for a session. If it was the current session, we need to // propagate that change. (true, true, parent_priority) @@ -290,9 +286,7 @@ pub fn make_task_dirty_internal( } None => { let parent_priority = ctx.get_current_task_priority(); - task.insert(CachedDataItem::Dirty { - value: Dirtyness::Dirty(parent_priority), - }); + task.set_dirty(Dirtyness::Dirty(parent_priority)); // It was clean before, so we need to increase the dirty count (false, false, parent_priority) } @@ -301,13 +295,14 @@ pub fn make_task_dirty_internal( let new_self_dirty = true; let new_current_session_self_clean = false; - let dirty_container_count = get!(task, AggregatedDirtyContainerCount) + let dirty_container_count = task + .get_aggregated_dirty_container_count() + .copied() + .unwrap_or_default(); + let current_session_clean_container_count = task + .get_aggregated_current_session_clean_container_count() .copied() .unwrap_or_default(); - let current_session_clean_container_count = - get!(task, AggregatedCurrentSessionCleanContainerCount) - .copied() - .unwrap_or_default(); #[cfg(feature = "trace_task_dirty")] let _span = tracing::trace_span!( @@ -337,18 +332,15 @@ pub fn make_task_dirty_internal( )); } - let should_schedule = - !ctx.should_track_activeness() || task.has_key(&CachedDataItemKey::Activeness {}); + let should_schedule = !ctx.should_track_activeness() || task.has_activeness(); - if should_schedule { - let description = || ctx.get_task_desc_fn(task_id); - if task.add(CachedDataItem::new_scheduled( - TaskExecutionReason::Invalidated, - description, - )) { - drop(task); - let task = ctx.task(task_id, TaskDataCategory::All); - ctx.schedule_task(task, parent_priority); - } + if should_schedule + && task.add_scheduled(TaskExecutionReason::Invalidated, || { + ctx.get_task_desc_fn(task_id) + }) + { + drop(task); + let task = ctx.task(task_id, TaskDataCategory::All); + ctx.schedule_task(task, parent_priority); } } diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/operation/leaf_distance_update.rs b/turbopack/crates/turbo-tasks-backend/src/backend/operation/leaf_distance_update.rs index 
9fbd0b622b2eb..0a04eb6c68de2 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/operation/leaf_distance_update.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/operation/leaf_distance_update.rs @@ -9,14 +9,10 @@ use rustc_hash::FxHashMap; use tracing::{span::Span, trace_span}; use turbo_tasks::TaskId; -use crate::{ - backend::{ - TaskDataCategory, - operation::{ExecuteContext, Operation}, - storage::{get, iter_many}, - storage_schema::TaskStorageAccessors, - }, - data::CachedDataItem, +use crate::backend::{ + TaskDataCategory, + operation::{ExecuteContext, Operation}, + storage_schema::TaskStorageAccessors, }; /// The maximum number of leaf distance updates to process before yielding back to the executor. @@ -148,7 +144,7 @@ impl LeafDistanceUpdateQueue { TaskDataCategory::Data, ); debug_assert!(dependencies_max_distance_in_buffer < u32::MAX / 2); - let mut leaf_distance = get!(task, LeafDistance).copied().unwrap_or_default(); + let mut leaf_distance = task.get_leaf_distance().copied().unwrap_or_default(); if leaf_distance.distance > dependencies_distance { // It is strictly monotonic. No need to update. return; @@ -165,22 +161,17 @@ impl LeafDistanceUpdateQueue { // We are within the buffer zone, keep the max as is leaf_distance.distance = dependencies_distance + 1; } - let dependents = iter_many!(task, OutputDependent { task } => task) - // TODO Technically this is also needed, but there are cycles in the CellDependent graph - // So we need to handle that properly first - // When enabling this, make sure to also call the leaf update queue when adding CellDependents - // .chain(iter_many!(task, CellDependent { task, .. } => task)) - ; - for dependent_id in dependents { + // TODO Technically CellDependent is also needed, but there are cycles in the CellDependent + // graph. So we need to handle that properly first. When enabling this, make sure to also + // call the leaf update queue when adding CellDependents. 
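For context, the invariant maintained here is that a task's leaf distance stays strictly greater than the distance of every task it reads, while `max_distance_in_buffer` absorbs small increases so that not every bump has to propagate to dependents. A simplified standalone sketch of that rule, where the `BUFFER` constant and the exact out-of-buffer branch are assumptions (the real operation also threads each dependency's own buffer bound through the queue):

    #[derive(Clone, Copy, Default, Debug, PartialEq)]
    struct LeafDistance {
        distance: u32,
        max_distance_in_buffer: u32,
    }

    /// Hypothetical buffer size; the real value is a backend implementation detail.
    const BUFFER: u32 = 64;

    /// Returns true when dependents need to be revisited.
    fn bump(current: &mut LeafDistance, dependencies_distance: u32) -> bool {
        if current.distance > dependencies_distance {
            // Already strictly monotonic, nothing to propagate.
            return false;
        }
        if dependencies_distance >= current.max_distance_in_buffer {
            // Outside the buffer zone: open a new buffer so the next few
            // increases only move `distance`, not the buffer bound.
            current.max_distance_in_buffer = dependencies_distance + BUFFER;
        }
        // Within (or entering) the buffer zone, only the distance moves.
        current.distance = dependencies_distance + 1;
        true
    }

    fn main() {
        let mut d = LeafDistance::default();
        assert!(bump(&mut d, 0)); // 0 -> 1, opens a buffer
        assert_eq!(d.distance, 1);
        assert!(!bump(&mut d, 0)); // already strictly greater, no-op
    }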
+ for dependent_id in task.iter_output_dependent() { self.push( dependent_id, leaf_distance.distance, leaf_distance.max_distance_in_buffer, ); } - task.insert(CachedDataItem::LeafDistance { - value: leaf_distance, - }); + task.set_leaf_distance(leaf_distance); } } diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/operation/mod.rs b/turbopack/crates/turbo-tasks-backend/src/backend/operation/mod.rs index 979f526f90b75..e59f0dc4d8376 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/operation/mod.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/operation/mod.rs @@ -16,17 +16,18 @@ use std::{ use bincode::{Decode, Encode}; use turbo_tasks::{ - CellId, FxIndexMap, TaskId, TaskPriority, TurboTasksBackendApi, TypedSharedReference, + CellId, FxIndexMap, TaskExecutionReason, TaskId, TaskPriority, TurboTasksBackendApi, + TypedSharedReference, }; use crate::{ backend::{ OperationGuard, TaskDataCategory, TransientTask, TurboTasksBackend, TurboTasksBackendInner, - storage::{SpecificTaskDataCategory, StorageWriteGuard, get, iter_many, remove}, + storage::{SpecificTaskDataCategory, StorageWriteGuard}, storage_schema::{TaskStorage, TaskStorageAccessors}, }, backing_storage::{BackingStorage, BackingStorageSealed}, - data::{CachedDataItemKey, Dirtyness}, + data::{ActivenessState, CollectibleRef, Dirtyness, InProgressState}, }; pub trait Operation: @@ -354,7 +355,7 @@ where }); } else { for idx in tasks_to_restore_for_meta_indicies { - tasks[idx].3 = Some(TaskStorage::default()); + tasks[idx].3 = Some(TaskStorage::new()); } } } @@ -609,9 +610,9 @@ where } fn schedule_task(&self, task: Self::TaskGuardImpl, parent_priority: TaskPriority) { - let priority = if get!(task, Output).is_some() { + let priority = if task.has_output() { TaskPriority::invalidation( - get!(task, LeafDistance) + task.get_leaf_distance() .copied() .unwrap_or_default() .distance, @@ -673,19 +674,60 @@ impl<'e, B: BackingStorage> ChildExecuteContext<'e> for ChildExecuteContextImpl< pub trait TaskGuard: Debug + TaskStorageAccessors { fn id(&self) -> TaskId; + /// Get mutable reference to the activeness state, inserting a new one if not present + fn get_activeness_mut_or_insert_with(&mut self, f: F) -> &mut ActivenessState + where + F: FnOnce() -> ActivenessState; + + // ============ Aggregated Container Count (scalar) APIs ============ + // These are for the scalar total count fields, not the CounterMap per-task fields. + + /// Update the aggregated dirty container count (the scalar total count field) by the given + /// delta and return the new value. + fn update_and_get_aggregated_dirty_container_count(&mut self, delta: i32) -> i32 { + let current = self + .get_aggregated_dirty_container_count() + .copied() + .unwrap_or(0); + let new_value = current + delta; + if new_value == 0 { + self.take_aggregated_dirty_container_count(); + } else { + self.set_aggregated_dirty_container_count(new_value); + } + new_value + } + + /// Update the aggregated current session clean container count (the scalar total count field) + /// by the given delta and return the new value. 
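These update-and-get helpers follow the sparse-storage convention visible in their bodies: a count of zero is represented by removing the field (`take_*`) instead of persisting an explicit 0. A minimal sketch of that convention over a plain `Option<i32>` (hypothetical field, not the generated accessors):

    /// Sketch: an "absent means zero" counter field.
    #[derive(Default)]
    struct Counts {
        aggregated_dirty: Option<i32>, // None encodes 0
    }

    impl Counts {
        fn update_and_get(&mut self, delta: i32) -> i32 {
            let new_value = self.aggregated_dirty.unwrap_or(0) + delta;
            // Keep storage sparse: never keep an explicit zero around.
            self.aggregated_dirty = (new_value != 0).then_some(new_value);
            new_value
        }
    }

    fn main() {
        let mut c = Counts::default();
        assert_eq!(c.update_and_get(2), 2);
        assert_eq!(c.update_and_get(-2), 0);
        assert!(c.aggregated_dirty.is_none()); // zero is stored as "absent"
    }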
+ fn update_and_get_aggregated_current_session_clean_container_count( + &mut self, + delta: i32, + ) -> i32 { + let current = self + .get_aggregated_current_session_clean_container_count() + .copied() + .unwrap_or(0); + let new_value = current + delta; + if new_value == 0 { + self.take_aggregated_current_session_clean_container_count(); + } else { + self.set_aggregated_current_session_clean_container_count(new_value); + } + new_value + } + fn invalidate_serialization(&mut self); /// Determine which tasks to prefetch for a task. /// Only returns Some once per task. /// It returns a set of tasks and which info is needed. fn prefetch(&mut self) -> Option>; - fn is_immutable(&self) -> bool { - self.has_key(&CachedDataItemKey::Immutable {}) - } + fn is_dirty(&self) -> Option { - get!(self, Dirty).and_then(|dirtyness| match dirtyness { + self.get_dirty().and_then(|dirtyness| match dirtyness { Dirtyness::Dirty(priority) => Some(*priority), Dirtyness::SessionDependent => { - if get!(self, CurrentSessionClean).is_none() { + if !self.current_session_clean() { Some(TaskPriority::leaf()) } else { None @@ -694,52 +736,54 @@ pub trait TaskGuard: Debug + TaskStorageAccessors { }) } fn dirtyness_and_session(&self) -> Option<(Dirtyness, bool)> { - match get!(self, Dirty)? { + match self.get_dirty()? { Dirtyness::Dirty(priority) => Some((Dirtyness::Dirty(*priority), false)), - Dirtyness::SessionDependent => Some(( - Dirtyness::SessionDependent, - get!(self, CurrentSessionClean).is_some(), - )), + Dirtyness::SessionDependent => { + Some((Dirtyness::SessionDependent, self.current_session_clean())) + } } } /// Returns (is_dirty, is_clean_in_current_session) - fn dirty(&self) -> (bool, bool) { - match get!(self, Dirty) { + fn dirty_state(&self) -> (bool, bool) { + match self.get_dirty() { None => (false, false), Some(Dirtyness::Dirty(_)) => (true, false), - Some(Dirtyness::SessionDependent) => (true, get!(self, CurrentSessionClean).is_some()), + Some(Dirtyness::SessionDependent) => (true, self.current_session_clean()), } } fn dirty_containers(&self) -> impl Iterator { self.dirty_containers_with_count() .map(|(task_id, _)| task_id) } - fn dirty_containers_with_count(&self) -> impl Iterator { - iter_many!(self, AggregatedDirtyContainer { task } count => (task, *count)).filter( - move |&(task_id, count)| { + fn dirty_containers_with_count(&self) -> impl Iterator + '_ { + let dirty_map = self.aggregated_dirty_containers(); + let clean_map = self.aggregated_current_session_clean_containers(); + dirty_map.into_iter().flat_map(move |map| { + map.iter().filter_map(move |(&task_id, &count)| { if count > 0 { - let clean_count = get!( - self, - AggregatedCurrentSessionCleanContainer { task: task_id } - ) - .copied() - .unwrap_or_default(); - count > clean_count - } else { - false + let clean_count = clean_map + .and_then(|m| m.get(&task_id)) + .copied() + .unwrap_or_default(); + if count > clean_count { + return Some((task_id, count)); + } } - }, - ) + None + }) + }) } fn has_dirty_containers(&self) -> bool { - let dirty_count = get!(self, AggregatedDirtyContainerCount) + let dirty_count = self + .get_aggregated_dirty_container_count() .copied() .unwrap_or_default(); if dirty_count <= 0 { return false; } - let clean_count = get!(self, AggregatedCurrentSessionCleanContainerCount) + let clean_count = self + .get_aggregated_current_session_clean_container_count() .copied() .unwrap_or_default(); dirty_count > clean_count @@ -750,9 +794,10 @@ pub trait TaskGuard: Debug + TaskStorageAccessors { cell: CellId, ) -> Option { if 
is_serializable_cell_content { - remove!(self, CellData { cell }) + self.remove_persistent_cell_data(&cell) } else { - remove!(self, TransientCellData { cell }).map(|sr| sr.into_typed(cell.type_id)) + self.remove_transient_cell_data(&cell) + .map(|sr| sr.into_typed(cell.type_id)) } } fn get_cell_data( @@ -761,18 +806,77 @@ pub trait TaskGuard: Debug + TaskStorageAccessors { cell: CellId, ) -> Option { if is_serializable_cell_content { - get!(self, CellData { cell }).cloned() + self.get_persistent_cell_data(&cell).cloned() } else { - get!(self, TransientCellData { cell }).map(|sr| sr.clone().into_typed(cell.type_id)) + self.get_transient_cell_data(&cell) + .map(|sr| sr.clone().into_typed(cell.type_id)) } } fn has_cell_data(&self, is_serializable_cell_content: bool, cell: CellId) -> bool { if is_serializable_cell_content { - self.has_key(&CachedDataItemKey::CellData { cell }) + self.persistent_cell_data_contains(&cell) + } else { + self.transient_cell_data_contains(&cell) + } + } + /// Set cell data, returning the old value if any. + fn set_cell_data( + &mut self, + is_serializable_cell_content: bool, + cell: CellId, + value: TypedSharedReference, + ) -> Option { + if is_serializable_cell_content { + self.insert_persistent_cell_data(cell, value) + } else { + self.insert_transient_cell_data(cell, value.into_untyped()) + .map(|sr| sr.into_typed(cell.type_id)) + } + } + + /// Add new cell data (asserts that the cell is new and didn't exist before). + fn add_cell_data( + &mut self, + is_serializable_cell_content: bool, + cell: CellId, + value: TypedSharedReference, + ) { + let old = self.set_cell_data(is_serializable_cell_content, cell, value); + assert!(old.is_none(), "Cell data already exists for {cell:?}"); + } + + /// Add a scheduled task item. Returns true if the task was successfully added (wasn't already + /// present). + #[must_use] + fn add_scheduled( + &mut self, + reason: TaskExecutionReason, + description: impl FnOnce() -> InnerFn, + ) -> bool + where + InnerFn: Fn() -> String + Sync + Send + 'static, + { + if self.has_in_progress() { + false } else { - self.has_key(&CachedDataItemKey::TransientCellData { cell }) + self.set_in_progress(InProgressState::new_scheduled(reason, description)); + true } } + + // ============ Collectible APIs ============ + + /// Insert an outdated collectible with count. Returns true if it was newly inserted. + #[must_use] + fn insert_outdated_collectible(&mut self, collectible: CollectibleRef, value: i32) -> bool { + // Check if already exists + if self.get_outdated_collectibles(&collectible).is_some() { + return false; + } + // Insert new entry + self.add_outdated_collectibles(collectible, value); + true + } } pub struct TaskGuardImpl<'a, B: BackingStorage> { @@ -797,34 +901,29 @@ impl TaskGuardImpl<'_, B> { /// before accessing the data. 
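The cell-data accessors above keep serializable contents typed, while transient contents are stored untyped and re-typed from the cell's type id on the way out. A rough sketch of that round-trip using hypothetical stand-ins for the id and shared-reference types (the real ones live in `turbo_tasks`):

    use std::collections::HashMap;

    // Stand-ins: (type_id, index) for CellId, plain strings for the payload.
    type CellId = (u32, u32);
    struct Untyped(String);
    struct Typed {
        type_id: u32,
        value: String,
    }

    impl Typed {
        fn into_untyped(self) -> Untyped {
            Untyped(self.value) // the type id is dropped in transient storage
        }
    }

    impl Untyped {
        fn into_typed(self, type_id: u32) -> Typed {
            Typed { type_id, value: self.0 }
        }
    }

    #[derive(Default)]
    struct CellStore {
        persistent: HashMap<CellId, Typed>,
        transient: HashMap<CellId, Untyped>,
    }

    impl CellStore {
        /// Mirrors the dispatch above: typed storage when serializable,
        /// untyped storage (re-typed on read) when transient.
        fn set(&mut self, serializable: bool, cell: CellId, value: Typed) -> Option<Typed> {
            if serializable {
                self.persistent.insert(cell, value)
            } else {
                self.transient
                    .insert(cell, value.into_untyped())
                    .map(|sr| sr.into_typed(cell.0))
            }
        }
    }

    fn main() {
        let mut store = CellStore::default();
        let cell: CellId = (3, 0);
        assert!(store.set(false, cell, Typed { type_id: 3, value: "a".into() }).is_none());
        let old = store.set(false, cell, Typed { type_id: 3, value: "b".into() });
        assert_eq!(old.map(|t| (t.type_id, t.value)), Some((3, "a".into())));
    }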
#[inline] #[track_caller] - fn check_access(&self, category: TaskDataCategory) { - { - match category { - TaskDataCategory::All => { - // This category is used for non-persisted data - } - TaskDataCategory::Data => { - #[cfg(debug_assertions)] - debug_assert!( - self.category == TaskDataCategory::Data - || self.category == TaskDataCategory::All, - "To read data of {:?} the task need to be accessed with this category \ - (It's accessed with {:?})", - category, - self.category - ); - } - TaskDataCategory::Meta => { - #[cfg(debug_assertions)] - debug_assert!( - self.category == TaskDataCategory::Meta - || self.category == TaskDataCategory::All, - "To read data of {:?} the task need to be accessed with this category \ - (It's accessed with {:?})", - category, - self.category - ); - } + fn check_access(&self, category: crate::backend::storage::SpecificTaskDataCategory) { + match category { + SpecificTaskDataCategory::Data => { + #[cfg(debug_assertions)] + debug_assert!( + self.category == TaskDataCategory::Data + || self.category == TaskDataCategory::All, + "To read data of {:?} the task need to be accessed with this category (It's \ + accessed with {:?})", + category, + self.category + ); + } + SpecificTaskDataCategory::Meta => { + #[cfg(debug_assertions)] + debug_assert!( + self.category == TaskDataCategory::Meta + || self.category == TaskDataCategory::All, + "To read data of {:?} the task need to be accessed with this category (It's \ + accessed with {:?})", + category, + self.category + ); } } } @@ -861,13 +960,35 @@ impl TaskGuard for TaskGuardImpl<'_, B> { return None; } self.task.flags.set_prefetched(true); - let map = iter_many!(self, OutputDependency { target } => (target, TaskDataCategory::Meta)) - .chain(iter_many!(self, CellDependency { target, key: _ } => (target.task, TaskDataCategory::All))) - .chain(iter_many!(self, CollectiblesDependency { target } => (target.task, TaskDataCategory::All))) - .chain(iter_many!(self, Child { task } => (task, TaskDataCategory::All))) + let map = self + .iter_output_dependencies() + .map(|target| (target, TaskDataCategory::Meta)) + .chain( + self.iter_cell_dependencies() + .map(|(target, _key)| (target.task, TaskDataCategory::All)), + ) + .chain( + self.iter_collectibles_dependencies() + .map(|target| (target.task, TaskDataCategory::All)), + ) + .chain( + self.iter_children() + .map(|task| (task, TaskDataCategory::All)), + ) .collect::>(); (map.len() > 1).then_some(map) } + + fn get_activeness_mut_or_insert_with(&mut self, f: F) -> &mut ActivenessState + where + F: FnOnce() -> ActivenessState, + { + if !self.has_activeness() { + self.set_activeness(f()); + } + self.get_activeness_mut() + .expect("activeness should exist after set") + } } impl<'a, B: BackingStorage> TaskStorageAccessors for TaskGuardImpl<'a, B> { @@ -885,7 +1006,7 @@ impl<'a, B: BackingStorage> TaskStorageAccessors for TaskGuardImpl<'a, B> { } } - fn check_access(&self, category: crate::backend::TaskDataCategory) { + fn check_access(&self, category: crate::backend::storage::SpecificTaskDataCategory) { self.check_access(category); } } diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/operation/prepare_new_children.rs b/turbopack/crates/turbo-tasks-backend/src/backend/operation/prepare_new_children.rs index bf695796264fd..e31f67cc71125 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/operation/prepare_new_children.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/operation/prepare_new_children.rs @@ -3,11 +3,8 @@ use std::{cmp::max, num::NonZeroU32}; 
use rustc_hash::FxHashSet; use turbo_tasks::TaskId; -use crate::backend::{ - get, - operation::{ - AggregationUpdateJob, AggregationUpdateQueue, TaskGuard, is_aggregating_node, is_root_node, - }, +use crate::backend::operation::{ + AggregationUpdateJob, AggregationUpdateQueue, TaskGuard, is_aggregating_node, is_root_node, }; const AGGREGATION_NUMBER_BUFFER_SPACE: u32 = 3; @@ -22,7 +19,8 @@ pub fn prepare_new_children( let children_count = new_children.len(); // Compute future parent aggregation number based on the number of children - let current_parent_aggregation = get!(parent_task, AggregationNumber) + let current_parent_aggregation = parent_task + .get_aggregation_number() .copied() .unwrap_or_default(); let future_parent_aggregation = if is_root_node(current_parent_aggregation.base) { diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/operation/update_cell.rs b/turbopack/crates/turbo-tasks-backend/src/backend/operation/update_cell.rs index 4df884e8d5413..e9ccec3a7b5e3 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/operation/update_cell.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/operation/update_cell.rs @@ -12,14 +12,14 @@ use turbo_tasks::{CellId, FxIndexMap, TaskId, TypedSharedReference, backend::Cel use crate::backend::operation::invalidate::TaskDirtyCause; use crate::{ backend::{ - TaskDataCategory, TaskStorageAccessors, + TaskDataCategory, operation::{ AggregationUpdateQueue, ExecuteContext, Operation, TaskGuard, invalidate::make_task_dirty_internal, }, - storage::{iter_many, remove}, + storage_schema::TaskStorageAccessors, }, - data::{CachedDataItem, CachedDataItemKey, CellRef}, + data::CellRef, }; #[derive(Encode, Decode, Clone, Default)] @@ -57,7 +57,7 @@ impl UpdateCellOperation { updated_key_hashes: Option>, #[cfg(feature = "verify_determinism")] verification_mode: VerificationMode, #[cfg(not(feature = "verify_determinism"))] _verification_mode: VerificationMode, - mut ctx: impl ExecuteContext, + mut ctx: impl ExecuteContext<'_>, ) { let content = if let CellContent(Some(new_content)) = content { Some(new_content.into_typed(cell.type_id)) @@ -70,8 +70,7 @@ impl UpdateCellOperation { // We need to detect recomputation, because here the content has not actually changed (even // if it's not equal to the old content, as not all values implement Eq). We have to // assume that tasks are deterministic and pure. 
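The dependent-selection filter just below reduces to a small predicate: a dependent that read the whole cell (`key` is `None`) is always affected, a keyed read is affected only when its hash is in the set of updated key hashes, and an absent set means every key has to be assumed changed. A standalone sketch of that predicate (hypothetical function name):

    use std::collections::HashSet;

    fn is_affected(key: Option<u64>, updated_key_hashes: Option<&HashSet<u64>>) -> bool {
        key.is_none_or(|hash| updated_key_hashes.is_none_or(|set| set.contains(&hash)))
    }

    fn main() {
        let set: HashSet<u64> = [1, 2].into_iter().collect();
        assert!(is_affected(None, Some(&set))); // whole-cell reader
        assert!(is_affected(Some(1), Some(&set))); // read an updated key
        assert!(!is_affected(Some(9), Some(&set))); // read an untouched key
        assert!(is_affected(Some(9), None)); // no key info: assume changed
    }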
- let assume_unchanged = - !ctx.should_track_dependencies() || !task.has_key(&CachedDataItemKey::Dirty {}); + let assume_unchanged = !ctx.should_track_dependencies() || !task.has_dirty(); if assume_unchanged { let has_old_content = task.has_cell_data(is_serializable_cell_content, cell); @@ -108,20 +107,21 @@ impl UpdateCellOperation { Lazy::new(|| updated_key_hashes.into_iter().collect::>()) }); - let tasks_with_keys = iter_many!( - task, - CellDependent { cell: dependent_cell, key, task } - if dependent_cell == cell && key.is_none_or(|key_hash| { - updated_key_hashes_set.as_ref().is_none_or(|set| { - set.contains(&key_hash) - }) + let tasks_with_keys = task + .iter_cell_dependents() + .filter(|&(dependent_cell, key, _)| { + dependent_cell == cell + && key.is_none_or(|key_hash| { + updated_key_hashes_set + .as_ref() + .is_none_or(|set| set.contains(&key_hash)) + }) }) - => (task, key) - ) - .filter(|&(dependent_task_id, _)| { - // once tasks are never invalidated - !ctx.is_once_task(dependent_task_id) - }); + .map(|(_, key, task)| (task, key)) + .filter(|&(dependent_task_id, _)| { + // once tasks are never invalidated + !ctx.is_once_task(dependent_task_id) + }); let mut dependent_tasks: FxIndexMap; 2]>> = FxIndexMap::default(); for (task, key) in tasks_with_keys { @@ -142,10 +142,7 @@ impl UpdateCellOperation { // tasks and after that set the new cell content. When the cell content is unset, // readers will wait for it to be set via InProgressCell. - let old_content = task.remove(&CachedDataItemKey::cell_data( - is_serializable_cell_content, - cell, - )); + let old_content = task.remove_cell_data(is_serializable_cell_content, cell); drop(task); drop(old_content); @@ -177,19 +174,12 @@ impl UpdateCellOperation { // So we can just update the cell content. let old_content = if let Some(new_content) = content { - task.insert(CachedDataItem::cell_data( - is_serializable_cell_content, - cell, - new_content, - )) + task.set_cell_data(is_serializable_cell_content, cell, new_content) } else { - task.remove(&CachedDataItemKey::cell_data( - is_serializable_cell_content, - cell, - )) + task.remove_cell_data(is_serializable_cell_content, cell) }; - let in_progress_cell = remove!(task, InProgressCell { cell }); + let in_progress_cell = task.remove_in_progress_cells(&cell); drop(task); drop(old_content); @@ -216,7 +206,7 @@ impl UpdateCellOperation { } impl Operation for UpdateCellOperation { - fn execute(mut self, ctx: &mut impl ExecuteContext) { + fn execute(mut self, ctx: &mut impl ExecuteContext<'_>) { loop { if self.is_serializable() { ctx.operation_suspend_point(&self); @@ -235,20 +225,14 @@ impl Operation for UpdateCellOperation { let mut make_stale = false; let dependent = ctx.task(dependent_task_id, TaskDataCategory::All); for key in keys.iter().copied() { - if dependent.has_key(&CachedDataItemKey::OutdatedCellDependency { - target: cell_ref, - key, - }) { + if dependent.outdated_cell_dependencies_contains(&(cell_ref, key)) { // cell dependency is outdated, so it hasn't read the cell yet // and doesn't need to be invalidated. // We do not need to make the task stale in this case. // But importantly we still need to make the task dirty as it should // no longer be considered as // "recomputation". 
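The checks here and in the branches that follow classify each dependent, per key, into three outcomes. Summarized as a sketch (hypothetical enum; the real code drives a `make_stale` flag and the dirty queue instead of returning a value):

    enum DependentAction {
        /// The dependency is outdated: the dependent has not re-read the
        /// cell yet, so it is made dirty but not stale.
        DirtyOnly,
        /// The dependency was removed: nothing to invalidate for this key.
        Skip,
        /// A live dependency: the dependent read the old value and must be
        /// re-executed (dirty and stale).
        DirtyAndStale,
    }

    fn classify(is_outdated: bool, is_live: bool) -> DependentAction {
        if is_outdated {
            DependentAction::DirtyOnly
        } else if !is_live {
            DependentAction::Skip
        } else {
            DependentAction::DirtyAndStale
        }
    }

    fn main() {
        assert!(matches!(classify(true, true), DependentAction::DirtyOnly));
        assert!(matches!(classify(false, false), DependentAction::Skip));
        assert!(matches!(classify(false, true), DependentAction::DirtyAndStale));
    }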
- } else if !dependent.has_key(&CachedDataItemKey::CellDependency { - target: cell_ref, - key, - }) { + } else if !dependent.cell_dependencies_contains(&(cell_ref, key)) { // cell dependency has been removed, so the task doesn't depend on // the cell anymore and doesn't need // to be invalidated @@ -288,14 +272,10 @@ impl Operation for UpdateCellOperation { let mut task = ctx.task(task, TaskDataCategory::Data); if let Some(content) = content { - task.add_new(CachedDataItem::cell_data( - is_serializable_cell_content, - cell, - content, - )); + task.add_cell_data(is_serializable_cell_content, cell, content); } - let in_progress_cell = remove!(task, InProgressCell { cell }); + let in_progress_cell = task.remove_in_progress_cells(&cell); drop(task); diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/operation/update_collectible.rs b/turbopack/crates/turbo-tasks-backend/src/backend/operation/update_collectible.rs index fb39b6939a969..4ce535b3e6969 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/operation/update_collectible.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/operation/update_collectible.rs @@ -5,12 +5,12 @@ use turbo_tasks::TaskId; use crate::{ backend::{ - TaskDataCategory, get_many, + TaskDataCategory, operation::{ AggregatedDataUpdate, AggregationUpdateJob, AggregationUpdateQueue, ExecuteContext, Operation, get_aggregation_number, is_root_node, }, - storage::{get, update_count}, + storage_schema::TaskStorageAccessors, }, data::CollectibleRef, }; @@ -22,7 +22,7 @@ impl UpdateCollectibleOperation { task_id: TaskId, collectible: CollectibleRef, mut count: i32, - mut ctx: impl ExecuteContext, + mut ctx: impl ExecuteContext<'_>, ) { let mut task = ctx.task(task_id, TaskDataCategory::All); if count < 0 { @@ -52,32 +52,27 @@ impl UpdateCollectibleOperation { } } let mut queue = AggregationUpdateQueue::new(); - let outdated = get!(task, OutdatedCollectible { collectible }).copied(); + let outdated = task.get_outdated_collectibles(&collectible).copied(); if let Some(outdated) = outdated { if count > 0 && outdated > 0 { let shared = min(count, outdated); - update_count!(task, OutdatedCollectible { collectible }, -shared); + let _ = task.update_outdated_collectibles_positive_crossing(collectible, -shared); count -= shared; } else if count < 0 && outdated < 0 { let shared = min(-count, -outdated); - update_count!(task, OutdatedCollectible { collectible }, shared); + let _ = task.update_outdated_collectibles_positive_crossing(collectible, shared); count += shared; } else { // Not reduced from outdated } } if count != 0 { - if update_count!(task, Collectible { collectible }, count) { + if task.update_collectibles_positive_crossing(collectible, count) { let ty = collectible.collectible_type; - let dependent: SmallVec<[TaskId; 4]> = get_many!( - task, - CollectiblesDependent { - collectible_type, - task, - } if collectible_type == ty => { - task - } - ); + let dependent: SmallVec<[TaskId; 4]> = task + .iter_collectibles_dependents() + .filter_map(|(collectible_type, task)| (collectible_type == ty).then_some(task)) + .collect(); if !dependent.is_empty() { queue.push(AggregationUpdateJob::InvalidateDueToCollectiblesChange { task_ids: dependent, diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/storage.rs b/turbopack/crates/turbo-tasks-backend/src/backend/storage.rs index a3caec38404a2..67a462242f040 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/storage.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/storage.rs @@ -64,6 +64,8 
@@ enum ModifiedState { /// Snapshot(Some): /// It was modified before snapshot mode was entered and it was accessed again during snapshot /// mode. A copy of the version of the item when snapshot mode was entered is stored here. + /// The `TaskStorage` contains only persistent fields (via `clone_snapshot()`), and has + /// `meta_modified`/`data_modified` flags set to indicate which categories need serializing. /// Snapshot(None): /// It was not modified before snapshot mode was entered, but it was accessed during snapshot /// mode. Or the snapshot was already taken out by the snapshot operation. @@ -342,258 +344,6 @@ impl DerefMut for StorageWriteGuard<'_> { } } -// TODO: this implementation is only needed to bootstrap new tasks -impl super::storage_schema::TaskStorageAccessors for StorageWriteGuard<'_> { - fn typed(&self) -> &super::storage_schema::TaskStorage { - &self.inner - } - - fn typed_mut(&mut self) -> &mut super::storage_schema::TaskStorage { - &mut self.inner - } - - fn track_modification(&mut self, category: SpecificTaskDataCategory) { - // Delegate to the existing track_modification method - StorageWriteGuard::track_modification(self, category) - } - - fn check_access(&self, _category: super::TaskDataCategory) { - // StorageWriteGuard doesn't have category tracking - that's handled by TaskGuardImpl. - // This is a no-op for Stor - } -} - -macro_rules! count { - ($task:ident, $key:ident) => {{ $task.count($crate::data::CachedDataItemType::$key) }}; -} - -macro_rules! get { - ($task:ident, $key:ident $input:tt) => {{ - #[allow(unused_imports)] - use $crate::backend::storage_schema::TaskStorageAccessors; - if let Some($crate::data::CachedDataItemValueRef::$key { - value, - }) = $task.get(&$crate::data::CachedDataItemKey::$key $input) { - Some(value) - } else { - None - } - }}; - ($task:ident, $key:ident) => { - $crate::backend::storage::get!($task, $key {}) - }; -} - -macro_rules! get_mut { - ($task:ident, $key:ident $input:tt) => {{ - #[allow(unused_imports)] - use $crate::backend::storage_schema::TaskStorageAccessors; - if let Some($crate::data::CachedDataItemValueRefMut::$key { - value, - }) = $task.get_mut(&$crate::data::CachedDataItemKey::$key $input) { - let () = $crate::data::allow_mut_access::$key; - Some(value) - } else { - None - } - }}; - ($task:ident, $key:ident) => { - $crate::backend::storage::get_mut!($task, $key {}) - }; -} - -macro_rules! get_mut_or_insert_with { - ($task:ident, $key:ident $input:tt, $f:expr) => {{ - #[allow(unused_imports)] - use $crate::backend::operation::TaskGuard; - let () = $crate::data::allow_mut_access::$key; - let functor = $f; - let $crate::data::CachedDataItemValueRefMut::$key { - value, - } = $task.get_mut_or_insert_with($crate::data::CachedDataItemKey::$key $input, move || $crate::data::CachedDataItemValue::$key { value: functor() }) else { - unreachable!() - }; - value - }}; - ($task:ident, $key:ident, $f:expr) => { - $crate::backend::storage::get_mut_or_insert_with!($task, $key {}, $f) - }; -} - -/// Creates an iterator over all [`CachedDataItemKey::$key`][crate::data::CachedDataItemKey]s in -/// `$task` matching the given `$key_pattern`, optional `$value_pattern`, and optional `if $cond`. -/// -/// Each element in the iterator is determined by `$iter_item`, which may use fields extracted by -/// `$key_pattern` or `$value_pattern`. -macro_rules! iter_many { - ($task:ident, $key:ident $key_pattern:tt $(if $cond:expr)? 
=> $iter_item:expr) => {{ - #[allow(unused_imports)] - use $crate::backend::storage_schema::TaskStorageAccessors; - $task - .iter($crate::data::CachedDataItemType::$key) - .filter_map(|(key, _)| match key { - $crate::data::CachedDataItemKey::$key $key_pattern $(if $cond)? => Some( - $iter_item - ), - _ => None, - }) - }}; - ($task:ident, $key:ident $input:tt $value_pattern:tt $(if $cond:expr)? => $iter_item:expr) => {{ - #[allow(unused_imports)] - use $crate::backend::storage_schema::TaskStorageAccessors; - $task - .iter($crate::data::CachedDataItemType::$key) - .filter_map(|(key, value)| match (key, value) { - ( - $crate::data::CachedDataItemKey::$key $input, - $crate::data::CachedDataItemValueRef::$key { value: $value_pattern } - ) $(if $cond)? => Some($iter_item), - _ => None, - }) - }}; -} - -/// A thin wrapper around [`iter_many`] that calls [`Iterator::collect`]. -/// -/// Note that the return type of [`Iterator::collect`] may be ambiguous in certain contexts, so -/// using this macro may require explicit type annotations on variables. -macro_rules! get_many { - ($($args:tt)*) => { - $crate::backend::storage::iter_many!($($args)*).collect() - }; -} - -macro_rules! update { - ($task:ident, $key:ident $input:tt, $update:expr) => {{ - #[allow(unused_imports)] - use $crate::backend::storage_schema::TaskStorageAccessors; - #[allow(unused_mut)] - let mut update = $update; - $task.update($crate::data::CachedDataItemKey::$key $input, |old| { - update(old.and_then(|old| { - if let $crate::data::CachedDataItemValue::$key { value } = old { - Some(value) - } else { - None - } - })) - .map(|new| $crate::data::CachedDataItemValue::$key { value: new }) - }) - }}; - ($task:ident, $key:ident, $update:expr) => { - $crate::backend::storage::update!($task, $key {}, $update) - }; -} - -macro_rules! update_count { - ($task:ident, $key:ident $input:tt, -$update:expr) => { - match $update { - update => { - let mut state_change = false; - $crate::backend::storage::update!($task, $key $input, |old: Option<_>| { - #[allow(unused_comparisons, reason = "type of update might be unsigned, where update < 0 is always false")] - if let Some(old) = old { - let new = old - update; - state_change = old <= 0 && new > 0 || old > 0 && new <= 0; - (new != 0).then_some(new) - } else { - state_change = update < 0; - (update != 0).then_some(-update) - } - }); - state_change - } - } - }; - ($task:ident, $key:ident $input:tt, $update:expr) => { - match $update { - update => { - let mut state_change = false; - $crate::backend::storage::update!($task, $key $input, |old: Option<_>| { - if let Some(old) = old { - let new = old + update; - state_change = old <= 0 && new > 0 || old > 0 && new <= 0; - (new != 0).then_some(new) - } else { - state_change = update > 0; - (update != 0).then_some(update) - } - }); - state_change - } - } - }; - ($task:ident, $key:ident, -$update:expr) => { - $crate::backend::storage::update_count!($task, $key {}, -$update) - }; - ($task:ident, $key:ident, $update:expr) => { - $crate::backend::storage::update_count!($task, $key {}, $update) - }; -} - -macro_rules! 
update_count_and_get { - ($task:ident, $key:ident $input:tt, -$update:expr) => { - match $update { - update => { - let mut new = 0; - $crate::backend::storage::update!($task, $key $input, |old: Option<_>| { - let old = old.unwrap_or(0); - new = old - update; - (new != 0).then_some(new) - }); - new - } - } - }; - ($task:ident, $key:ident $input:tt, $update:expr) => { - match $update { - update => { - let mut new = 0; - $crate::backend::storage::update!($task, $key $input, |old: Option<_>| { - let old = old.unwrap_or(0); - new = old + update; - (new != 0).then_some(new) - }); - new - } - } - }; - ($task:ident, $key:ident, -$update:expr) => { - $crate::backend::storage::update_count_and_get!($task, $key {}, -$update) - }; - ($task:ident, $key:ident, $update:expr) => { - $crate::backend::storage::update_count_and_get!($task, $key {}, $update) - }; -} - -macro_rules! remove { - ($task:ident, $key:ident $input:tt) => {{ - #[allow(unused_imports)] - use $crate::backend::storage_schema::TaskStorageAccessors; - if let Some($crate::data::CachedDataItemValue::$key { value }) = $task.remove( - &$crate::data::CachedDataItemKey::$key $input - ) { - Some(value) - } else { - None - } - }}; - ($task:ident, $key:ident) => { - $crate::backend::storage::remove!($task, $key {}) - }; -} - -pub(crate) use count; -pub(crate) use get; -pub(crate) use get_many; -pub(crate) use get_mut; -pub(crate) use get_mut_or_insert_with; -pub(crate) use iter_many; -pub(crate) use remove; -pub(crate) use update; -pub(crate) use update_count; -pub(crate) use update_count_and_get; - pub struct SnapshotGuard<'l> { storage: &'l Storage, } diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/storage_schema.rs b/turbopack/crates/turbo-tasks-backend/src/backend/storage_schema.rs index e6b06810f446b..71f4beff39416 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/storage_schema.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/storage_schema.rs @@ -18,14 +18,15 @@ //! - `meta` - Rarely changed metadata (output, aggregation, flags) //! - `transient` - Not serialized, only exists in memory use turbo_tasks::{ - CellId, SharedReference, TaskId, TraitTypeId, TypedSharedReference, ValueTypeId, task_storage, + CellId, SharedReference, TaskExecutionReason, TaskId, TraitTypeId, TypedSharedReference, + ValueTypeId, event::Event, task_storage, }; use crate::{ backend::counter_map::CounterMap, data::{ ActivenessState, AggregationNumber, CellRef, CollectibleRef, CollectiblesRef, Dirtyness, - InProgressCellState, InProgressState, LeafDistance, OutputValue, + InProgressCellState, InProgressState, LeafDistance, OutputValue, RootType, }, }; @@ -56,91 +57,42 @@ struct TaskStorageSchema { // INLINE FIELDS (hot path, always allocated inline) // ========================================================================= /// The task's distance for prioritizing invalidation execution - #[field( - storage = "direct", - category = "data", - inline, - default, - variant = "LeafDistance" - )] + #[field(storage = "direct", category = "data", inline, default)] pub leaf_distance: LeafDistance, /// The task's aggregation number for the aggregation tree. /// Uses Default::default() semantics - a zero aggregation number means "not set". - #[field( - storage = "direct", - category = "meta", - inline, - default, - variant = "AggregationNumber" - )] + #[field(storage = "direct", category = "meta", inline, default)] pub aggregation_number: AggregationNumber, /// Tasks that depend on this task's output. 
diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/storage_schema.rs b/turbopack/crates/turbo-tasks-backend/src/backend/storage_schema.rs
index e6b06810f446b..71f4beff39416 100644
--- a/turbopack/crates/turbo-tasks-backend/src/backend/storage_schema.rs
+++ b/turbopack/crates/turbo-tasks-backend/src/backend/storage_schema.rs
@@ -18,14 +18,15 @@
 //! - `meta` - Rarely changed metadata (output, aggregation, flags)
 //! - `transient` - Not serialized, only exists in memory
 use turbo_tasks::{
-    CellId, SharedReference, TaskId, TraitTypeId, TypedSharedReference, ValueTypeId, task_storage,
+    CellId, SharedReference, TaskExecutionReason, TaskId, TraitTypeId, TypedSharedReference,
+    ValueTypeId, event::Event, task_storage,
 };
 
 use crate::{
     backend::counter_map::CounterMap,
     data::{
         ActivenessState, AggregationNumber, CellRef, CollectibleRef, CollectiblesRef, Dirtyness,
-        InProgressCellState, InProgressState, LeafDistance, OutputValue,
+        InProgressCellState, InProgressState, LeafDistance, OutputValue, RootType,
     },
 };
@@ -56,91 +57,42 @@ struct TaskStorageSchema {
     // =========================================================================
     // INLINE FIELDS (hot path, always allocated inline)
     // =========================================================================
     /// The task's distance for prioritizing invalidation execution
-    #[field(
-        storage = "direct",
-        category = "data",
-        inline,
-        default,
-        variant = "LeafDistance"
-    )]
+    #[field(storage = "direct", category = "data", inline, default)]
     pub leaf_distance: LeafDistance,
 
     /// The task's aggregation number for the aggregation tree.
     /// Uses Default::default() semantics - a zero aggregation number means "not set".
-    #[field(
-        storage = "direct",
-        category = "meta",
-        inline,
-        default,
-        variant = "AggregationNumber"
-    )]
+    #[field(storage = "direct", category = "meta", inline, default)]
     pub aggregation_number: AggregationNumber,
 
     /// Tasks that depend on this task's output.
-    #[field(
-        storage = "auto_set",
-        category = "data",
-        inline,
-        filter_transient,
-        variant = "OutputDependent",
-        key_field = "task"
-    )]
+    #[field(storage = "auto_set", category = "data", inline, filter_transient)]
     pub output_dependent: AutoSet<TaskId>,
 
     /// The task's output value.
     /// Filtered during serialization to skip transient outputs (referencing transient tasks).
-    #[field(
-        storage = "direct",
-        category = "meta",
-        inline,
-        filter_transient,
-        variant = "Output"
-    )]
+    #[field(storage = "direct", category = "meta", inline, filter_transient)]
     pub output: Option<OutputValue>,
 
     /// Upper nodes in the aggregation tree (reference counted).
-    #[field(
-        storage = "counter_map",
-        category = "meta",
-        inline,
-        filter_transient,
-        variant = "Upper",
-        key_field = "task"
-    )]
+    #[field(storage = "counter_map", category = "meta", inline, filter_transient)]
     pub upper: CounterMap<TaskId, u32>,
 
     // =========================================================================
     // COLLECTIBLES (meta)
     // =========================================================================
     /// Collectibles emitted by this task (reference counted).
-    #[field(
-        storage = "counter_map",
-        category = "meta",
-        filter_transient,
-        variant = "Collectible",
-        key_field = "collectible"
-    )]
+    #[field(storage = "counter_map", category = "meta", filter_transient)]
    pub collectibles: CounterMap<CollectibleRef, i32>,
 
     /// Aggregated collectibles from the subgraph.
-    #[field(
-        storage = "counter_map",
-        category = "meta",
-        filter_transient,
-        variant = "AggregatedCollectible",
-        key_field = "collectible"
-    )]
+    #[field(storage = "counter_map", category = "meta", filter_transient)]
     pub aggregated_collectibles: CounterMap<CollectibleRef, i32>,
 
     /// Outdated collectibles to be cleaned up (transient).
-    #[field(
-        storage = "counter_map",
-        category = "transient",
-        variant = "OutdatedCollectible",
-        key_field = "collectible"
-    )]
+    #[field(storage = "counter_map", category = "transient")]
     pub outdated_collectibles: CounterMap<CollectibleRef, i32>,
 
     // =========================================================================
@@ -149,44 +101,25 @@ struct TaskStorageSchema {
     // =========================================================================
     /// Whether the task is dirty (needs re-execution).
     /// Absent = clean, present = dirty with the specified Dirtyness state.
-    #[field(storage = "direct", category = "meta", variant = "Dirty")]
+    #[field(storage = "direct", category = "meta")]
     pub dirty: Dirtyness,
 
     /// Count of dirty containers in the aggregated subgraph.
     /// Absent = 0, present = actual count.
-    #[field(
-        storage = "direct",
-        category = "meta",
-        variant = "AggregatedDirtyContainerCount"
-    )]
+    #[field(storage = "direct", category = "meta")]
     pub aggregated_dirty_container_count: i32,
 
     /// Individual dirty containers in the aggregated subgraph.
-    #[field(
-        storage = "counter_map",
-        category = "meta",
-        filter_transient,
-        variant = "AggregatedDirtyContainer",
-        key_field = "task"
-    )]
+    #[field(storage = "counter_map", category = "meta", filter_transient)]
     pub aggregated_dirty_containers: CounterMap<TaskId, i32>,
 
     /// Count of clean containers in current session (transient).
     /// Absent = 0, present = actual count.
-    #[field(
-        storage = "direct",
-        category = "transient",
-        variant = "AggregatedCurrentSessionCleanContainerCount"
-    )]
+    #[field(storage = "direct", category = "transient")]
     pub aggregated_current_session_clean_container_count: i32,
 
     /// Individual clean containers in current session (transient).
-    #[field(
-        storage = "counter_map",
-        category = "transient",
-        variant = "AggregatedCurrentSessionCleanContainer",
-        key_field = "task"
-    )]
+    #[field(storage = "counter_map", category = "transient")]
     pub aggregated_current_session_clean_containers: CounterMap<TaskId, i32>,
 
     // =========================================================================
@@ -194,19 +127,15 @@ struct TaskStorageSchema {
     // Persisted flags come first, then transient flags.
     // =========================================================================
     /// Whether the task has an invalidator.
-    #[field(storage = "flag", category = "meta", variant = "HasInvalidator")]
+    #[field(storage = "flag", category = "meta")]
     pub invalidator: bool,
 
     /// Whether the task output is immutable (persisted).
-    #[field(storage = "flag", category = "meta", variant = "Immutable")]
+    #[field(storage = "flag", category = "meta")]
     pub immutable: bool,
 
     /// Whether clean in current session (transient flag).
-    #[field(
-        storage = "flag",
-        category = "transient",
-        variant = "CurrentSessionClean"
-    )]
+    #[field(storage = "flag", category = "transient")]
     pub current_session_clean: bool,
 
     // =========================================================================
@@ -245,156 +174,79 @@ struct TaskStorageSchema {
     // CHILDREN & AGGREGATION (meta)
     // =========================================================================
     /// Child tasks of this task.
-    #[field(
-        storage = "auto_set",
-        category = "meta",
-        filter_transient,
-        variant = "Child",
-        key_field = "task"
-    )]
+    #[field(storage = "auto_set", category = "meta", filter_transient)]
     pub children: AutoSet<TaskId>,
 
     /// Follower nodes in the aggregation tree (reference counted).
-    #[field(
-        storage = "counter_map",
-        category = "meta",
-        filter_transient,
-        variant = "Follower",
-        key_field = "task"
-    )]
+    #[field(storage = "counter_map", category = "meta", filter_transient)]
     pub followers: CounterMap<TaskId, u32>,
 
     // =========================================================================
     // DEPENDENCIES (data)
     // =========================================================================
-    #[field(
-        storage = "auto_set",
-        category = "data",
-        filter_transient,
-        variant = "OutputDependency",
-        key_field = "target"
-    )]
+    #[field(storage = "auto_set", category = "data", filter_transient)]
     pub output_dependencies: AutoSet<TaskId>,
 
     /// Cells this task depends on.
-    #[field(
-        storage = "auto_set",
-        category = "data",
-        filter_transient,
-        variant = "CellDependency",
-        key_field = "target, key"
-    )]
+    #[field(storage = "auto_set", category = "data", filter_transient)]
     pub cell_dependencies: AutoSet<(CellRef, Option)>,
 
     /// Collectibles this task depends on.
-    #[field(
-        storage = "auto_set",
-        category = "data",
-        filter_transient,
-        variant = "CollectiblesDependency",
-        key_field = "target"
-    )]
+    #[field(storage = "auto_set", category = "data", filter_transient)]
     pub collectibles_dependencies: AutoSet<CollectiblesRef>,
 
     /// Outdated output dependencies to be cleaned up (transient).
-    #[field(
-        storage = "auto_set",
-        category = "transient",
-        variant = "OutdatedOutputDependency",
-        key_field = "target"
-    )]
+    #[field(storage = "auto_set", category = "transient")]
     pub outdated_output_dependencies: AutoSet<TaskId>,
 
     /// Outdated cell dependencies to be cleaned up (transient).
-    #[field(
-        storage = "auto_set",
-        category = "transient",
-        variant = "OutdatedCellDependency",
-        key_field = "target, key"
-    )]
+    #[field(storage = "auto_set", category = "transient")]
     pub outdated_cell_dependencies: AutoSet<(CellRef, Option)>,
 
     /// Outdated collectibles dependencies to be cleaned up (transient).
-    #[field(
-        storage = "auto_set",
-        category = "transient",
-        variant = "OutdatedCollectiblesDependency",
-        key_field = "target"
-    )]
+    #[field(storage = "auto_set", category = "transient")]
     pub outdated_collectibles_dependencies: AutoSet<CollectiblesRef>,
 
     // =========================================================================
     // DEPENDENTS - Tasks that depend on this task's cells
     // =========================================================================
-    #[field(
-        storage = "auto_set",
-        category = "data",
-        variant = "CellDependent",
-        key_field = "cell, key, task",
-        filter_transient
-    )]
+    #[field(storage = "auto_set", category = "data", filter_transient)]
     pub cell_dependents: AutoSet<(CellId, Option, TaskId)>,
 
     /// Tasks that depend on collectibles of a specific type from this task.
     /// Maps TraitTypeId -> Set
-    #[field(
-        storage = "auto_set",
-        category = "meta",
-        variant = "CollectiblesDependent",
-        key_field = "collectible_type, task",
-        filter_transient
-    )]
+    #[field(storage = "auto_set", category = "meta", filter_transient)]
     pub collectibles_dependents: AutoSet<(TraitTypeId, TaskId)>,
 
     // =========================================================================
     // CELL DATA (data)
     // =========================================================================
     /// Persistent cell data (serializable).
-    #[field(
-        storage = "auto_map",
-        category = "data",
-        variant = "CellData",
-        key_field = "cell"
-    )]
-    pub cell_data: AutoMap<CellId, TypedSharedReference>,
+    #[field(storage = "auto_map", category = "data")]
+    pub persistent_cell_data: AutoMap<CellId, TypedSharedReference>,
 
     /// Transient cell data (not serializable).
-    #[field(
-        storage = "auto_map",
-        category = "transient",
-        variant = "TransientCellData",
-        key_field = "cell"
-    )]
+    #[field(storage = "auto_map", category = "transient")]
     pub transient_cell_data: AutoMap<CellId, SharedReference>,
 
     /// Maximum cell index per cell type.
-    #[field(
-        storage = "auto_map",
-        category = "data",
-        variant = "CellTypeMaxIndex",
-        key_field = "cell_type"
-    )]
+    #[field(storage = "auto_map", category = "data")]
     pub cell_type_max_index: AutoMap<ValueTypeId, u32>,
 
     // =========================================================================
     // TRANSIENT EXECUTION STATE (transient)
     // =========================================================================
     /// Activeness state for root/once tasks (transient).
-    #[field(storage = "direct", category = "transient", variant = "Activeness")]
+    #[field(storage = "direct", category = "transient")]
     pub activeness: ActivenessState,
 
     /// In-progress execution state (transient).
-    #[field(storage = "direct", category = "transient", variant = "InProgress")]
+    #[field(storage = "direct", category = "transient")]
     pub in_progress: InProgressState,
 
     /// In-progress cell state for cells being computed (transient).
-    #[field(
-        storage = "auto_map",
-        category = "transient",
-        variant = "InProgressCell",
-        key_field = "cell"
-    )]
+    #[field(storage = "auto_map", category = "transient")]
     pub in_progress_cells: AutoMap<CellId, InProgressCellState>,
 }
@@ -597,6 +449,44 @@ impl TaskStorage {
             SpecificTaskDataCategory::Data => self.clone_data_snapshot(),
         }
     }
+
+    /// Initialize a transient task with the given root type and activeness tracking.
+    ///
+    /// This sets up the activeness state for root/once tasks.
+    /// Called when creating transient tasks via `create_transient_task`.
+    pub fn init_transient_task(
+        &mut self,
+        task_id: TaskId,
+        root_type: RootType,
+        should_track_activeness: bool,
+    ) {
+        // Mark as fully restored since transient tasks don't need restoration from disk
+        self.flags.set_restored(TaskDataCategory::All);
+
+        // This is a root (or once) task. These tasks use the max aggregation number.
+        self.aggregation_number = AggregationNumber {
+            base: u32::MAX,
+            distance: 0,
+            effective: u32::MAX,
+        };
+
+        if should_track_activeness {
+            let activeness = ActivenessState::new_root(root_type, task_id);
+            self.lazy.push(LazyField::Activeness(activeness));
+        }
+
+        // Set the task as scheduled so it can be executed
+        let done_event = Event::new(move || {
+            move || match root_type {
+                RootType::RootTask => "Root Task".to_string(),
+                RootType::OnceTask => "Once Task".to_string(),
+            }
+        });
+        self.set_in_progress(InProgressState::Scheduled {
+            done_event,
+            reason: TaskExecutionReason::Initial,
+        });
+    }
 }
 
 // Support serialization filtering for CellDependents and CollectibleDependents
@@ -628,38 +518,7 @@ mod tests {
     use turbo_tasks::{CellId, TaskId};
 
     use super::*;
-    use crate::{
-        backend::storage::SpecificTaskDataCategory,
-        data::{AggregationNumber, CellRef, Dirtyness, OutputValue},
-    };
-
-    /// Test wrapper that implements TaskStorageAccessors for testing the CachedDataItem adapter.
-    /// This wrapper doesn't track modifications since we're just testing functionality.
-    struct TestStorage(TaskStorage);
-
-    impl TestStorage {
-        fn new() -> Self {
-            Self(TaskStorage::new())
-        }
-    }
-
-    impl TaskStorageAccessors for TestStorage {
-        fn typed(&self) -> &TaskStorage {
-            &self.0
-        }
-
-        fn typed_mut(&mut self) -> &mut TaskStorage {
-            &mut self.0
-        }
-
-        fn track_modification(&mut self, _category: SpecificTaskDataCategory) {
-            // No-op for tests
-        }
-
-        fn check_access(&self, _category: crate::backend::TaskDataCategory) {
-            // No-op for tests
-        }
-    }
+    use crate::data::{AggregationNumber, CellRef, Dirtyness, OutputValue};
 
     #[test]
     fn test_accessors() {
@@ -1070,179 +929,4 @@ mod tests {
             "TaskStorage size changed! If this is intentional, update this test."
         );
     }
-
-    // ==========================================================================
-    // CachedDataItem Adapter Tests
-    // ==========================================================================
-
-    #[test]
-    fn test_adapter_basic() {
-        use crate::data::{CachedDataItem, CachedDataItemKey, CachedDataItemValue};
-
-        let mut storage = TestStorage::new();
-        let task1 = unsafe { TaskId::new_unchecked(1) };
-        let task2 = unsafe { TaskId::new_unchecked(2) };
-
-        // Test add for inline direct field (Output)
-        let output_item = CachedDataItem::Output {
-            value: OutputValue::Output(task1),
-        };
-        assert!(storage.add(output_item.clone()));
-        assert!(!storage.add(output_item)); // Second add should return false
-
-        // Test get for Output
-        let key = CachedDataItemKey::Output {};
-        assert!(storage.get(&key).is_some());
-        assert!(storage.contains_key(&key));
-
-        // Test remove for Output
-        let removed = storage.remove(&key);
-        assert!(matches!(removed, Some(CachedDataItemValue::Output { .. })));
-        assert!(storage.get(&key).is_none());
-
-        // Test add for inline AutoSet field (OutputDependent)
-        let dep_item = CachedDataItem::OutputDependent {
-            task: task1,
-            value: (),
-        };
-        assert!(storage.add(dep_item.clone()));
-        assert!(!storage.add(dep_item)); // Already exists
-
-        // Verify via typed accessor
-        assert!(storage.typed().output_dependent().contains(&task1));
-
-        // Test add another
-        let dep_item2 = CachedDataItem::OutputDependent {
-            task: task2,
-            value: (),
-        };
-        assert!(storage.add(dep_item2));
-        assert_eq!(storage.typed().output_dependent().len(), 2);
-
-        // Test remove OutputDependent
-        let key = CachedDataItemKey::OutputDependent { task: task1 };
-        let removed = storage.remove(&key);
-        assert!(matches!(
-            removed,
-            Some(CachedDataItemValue::OutputDependent { value: () })
-        ));
-        assert_eq!(storage.typed().output_dependent().len(), 1);
-        assert!(!storage.typed().output_dependent().contains(&task1));
-        assert!(storage.typed().output_dependent().contains(&task2));
-    }
-
-    #[test]
-    fn test_adapter_flags() {
-        use crate::data::{CachedDataItem, CachedDataItemKey, CachedDataItemValue};
-
-        let mut storage = TestStorage::new();
-
-        // Test flag field (Immutable)
-        let immutable_item = CachedDataItem::Immutable { value: () };
-        assert!(storage.add(immutable_item.clone()));
-        assert!(storage.typed().flags.immutable());
-        assert!(!storage.add(immutable_item)); // Already set
-
-        // Test get for flag
-        let key = CachedDataItemKey::Immutable {};
-        assert!(storage.get(&key).is_some());
-
-        // Test remove for flag
-        let removed = storage.remove(&key);
-        assert!(matches!(
-            removed,
-            Some(CachedDataItemValue::Immutable { value: () })
-        ));
-        assert!(!storage.typed().flags.immutable());
-
-        // Removing again should return None
-        assert!(storage.remove(&key).is_none());
-    }
-
-    #[test]
-    fn test_adapter_counter_map() {
-        use crate::data::{CachedDataItem, CachedDataItemKey, CachedDataItemValue};
-
-        let mut storage = TestStorage::new();
-        let task1 = unsafe { TaskId::new_unchecked(1) };
-        let task2 = unsafe { TaskId::new_unchecked(2) };
-
-        // Test inline CounterMap field (Upper)
-        let upper_item = CachedDataItem::Upper {
-            task: task1,
-            value: 5,
-        };
-        assert!(storage.add(upper_item));
-        assert_eq!(storage.upper().get(&task1), Some(&5));
-
-        // Update the value (insert returns old value)
-        let upper_update = CachedDataItem::Upper {
-            task: task1,
-            value: 10,
-        };
-        let old = storage.insert(upper_update);
-        assert!(matches!(old, Some(CachedDataItemValue::Upper { value: 5 })));
-        assert_eq!(storage.upper().get(&task1), Some(&10));
-
-        // Test lazy CounterMap field (Follower)
-        let follower_item = CachedDataItem::Follower {
-            task: task2,
-            value: 3,
-        };
-        assert!(storage.add(follower_item));
-        assert_eq!(storage.followers().unwrap().get(&task2), Some(&3));
-
-        // Test remove Follower
-        let key = CachedDataItemKey::Follower { task: task2 };
-        let removed = storage.remove(&key);
-        assert!(matches!(
-            removed,
-            Some(CachedDataItemValue::Follower { value: 3 })
-        ));
-        // After removal, followers map might be empty or the key just removed
-        assert!(storage.followers().is_none_or(|f| f.get(&task2).is_none()));
-    }
-
-    #[test]
-    fn test_adapter_lazy_autoset() {
-        use crate::data::{CachedDataItem, CachedDataItemKey, CachedDataItemValue};
-
-        let mut storage = TestStorage::new();
-        let task1 = unsafe { TaskId::new_unchecked(1) };
-        let task2 = unsafe { TaskId::new_unchecked(2) };
-
-        // Test lazy AutoSet field (Child)
-        let child_item = CachedDataItem::Child {
-            task: task1,
-            value: (),
-        };
-        assert!(storage.add(child_item));
-        assert!(storage.children().is_some());
-        assert!(storage.children().unwrap().contains(&task1));
-
-        // Add another child
-        let child_item2 = CachedDataItem::Child {
-            task: task2,
-            value: (),
-        };
-        assert!(storage.add(child_item2));
-        assert_eq!(storage.children().unwrap().len(), 2);
-
-        // Test get
-        let key = CachedDataItemKey::Child { task: task1 };
-        assert!(storage.get(&key).is_some());
-        let key_missing = CachedDataItemKey::Child {
-            task: unsafe { TaskId::new_unchecked(999) },
-        };
-        assert!(storage.get(&key_missing).is_none());
-
-        // Test remove
-        let removed = storage.remove(&key);
-        assert!(matches!(
-            removed,
-            Some(CachedDataItemValue::Child { value: () })
-        ));
-        assert_eq!(storage.children().unwrap().len(), 1);
-        assert!(!storage.children().unwrap().contains(&task1));
-    }
 }
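Several schema fields above carry `filter_transient`: when a task is persisted, entries that reference transient tasks are skipped, since transient task ids are meaningless across sessions. A minimal, hypothetical model of that filtering, assuming (as the `TRANSIENT_TASK_BIT` constant imported in `backend/mod.rs` suggests) that transient ids are distinguished by their top bit:

```rust
// Simplified sketch of `filter_transient`; the real schema filters typed
// collections during encode, not plain u32 slices, and the exact bit
// layout of TaskId is an assumption here.
const TRANSIENT_TASK_BIT: u32 = 1 << 31;

fn is_transient(task_id: u32) -> bool {
    task_id & TRANSIENT_TASK_BIT != 0
}

/// Keep only entries that are safe to persist across sessions.
fn persistable(ids: &[u32]) -> Vec<u32> {
    ids.iter().copied().filter(|id| !is_transient(*id)).collect()
}

fn main() {
    let ids = [1, 2, TRANSIENT_TASK_BIT | 3];
    assert_eq!(persistable(&ids), vec![1, 2]);
}
```

Filtering at serialization time (rather than refusing to store such entries at all) keeps the in-memory graph uniform while guaranteeing that restored state never dangles into a previous session's transient tasks.
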
diff --git a/turbopack/crates/turbo-tasks-backend/src/backing_storage.rs b/turbopack/crates/turbo-tasks-backend/src/backing_storage.rs
index f448773a77a20..d52501947ab7d 100644
--- a/turbopack/crates/turbo-tasks-backend/src/backing_storage.rs
+++ b/turbopack/crates/turbo-tasks-backend/src/backing_storage.rs
@@ -95,8 +95,8 @@ pub trait BackingStorageSealed: 'static + Send + Sync {
         storage: &mut TaskStorage,
     ) -> Result<()>;
 
-    /// Batch lookup and decode data for multiple tasks directly into TaskStorage instances.
-    /// Returns a vector of TaskStorage, one for each task_id in the input slice.
+    /// Batch lookup and decode data for multiple tasks directly into TypedStorage instances.
+    /// Returns a vector of TypedStorage, one for each task_id in the input slice.
     /// # Safety
     ///
     /// `tx` must be a transaction from this BackingStorage instance.
diff --git a/turbopack/crates/turbo-tasks-backend/src/data.rs b/turbopack/crates/turbo-tasks-backend/src/data.rs
index 3f0323f891f11..9a254c12cc853 100644
--- a/turbopack/crates/turbo-tasks-backend/src/data.rs
+++ b/turbopack/crates/turbo-tasks-backend/src/data.rs
@@ -1,14 +1,11 @@
 use bincode::{Decode, Encode};
 use rustc_hash::FxHashSet;
 use turbo_tasks::{
-    CellId, KeyValuePair, SharedReference, TaskExecutionReason, TaskId, TaskPriority, TraitTypeId,
-    TypedSharedReference, ValueTypeId,
+    CellId, TaskExecutionReason, TaskId, TaskPriority, TraitTypeId,
     backend::TurboTasksExecutionError,
     event::{Event, EventListener},
 };
 
-use crate::backend::TaskDataCategory;
-
 // this traits are needed for the transient variants of `CachedDataItem`
 // transient variants are never cloned or compared
 macro_rules! transient_traits {
@@ -257,237 +254,8 @@ pub struct LeafDistance {
     pub max_distance_in_buffer: u32,
 }
 
-#[derive(Debug, Clone, KeyValuePair, Encode, Decode)]
-pub enum CachedDataItem {
-    // Output
-    Output {
-        value: OutputValue,
-    },
-    Collectible {
-        collectible: CollectibleRef,
-        value: i32,
-    },
-
-    // State
-    Dirty {
-        value: Dirtyness,
-    },
-    CurrentSessionClean {
-        // TODO: bgw: Add a way to skip the entire enum variant in bincode (generating an error
-        // upon attempted serialization) similar to #[serde(skip)] on variants
-        #[bincode(skip, default = "unreachable_decode")]
-        value: (),
-    },
-
-    // Children
-    Child {
-        task: TaskId,
-        value: (),
-    },
-
-    // Cells
-    CellData {
-        cell: CellId,
-        value: TypedSharedReference,
-    },
-    TransientCellData {
-        #[bincode(skip, default = "unreachable_decode")]
-        cell: CellId,
-        #[bincode(skip, default = "unreachable_decode")]
-        value: SharedReference,
-    },
-    CellTypeMaxIndex {
-        cell_type: ValueTypeId,
-        value: u32,
-    },
-
-    // Dependencies
-    OutputDependency {
-        target: TaskId,
-        value: (),
-    },
-    CellDependency {
-        target: CellRef,
-        key: Option,
-        value: (),
-    },
-    CollectiblesDependency {
-        target: CollectiblesRef,
-        value: (),
-    },
-
-    // Dependent
-    OutputDependent {
-        task: TaskId,
-        value: (),
-    },
-    CellDependent {
-        cell: CellId,
-        key: Option,
-        task: TaskId,
-        value: (),
-    },
-    CollectiblesDependent {
-        collectible_type: TraitTypeId,
-        task: TaskId,
-        value: (),
-    },
-
-    // Priority
-    LeafDistance {
-        value: LeafDistance,
-    },
-
-    // Aggregation Graph
-    AggregationNumber {
-        value: AggregationNumber,
-    },
-    Follower {
-        task: TaskId,
-        value: u32,
-    },
-    Upper {
-        task: TaskId,
-        value: u32,
-    },
-
-    // Aggregated Data
-    AggregatedDirtyContainer {
-        task: TaskId,
-        value: i32,
-    },
-    AggregatedCurrentSessionCleanContainer {
-        #[bincode(skip, default = "unreachable_decode")]
-        task: TaskId,
-        #[bincode(skip, default = "unreachable_decode")]
-        value: i32,
-    },
-    AggregatedCollectible {
-        collectible: CollectibleRef,
-        value: i32,
-    },
-    AggregatedDirtyContainerCount {
-        value: i32,
-    },
-    AggregatedCurrentSessionCleanContainerCount {
-        #[bincode(skip, default = "unreachable_decode")]
-        value: i32,
-    },
-
-    // Flags
-    HasInvalidator {
-        value: (),
-    },
-    Immutable {
-        value: (),
-    },
-
-    // Transient Root Type
-    Activeness {
-        #[bincode(skip, default = "unreachable_decode")]
-        value: ActivenessState,
-    },
-
-    // Transient In Progress state
-    InProgress {
-        #[bincode(skip, default = "unreachable_decode")]
-        value: InProgressState,
-    },
-    InProgressCell {
-        #[bincode(skip, default = "unreachable_decode")]
-        cell: CellId,
-        #[bincode(skip, default = "unreachable_decode")]
-        value: InProgressCellState,
-    },
-    OutdatedCollectible {
-        #[bincode(skip, default = "unreachable_decode")]
-        collectible: CollectibleRef,
-        #[bincode(skip, default = "unreachable_decode")]
-        value: i32,
-    },
-    OutdatedOutputDependency {
-        #[bincode(skip, default = "unreachable_decode")]
-        target: TaskId,
-        #[bincode(skip, default = "unreachable_decode")]
-        value: (),
-    },
-    OutdatedCellDependency {
-        #[bincode(skip, default = "unreachable_decode")]
-        target: CellRef,
-        #[bincode(skip, default = "unreachable_decode")]
-        key: Option,
-        #[bincode(skip, default = "unreachable_decode")]
-        value: (),
-    },
-    OutdatedCollectiblesDependency {
-        #[bincode(skip, default = "unreachable_decode")]
-        target: CollectiblesRef,
-        #[bincode(skip, default = "unreachable_decode")]
-        value: (),
-    },
-}
-
-fn unreachable_decode<T>() -> T {
-    unreachable!("CachedDataItem variant should not have been encoded, cannot decode")
-}
-
-impl CachedDataItem {
-    pub fn cell_data(
-        is_serializable_cell_content: bool,
-        cell: CellId,
-        value: TypedSharedReference,
-    ) -> Self {
-        if is_serializable_cell_content {
-            CachedDataItem::CellData { cell, value }
-        } else {
-            CachedDataItem::TransientCellData {
-                cell,
-                value: value.into_untyped(),
-            }
-        }
-    }
-
-    pub fn is_persistent(&self) -> bool {
-        match self {
-            CachedDataItem::Output { value } => value.is_transient(),
-            CachedDataItem::Collectible { collectible, .. } => {
-                !collectible.cell.task.is_transient()
-            }
-            CachedDataItem::Dirty { .. } => true,
-            CachedDataItem::CurrentSessionClean { .. } => false,
-            CachedDataItem::Child { task, .. } => !task.is_transient(),
-            CachedDataItem::CellData { .. } => true,
-            CachedDataItem::TransientCellData { .. } => false,
-            CachedDataItem::CellTypeMaxIndex { .. } => true,
-            CachedDataItem::OutputDependency { target, .. } => !target.is_transient(),
-            CachedDataItem::CellDependency { target, .. } => !target.task.is_transient(),
-            CachedDataItem::CollectiblesDependency { target, .. } => !target.task.is_transient(),
-            CachedDataItem::OutputDependent { task, .. } => !task.is_transient(),
-            CachedDataItem::CellDependent { task, .. } => !task.is_transient(),
-            CachedDataItem::CollectiblesDependent { task, .. } => !task.is_transient(),
-            CachedDataItem::AggregationNumber { .. } => true,
-            CachedDataItem::Follower { task, .. } => !task.is_transient(),
-            CachedDataItem::Upper { task, .. } => !task.is_transient(),
-            CachedDataItem::LeafDistance { .. } => true,
-            CachedDataItem::AggregatedDirtyContainer { task, .. } => !task.is_transient(),
-            CachedDataItem::AggregatedCurrentSessionCleanContainer { .. } => false,
-            CachedDataItem::AggregatedCollectible { collectible, .. } => {
-                !collectible.cell.task.is_transient()
-            }
-            CachedDataItem::AggregatedDirtyContainerCount { .. } => true,
-            CachedDataItem::AggregatedCurrentSessionCleanContainerCount { .. } => false,
-            CachedDataItem::HasInvalidator { .. } => true,
-            CachedDataItem::Immutable { .. } => true,
-            CachedDataItem::Activeness { .. } => false,
-            CachedDataItem::InProgress { .. } => false,
-            CachedDataItem::InProgressCell { .. } => false,
-            CachedDataItem::OutdatedCollectible { .. } => false,
-            CachedDataItem::OutdatedOutputDependency { .. } => false,
-            CachedDataItem::OutdatedCellDependency { .. } => false,
-            CachedDataItem::OutdatedCollectiblesDependency { .. } => false,
-        }
-    }
-
+impl InProgressState {
+    /// Create a new scheduled state with a done event.
     pub fn new_scheduled(
         reason: TaskExecutionReason,
         description: impl FnOnce() -> InnerFnDescription,
@@ -499,9 +267,7 @@ impl CachedDataItem {
             let inner = description();
             move || format!("{} done_event", inner())
         });
-        CachedDataItem::InProgress {
-            value: InProgressState::Scheduled { done_event, reason },
-        }
+        InProgressState::Scheduled { done_event, reason }
     }
 
     pub fn new_scheduled_with_listener(
@@ -518,192 +284,9 @@ impl CachedDataItem {
             move || format!("{} done_event", inner())
         });
         let listener = done_event.listen_with_note(note);
-        (
-            CachedDataItem::InProgress {
-                value: InProgressState::Scheduled { done_event, reason },
-            },
-            listener,
-        )
-    }
-
-    pub fn category(&self) -> TaskDataCategory {
-        match self {
-            Self::CellData { .. }
-            | Self::CellTypeMaxIndex { .. }
-            | Self::OutputDependency { .. }
-            | Self::CellDependency { .. }
-            | Self::CollectiblesDependency { .. }
-            | Self::OutputDependent { .. }
-            | Self::CellDependent { .. }
-            | Self::LeafDistance { .. } => TaskDataCategory::Data,
-
-            Self::Collectible { .. }
-            | Self::Output { .. }
-            | Self::AggregationNumber { .. }
-            | Self::Dirty { .. }
-            | Self::Follower { .. }
-            | Self::Child { .. }
-            | Self::Upper { .. }
-            | Self::AggregatedDirtyContainer { .. }
-            | Self::AggregatedCollectible { .. }
-            | Self::AggregatedDirtyContainerCount { .. }
-            | Self::HasInvalidator { .. }
-            | Self::Immutable { .. }
-            | Self::CollectiblesDependent { .. } => TaskDataCategory::Meta,
-
-            Self::OutdatedCollectible { .. }
-            | Self::OutdatedOutputDependency { .. }
-            | Self::OutdatedCellDependency { .. }
-            | Self::OutdatedCollectiblesDependency { .. }
-            | Self::TransientCellData { .. }
-            | Self::CurrentSessionClean { .. }
-            | Self::AggregatedCurrentSessionCleanContainer { .. }
-            | Self::AggregatedCurrentSessionCleanContainerCount { .. }
-            | Self::InProgressCell { .. }
-            | Self::InProgress { .. }
-            | Self::Activeness { .. } => TaskDataCategory::All,
-        }
-    }
-
-    pub fn is_optional(&self) -> bool {
-        matches!(self, CachedDataItem::CellData { .. })
-    }
-}
-
-impl CachedDataItemKey {
-    pub fn cell_data(is_serializable_cell_content: bool, cell: CellId) -> Self {
-        if is_serializable_cell_content {
-            CachedDataItemKey::CellData { cell }
-        } else {
-            CachedDataItemKey::TransientCellData { cell }
-        }
-    }
-
-    pub fn is_persistent(&self) -> bool {
-        match self {
-            CachedDataItemKey::Output { .. } => true,
-            CachedDataItemKey::Collectible { collectible, .. } => {
-                !collectible.cell.task.is_transient()
-            }
-            CachedDataItemKey::Dirty { .. } => true,
-            CachedDataItemKey::CurrentSessionClean { .. } => false,
-            CachedDataItemKey::Child { task, .. } => !task.is_transient(),
-            CachedDataItemKey::CellData { .. } => true,
-            CachedDataItemKey::TransientCellData { .. } => false,
-            CachedDataItemKey::CellTypeMaxIndex { .. } => true,
-            CachedDataItemKey::OutputDependency { target, .. } => !target.is_transient(),
-            CachedDataItemKey::CellDependency { target, .. } => !target.task.is_transient(),
-            CachedDataItemKey::CollectiblesDependency { target, .. } => !target.task.is_transient(),
-            CachedDataItemKey::OutputDependent { task, .. } => !task.is_transient(),
-            CachedDataItemKey::CellDependent { task, .. } => !task.is_transient(),
-            CachedDataItemKey::CollectiblesDependent { task, .. } => !task.is_transient(),
-            CachedDataItemKey::AggregationNumber { .. } => true,
-            CachedDataItemKey::Follower { task, .. } => !task.is_transient(),
-            CachedDataItemKey::Upper { task, .. } => !task.is_transient(),
-            CachedDataItemKey::LeafDistance { .. } => true,
-            CachedDataItemKey::AggregatedDirtyContainer { task, .. } => !task.is_transient(),
-            CachedDataItemKey::AggregatedCurrentSessionCleanContainer { .. } => false,
-            CachedDataItemKey::AggregatedCollectible { collectible, .. } => {
-                !collectible.cell.task.is_transient()
-            }
-            CachedDataItemKey::AggregatedDirtyContainerCount { .. } => true,
-            CachedDataItemKey::AggregatedCurrentSessionCleanContainerCount { .. } => false,
-            CachedDataItemKey::HasInvalidator { .. } => true,
-            CachedDataItemKey::Immutable { .. } => true,
-            CachedDataItemKey::Activeness { .. } => false,
-            CachedDataItemKey::InProgress { .. } => false,
-            CachedDataItemKey::InProgressCell { .. } => false,
-            CachedDataItemKey::OutdatedCollectible { .. } => false,
-            CachedDataItemKey::OutdatedOutputDependency { .. } => false,
-            CachedDataItemKey::OutdatedCellDependency { .. } => false,
-            CachedDataItemKey::OutdatedCollectiblesDependency { .. } => false,
-        }
-    }
-
-    pub fn category(&self) -> TaskDataCategory {
-        self.ty().category()
-    }
-}
-
-impl CachedDataItemType {
-    pub fn category(&self) -> TaskDataCategory {
-        match self {
-            Self::CellData { .. }
-            | Self::CellTypeMaxIndex { .. }
-            | Self::OutputDependency { .. }
-            | Self::CellDependency { .. }
-            | Self::CollectiblesDependency { .. }
-            | Self::OutputDependent { .. }
-            | Self::CellDependent { .. }
-            | Self::LeafDistance { .. } => TaskDataCategory::Data,
-
-            Self::Collectible { .. }
-            | Self::Output { .. }
-            | Self::AggregationNumber { .. }
-            | Self::Dirty { .. }
-            | Self::Follower { .. }
-            | Self::Child { .. }
-            | Self::Upper { .. }
-            | Self::AggregatedDirtyContainer { .. }
-            | Self::AggregatedCollectible { .. }
-            | Self::AggregatedDirtyContainerCount { .. }
-            | Self::HasInvalidator { .. }
-            | Self::Immutable { .. }
-            | Self::CollectiblesDependent { .. } => TaskDataCategory::Meta,
-
-            Self::OutdatedCollectible { .. }
-            | Self::OutdatedOutputDependency { .. }
-            | Self::OutdatedCellDependency { .. }
-            | Self::OutdatedCollectiblesDependency { .. }
-            | Self::TransientCellData { .. }
-            | Self::CurrentSessionClean { .. }
-            | Self::AggregatedCurrentSessionCleanContainer { .. }
-            | Self::AggregatedCurrentSessionCleanContainerCount { .. }
-            | Self::InProgressCell { .. }
-            | Self::InProgress { .. }
-            | Self::Activeness { .. } => TaskDataCategory::All,
-        }
-    }
-
-    pub fn is_persistent(&self) -> bool {
-        match self {
-            Self::Output
-            | Self::Collectible
-            | Self::Dirty
-            | Self::Child
-            | Self::CellData
-            | Self::CellTypeMaxIndex
-            | Self::OutputDependency
-            | Self::CellDependency
-            | Self::CollectiblesDependency
-            | Self::OutputDependent
-            | Self::CellDependent
-            | Self::CollectiblesDependent
-            | Self::AggregationNumber
-            | Self::Follower
-            | Self::Upper
-            | Self::LeafDistance
-            | Self::AggregatedDirtyContainer
-            | Self::AggregatedCollectible
-            | Self::AggregatedDirtyContainerCount
-            | Self::HasInvalidator
-            | Self::Immutable => true,
-
-            Self::Activeness
-            | Self::InProgress
-            | Self::InProgressCell
-            | Self::CurrentSessionClean
-            | Self::AggregatedCurrentSessionCleanContainer
-            | Self::AggregatedCurrentSessionCleanContainerCount
-            | Self::TransientCellData
-            | Self::OutdatedCollectible
-            | Self::OutdatedOutputDependency
-            | Self::OutdatedCellDependency
-            | Self::OutdatedCollectiblesDependency => false,
-        }
+        (InProgressState::Scheduled { done_event, reason }, listener)
     }
 }
-
 /// Used by the [`get_mut`][crate::backend::storage::get_mut] macro to restrict mutable access to a
 /// subset of types. No mutable access should be allowed for persisted data, since that would break
 /// persisting.
@@ -712,23 +295,3 @@ pub mod allow_mut_access {
     pub const InProgress: () = ();
     pub const Activeness: () = ();
 }
-
-impl CachedDataItemValueRef<'_> {
-    pub fn is_persistent(&self) -> bool {
-        match self {
-            CachedDataItemValueRef::Output { value } => !value.is_transient(),
-            _ => true,
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-
-    #[test]
-    fn test_sizes() {
-        assert_eq!(std::mem::size_of::<CachedDataItem>(), 40);
-        assert_eq!(std::mem::size_of::<CachedDataItemKey>(), 32);
-        assert_eq!(std::mem::size_of::<CachedDataItemValue>(), 32);
-    }
-}
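With the `CachedDataItem` enum gone, the scheduling constructors move onto `InProgressState` itself and return the bare state rather than an enum-wrapped value; callers pair the result with the typed `set_in_progress` setter instead of a generic `add`. A rough sketch of the shape change, using stand-in types (the real state carries an `Event` and a `TaskExecutionReason`, not a `String`):

```rust
// Illustrative only: the constructor formerly returned something like
// CachedDataItem::InProgress { value: InProgressState::Scheduled { .. } };
// after this change it returns the state directly.
enum InProgressState {
    Scheduled { description: String },
}

fn new_scheduled(description: impl FnOnce() -> String) -> InProgressState {
    // The real code attaches the description lazily to a done Event; here we
    // materialize it eagerly to keep the sketch self-contained.
    InProgressState::Scheduled {
        description: format!("{} done_event", description()),
    }
}

fn main() {
    let InProgressState::Scheduled { description } = new_scheduled(|| "my_task".into());
    assert_eq!(description, "my_task done_event");
}
```
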
diff --git a/turbopack/crates/turbo-tasks-backend/tests/recompute.rs b/turbopack/crates/turbo-tasks-backend/tests/recompute.rs
index 39c2b261b6ff2..a00a966b73ddb 100644
--- a/turbopack/crates/turbo-tasks-backend/tests/recompute.rs
+++ b/turbopack/crates/turbo-tasks-backend/tests/recompute.rs
@@ -4,7 +4,7 @@
 use anyhow::Result;
 use turbo_tasks::{ResolvedVc, State, Vc};
-use turbo_tasks_testing::{Registration, register, run_once};
+use turbo_tasks_testing::{Registration, register, run, run_once};
 
 static REGISTRATION: Registration = register!();
 
@@ -135,3 +135,103 @@ async fn compute2(input: Vc<ChangingInput>) -> Result<Vc<u32>> {
     let state_value = *input.await?.state.get();
     Ok(Vc::cell(state_value))
 }
+
+// ============================================================================
+// recompute_dependency test - verifies dependent tasks re-execute correctly
+// ============================================================================
+
+/// Tests that when a task's dependency changes, both the inner and outer
+/// tasks re-execute correctly and return updated values.
+/// This tests the basic dependency propagation through a simple two-task chain.
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+async fn recompute_dependency() {
+    run(&REGISTRATION, || async {
+        let input = get_dependency_input().resolve().await?;
+        // Reset state to 1 at the start of each iteration (important for multi-run tests)
+        input.await?.state.set(1);
+
+        // Initial execution - establishes dependency chain:
+        // outer_compute -> inner_compute -> input.state
+        let output = outer_compute(input);
+        let read = output.strongly_consistent().await?;
+        println!(
+            "first read: value={}, inner_random={}, outer_random={}",
+            read.value, read.inner_random, read.outer_random
+        );
+        assert_eq!(read.value, 1);
+        let prev_inner_random = read.inner_random;
+        let prev_outer_random = read.outer_random;
+
+        // Change state - should invalidate inner_compute,
+        // which should then invalidate outer_compute
+        println!("changing input");
+        input.await?.state.set(2);
+
+        let read = output.strongly_consistent().await?;
+        println!(
+            "second read: value={}, inner_random={}, outer_random={}",
+            read.value, read.inner_random, read.outer_random
+        );
+
+        // Value should be updated
+        assert_eq!(read.value, 2);
+
+        // Inner task should have re-executed (different random)
+        assert_ne!(
+            read.inner_random, prev_inner_random,
+            "inner_compute should have re-executed"
+        );
+
+        // CRITICAL: Outer task should ALSO have re-executed
+        // This is what the bug broke - outer_compute wasn't being
+        // invalidated because its output_dependent edge was removed
+        assert_ne!(
+            read.outer_random, prev_outer_random,
+            "outer_compute should have re-executed due to dependency on inner_compute"
+        );
+
+        anyhow::Ok(())
+    })
+    .await
+    .unwrap();
+}
+
+#[turbo_tasks::function]
+fn get_dependency_input() -> Vc<ChangingInput> {
+    ChangingInput {
+        state: State::new(1),
+    }
+    .cell()
+}
+
+#[turbo_tasks::value]
+struct DependencyOutput {
+    value: u32,
+    inner_random: u32,
+    outer_random: u32,
+}
+
+/// Inner task - reads state directly, returns value with embedded random
+#[turbo_tasks::function]
+async fn inner_compute(input: Vc<ChangingInput>) -> Result<Vc<u32>> {
+    println!("inner_compute()");
+    let value = *input.await?.state.get();
+    // Combine value with random to detect re-execution
+    // Value in lower 16 bits, random in upper 16 bits
+    Ok(Vc::cell(value | (rand::random::<u32>() << 16)))
+}
+
+/// Outer task - depends on inner_compute
+#[turbo_tasks::function]
+async fn outer_compute(input: Vc<ChangingInput>) -> Result<Vc<DependencyOutput>> {
+    println!("outer_compute()");
+    let inner_result = *inner_compute(input).await?;
+    let value = inner_result & 0xFFFF;
+    let inner_random = inner_result >> 16;
+    Ok(DependencyOutput {
+        value,
+        inner_random,
+        outer_random: rand::random(),
+    }
+    .cell())
+}
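The new test smuggles two signals through one `Vc<u32>`: the observed state value in the low 16 bits and a fresh random token per execution in the high 16 bits, so a single read reveals both whether the value propagated and whether `inner_compute` actually re-ran. The packing scheme in isolation (note that only the low 16 bits of the token survive the shift):

```rust
// Same bit layout as the test: value in the low 16 bits, a per-execution
// random token in the high 16 bits.
fn pack(value: u32, token: u32) -> u32 {
    debug_assert!(value <= 0xFFFF, "value must fit in 16 bits");
    value | (token << 16)
}

fn unpack(packed: u32) -> (u32, u32) {
    (packed & 0xFFFF, packed >> 16)
}

fn main() {
    let packed = pack(2, 0xABCD);
    assert_eq!(unpack(packed), (2, 0xABCD));
}
```
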
diff --git a/turbopack/crates/turbo-tasks-macros/src/derive/key_value_pair_macro.rs b/turbopack/crates/turbo-tasks-macros/src/derive/key_value_pair_macro.rs
deleted file mode 100644
index 64d1ca3924151..0000000000000
--- a/turbopack/crates/turbo-tasks-macros/src/derive/key_value_pair_macro.rs
+++ /dev/null
@@ -1,350 +0,0 @@
-use proc_macro::TokenStream;
-use quote::quote;
-use syn::{Ident, ItemEnum, parse_macro_input};
-
-pub fn derive_key_value_pair(input: TokenStream) -> TokenStream {
-    let input = parse_macro_input!(input as ItemEnum);
-
-    let ident = &input.ident;
-    let vis = &input.vis;
-    let type_name = Ident::new(&format!("{}Type", input.ident), input.ident.span());
-    let key_name = Ident::new(&format!("{}Key", input.ident), input.ident.span());
-    let value_name = Ident::new(&format!("{}Value", input.ident), input.ident.span());
-    let value_ref_name = Ident::new(&format!("{}ValueRef", input.ident), input.ident.span());
-    let value_ref_mut_name = Ident::new(&format!("{}ValueRefMut", input.ident), input.ident.span());
-
-    let variant_names = input
-        .variants
-        .iter()
-        .map(|variant| &variant.ident)
-        .collect::<Vec<_>>();
-
-    let key_fields = input
-        .variants
-        .iter()
-        .map(|variant| {
-            variant
-                .fields
-                .iter()
-                .filter(|field| {
-                    let Some(ident) = &field.ident else {
-                        return false;
-                    };
-                    ident != "value"
-                })
-                .collect::<Vec<_>>()
-        })
-        .collect::<Vec<_>>();
-
-    let value_fields = input
-        .variants
-        .iter()
-        .map(|variant| {
-            variant
-                .fields
-                .iter()
-                .filter(|field| {
-                    let Some(ident) = &field.ident else {
-                        return false;
-                    };
-                    ident == "value"
-                })
-                .collect::<Vec<_>>()
-        })
-        .collect::<Vec<_>>();
-
-    let key_decl = field_declarations(&key_fields);
-    let key_pat = patterns(&key_fields);
-    let key_clone_fields = clone_fields(&key_fields);
-
-    let value_decl = field_declarations(&value_fields);
-    let value_pat = patterns(&value_fields);
-    let value_clone_fields = clone_fields(&value_fields);
-
-    let value_ref_decl = ref_field_declarations(&value_fields);
-    let value_ref_mut_decl = mut_ref_field_declarations(&value_fields);
-    let value_ref_fields = ref_fields(&value_fields);
-
-    quote! {
-        #[automatically_derived]
-        impl turbo_tasks::KeyValuePair for #ident {
-            type Type = #type_name;
-            type Key = #key_name;
-            type Value = #value_name;
-            type ValueRef<'l> = #value_ref_name<'l> where Self: 'l;
-            type ValueRefMut<'l> = #value_ref_mut_name<'l> where Self: 'l;
-
-            fn ty(&self) -> #type_name {
-                match self {
-                    #(
-                        #ident::#variant_names { .. } => #type_name::#variant_names,
-                    )*
-                }
-            }
-
-            fn key(&self) -> #key_name {
-                match self {
-                    #(
-                        #ident::#variant_names { #key_pat .. } => #key_name::#variant_names { #key_clone_fields },
-                    )*
-                }
-            }
-
-            fn value(&self) -> #value_name {
-                match self {
-                    #(
-                        #ident::#variant_names { #value_pat .. } => #value_name::#variant_names { #value_clone_fields },
-                    )*
-                }
-            }
-
-            fn value_ref(&self) -> #value_ref_name<'_> {
-                match self {
-                    #(
-                        #ident::#variant_names { #value_pat .. } => #value_ref_name::#variant_names { #value_ref_fields },
-                    )*
-                }
-            }
-
-            fn value_mut(&mut self) -> #value_ref_mut_name<'_> {
-                match self {
-                    #(
-                        #ident::#variant_names { #value_pat .. } => #value_ref_mut_name::#variant_names { #value_ref_fields },
-                    )*
-                }
-            }
-
-            fn from_key_and_value(key: #key_name, value: #value_name) -> Self {
-                match (key, value) {
-                    #(
-                        (#key_name::#variant_names { #key_pat }, #value_name::#variant_names { #value_pat }) => #ident::#variant_names { #key_pat #value_pat },
-                    )*
-                    _ => panic!("Invalid key and value combination"),
-                }
-            }
-
-            fn from_key_and_value_ref(key: #key_name, value_ref: #value_ref_name) -> Self {
-                match (key, value_ref) {
-                    #(
-                        (#key_name::#variant_names { #key_pat }, #value_ref_name::#variant_names { #value_pat }) => #ident::#variant_names { #key_pat #value_clone_fields },
-                    )*
-                    _ => panic!("Invalid key and value combination"),
-                }
-            }
-
-            fn into_key_and_value(self) -> (#key_name, #value_name) {
-                match self {
-                    #(
-                        #ident::#variant_names { #key_pat #value_pat } => (#key_name::#variant_names { #key_pat }, #value_name::#variant_names { #value_pat }),
-                    )*
-                }
-            }
-        }
-
-        #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
-        #vis enum #type_name {
-            #(
-                #variant_names,
-            )*
-        }
-
-        #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
-        #vis enum #key_name {
-            #(
-                #variant_names {
-                    #key_decl
-                },
-            )*
-        }
-
-        #[derive(Debug, Clone, Default, PartialEq, Eq)]
-        #vis enum #value_name {
-            #(
-                #variant_names {
-                    #value_decl
-                },
-            )*
-            #[default]
-            Reserved,
-        }
-
-        #[derive(Debug, Copy, Clone, PartialEq, Eq)]
-        #vis enum #value_ref_name<'l> {
-            #(
-                #variant_names {
-                    #value_ref_decl
-                },
-            )*
-        }
-
-        #[derive(Debug, PartialEq, Eq)]
-        #vis enum #value_ref_mut_name<'l> {
-            #(
-                #variant_names {
-                    #value_ref_mut_decl
-                },
-            )*
-        }
-
-        #[automatically_derived]
-        impl #key_name {
-            pub fn ty(&self) -> #type_name {
-                match self {
-                    #(
-                        #key_name::#variant_names { .. } => #type_name::#variant_names,
-                    )*
-                }
-            }
-        }
-
-        #[automatically_derived]
-        impl #value_name {
-            pub fn as_ref(&self) -> #value_ref_name<'_> {
-                match self {
-                    #(
-                        #value_name::#variant_names { #value_pat .. } => #value_ref_name::#variant_names { #value_ref_fields },
-                    )*
-                    #value_name::Reserved => unreachable!(),
-                }
-            }
-
-            pub fn as_mut(&mut self) -> #value_ref_mut_name<'_> {
-                match self {
-                    #(
-                        #value_name::#variant_names { #value_pat .. } => #value_ref_mut_name::#variant_names { #value_ref_fields },
-                    )*
-                    #value_name::Reserved => unreachable!(),
-                }
-            }
-        }
-
-    }
-    .into()
-}
-
-fn patterns(fields: &[Vec<&syn::Field>]) -> Vec<TokenStream> {
-    fields
-        .iter()
-        .map(|fields| {
-            let pat = fields
-                .iter()
-                .map(|field| {
-                    let ident = field.ident.as_ref().unwrap();
-                    quote! {
-                        #ident
-                    }
-                })
-                .collect::<Vec<_>>();
-            quote! {
-                #(#pat,)*
-            }
-        })
-        .collect::<Vec<_>>()
-}
-
-fn clone_fields(fields: &[Vec<&syn::Field>]) -> Vec<TokenStream> {
-    fields
-        .iter()
-        .map(|fields| {
-            let pat = fields
-                .iter()
-                .map(|field| {
-                    let ident = field.ident.as_ref().unwrap();
-                    quote! {
-                        #ident: #ident.clone()
-                    }
-                })
-                .collect::<Vec<_>>();
-            quote! {
-                #(#pat,)*
-            }
-        })
-        .collect::<Vec<_>>()
-}
-
-fn ref_fields(fields: &[Vec<&syn::Field>]) -> Vec<TokenStream> {
-    fields
-        .iter()
-        .map(|fields| {
-            let pat = fields
-                .iter()
-                .map(|field| {
-                    let ident = field.ident.as_ref().unwrap();
-                    quote! {
-                        #ident
-                    }
-                })
-                .collect::<Vec<_>>();
-            quote! {
-                #(#pat,)*
-            }
-        })
-        .collect::<Vec<_>>()
-}
-
-fn field_declarations(fields: &[Vec<&syn::Field>]) -> Vec<TokenStream> {
-    fields
-        .iter()
-        .map(|fields| {
-            let fields = fields
-                .iter()
-                .map(|field| {
-                    let ty = &field.ty;
-                    let ident = field.ident.as_ref().unwrap();
-                    // we don't preserve attrs here because we don't copy over the derives, so the
-                    // attributes are likely irrelevant to the generated type
-                    quote! {
-                        #ident: #ty
-                    }
-                })
-                .collect::<Vec<_>>();
-            quote! {
-                #(#fields),*
-            }
-        })
-        .collect::<Vec<_>>()
-}
-
-fn ref_field_declarations(fields: &[Vec<&syn::Field>]) -> Vec<TokenStream> {
-    fields
-        .iter()
-        .map(|fields| {
-            let fields = fields
-                .iter()
-                .map(|field| {
-                    let ty = &field.ty;
-                    let ident = field.ident.as_ref().unwrap();
-                    // don't preserve attrs because we don't copy over the derives either
-                    quote! {
-                        #ident: &'l #ty
-                    }
-                })
-                .collect::<Vec<_>>();
-            quote! {
-                #(#fields),*
-            }
-        })
-        .collect::<Vec<_>>()
-}
-
-fn mut_ref_field_declarations(fields: &[Vec<&syn::Field>]) -> Vec<TokenStream> {
-    fields
-        .iter()
-        .map(|fields| {
-            let fields = fields
-                .iter()
-                .map(|field| {
-                    let ty = &field.ty;
-                    let ident = field.ident.as_ref().unwrap();
-                    // don't preserve attrs because we don't copy over the derives either
-                    quote! {
-                        #ident: &'l mut #ty
-                    }
-                })
-                .collect::<Vec<_>>();
-            quote! {
-                #(#fields),*
-            }
-        })
-        .collect::<Vec<_>>()
-}
diff --git a/turbopack/crates/turbo-tasks-macros/src/derive/mod.rs b/turbopack/crates/turbo-tasks-macros/src/derive/mod.rs
index 6cfc6633ba517..4708e4a506456 100644
--- a/turbopack/crates/turbo-tasks-macros/src/derive/mod.rs
+++ b/turbopack/crates/turbo-tasks-macros/src/derive/mod.rs
@@ -1,5 +1,4 @@
 mod deterministic_hash_macro;
-mod key_value_pair_macro;
 mod non_local_value_macro;
 mod operation_value_macro;
 mod task_input_macro;
@@ -9,7 +8,6 @@ mod value_debug_format_macro;
 mod value_debug_macro;
 
 pub use deterministic_hash_macro::derive_deterministic_hash;
-pub use key_value_pair_macro::derive_key_value_pair;
 pub use non_local_value_macro::derive_non_local_value;
 pub use operation_value_macro::derive_operation_value;
 use syn::{Attribute, Meta, Token, punctuated::Punctuated, spanned::Spanned};
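For reference, the deleted derive mechanically split an enum of `{ key fields…, value }` variants into parallel `Type`/`Key`/`Value` projections. A hand-written miniature of the generated shape, with two variants standing in for the thirty-odd real ones:

```rust
// Miniature of what #[derive(KeyValuePair)] expanded to; the typed
// TaskStorage schema replaces this enum-projection machinery entirely.
#[derive(Debug, PartialEq)]
enum Item {
    Child { task: u32, value: () },
    Output { value: String },
}

#[derive(Debug, PartialEq, Eq, Hash)]
enum ItemKey {
    Child { task: u32 },
    Output,
}

impl Item {
    /// The derive generated this key projection for every variant.
    fn key(&self) -> ItemKey {
        match self {
            Item::Child { task, .. } => ItemKey::Child { task: *task },
            Item::Output { .. } => ItemKey::Output,
        }
    }
}

fn main() {
    let item = Item::Child { task: 7, value: () };
    assert_eq!(item.key(), ItemKey::Child { task: 7 });
}
```

The projection enums were what the `get!`/`update!`/`remove!` macros matched on; once every field has a dedicated accessor, both the derive and the macros become dead weight.
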
diff --git a/turbopack/crates/turbo-tasks-macros/src/derive/task_storage_macro.rs b/turbopack/crates/turbo-tasks-macros/src/derive/task_storage_macro.rs
index d9dc5f1ab676a..8a0934d809e9d 100644
--- a/turbopack/crates/turbo-tasks-macros/src/derive/task_storage_macro.rs
+++ b/turbopack/crates/turbo-tasks-macros/src/derive/task_storage_macro.rs
@@ -59,12 +59,6 @@ struct FieldInfo {
     /// If true, use Default::default() semantics instead of Option<T> for inline direct fields.
     /// The field type should be T (not Option<T>), and empty is represented by T::default().
     use_default: bool,
-    /// The CachedDataItem variant name for adapter code generation.
-    /// If None, no adapter code is generated for this field.
-    cached_data_variant: Option<Ident>,
-    /// The key field names in CachedDataItemKey for collection types.
-    /// E.g., ["task"], ["target", "key"], ["cell", "key", "task"].
-    key_fields: Option<Vec<Ident>>,
 }
 
 impl FieldInfo {
@@ -73,16 +67,6 @@ impl FieldInfo {
         self.storage_type == StorageType::Flag
     }
 
-    /// Whether this field has a CachedDataItem variant (for adapter code generation).
-    fn has_cached_data_variant(&self) -> bool {
-        self.cached_data_variant.is_some()
-    }
-
-    /// Get the CachedDataItem variant identifier (panics if none).
-    fn cached_data_variant_ident(&self) -> &Ident {
-        self.cached_data_variant.as_ref().unwrap()
-    }
-
     /// Whether this field is transient (not serialized, in-memory only).
     fn is_transient(&self) -> bool {
         self.category == Category::Transient
@@ -90,21 +74,33 @@ impl FieldInfo {
 
     /// Generate the full `self.check_access(...)` call for this field.
     fn check_access_call(&self) -> TokenStream {
-        if self.is_transient() {
-            quote! { self.check_access(crate::backend::TaskDataCategory::All); }
-        } else if self.category == Category::Meta {
-            quote! { self.check_access(crate::backend::TaskDataCategory::Meta); }
-        } else {
-            quote! { self.check_access(crate::backend::TaskDataCategory::Data); }
+        match self.category {
+            Category::Data => {
+                quote! { self.check_access(crate::backend::SpecificTaskDataCategory::Data); }
+            }
+            Category::Meta => {
+                quote! { self.check_access(crate::backend::SpecificTaskDataCategory::Meta); }
+            }
+            Category::Transient => quote! {
+                let _we_dont_check_access_for_transient_data = ();
+            },
         }
     }
 
     /// Generate the full `self.track_modification(...)` call for this field.
     fn track_modification_call(&self) -> TokenStream {
-        if self.category == Category::Meta {
-            quote! { self.track_modification(crate::backend::storage::SpecificTaskDataCategory::Meta); }
-        } else {
-            quote! { self.track_modification(crate::backend::storage::SpecificTaskDataCategory::Data); }
+        match self.category {
+            Category::Data => {
+                quote! { self.track_modification(crate::backend::storage::SpecificTaskDataCategory::Data); }
+            }
+            Category::Meta => {
+                quote! { self.track_modification(crate::backend::storage::SpecificTaskDataCategory::Meta); }
+            }
+            Category::Transient => {
+                quote! {
+                    let _we_dont_track_mutations_for_transient_data = ();
+                }
+            }
         }
     }
@@ -308,89 +304,6 @@ impl FieldInfo {
     fn shrink_ident(&self) -> syn::Ident {
         self.prefixed_ident("shrink")
     }
-
-    // =========================================================================
-    // Key Fields Helpers (for CachedDataItem adapter code generation)
-    // =========================================================================
-
-    /// Generate the key pattern for match arms in CachedDataItemKey.
-    ///
-    /// For single key field: `{ task }`
-    /// For multiple key fields: `{ cell, key, task }`
-    fn key_pattern(&self) -> TokenStream {
-        match &self.key_fields {
-            None => quote! {},
-            Some(fields) => {
-                quote! { #(#fields),* }
-            }
-        }
-    }
-
-    /// Generate the key expression for use in set/map insert operations (moves values).
-    ///
-    /// For single key field: `task`
-    /// For multiple key fields: `(cell, key, task)`
-    fn key_expr(&self) -> TokenStream {
-        match &self.key_fields {
-            None => quote! {},
-            Some(fields) if fields.len() == 1 => {
-                let f = &fields[0];
-                quote! { #f }
-            }
-            Some(fields) => {
-                quote! { (#(#fields),*) }
-            }
-        }
-    }
-
-    /// Generate the key expression for use when fields are bound by reference (e.g., in
-    /// get/contains). Clones the values to construct a tuple by value.
-    ///
-    /// For single key field: `target.clone()`
-    /// For multiple key fields: `(target.clone(), key.clone())`
-    fn key_expr_cloned(&self) -> TokenStream {
-        match &self.key_fields {
-            None => quote! {},
-            Some(fields) if fields.len() == 1 => {
-                let f = &fields[0];
-                quote! { #f.clone() }
-            }
-            Some(fields) => {
-                let cloned: Vec<_> = fields.iter().map(|f| quote! { #f.clone() }).collect();
-                quote! { (#(#cloned),*) }
-            }
-        }
-    }
-
-    /// Generate the key expression with clone/copy for constructing CachedDataItemKey.
-    ///
-    /// For single key field: `task: *task`
-    /// For multiple key fields: `cell: *cell, key: *key, task: *task`
-    fn key_construction(&self) -> TokenStream {
-        match &self.key_fields {
-            None => quote! {},
-            Some(fields) => {
-                let constructs: Vec<_> = fields.iter().map(|f| quote! { #f: *#f }).collect();
-                quote! { #(#constructs),* }
-            }
-        }
-    }
-
-    /// Generate key construction for owned values (no dereference).
-    ///
-    /// For single key field: `task: task`
-    /// For multiple key fields: `cell: cell, key: key, task: task`
-    ///
-    /// Used when the key is already owned (e.g., from iter methods that return values not refs).
-    fn key_construction_owned(&self) -> TokenStream {
-        match &self.key_fields {
-            None => quote! {},
-            Some(fields) => {
-                let constructs: Vec<_> = fields.iter().map(|f| quote! { #f: #f }).collect();
-                quote! { #(#constructs),* }
-            }
-        }
-    }
 }
 
 #[derive(Debug, Clone, PartialEq, Eq)]
@@ -439,8 +352,6 @@ fn parse_field_storage_attributes(field: &syn::Field) -> FieldInfo {
     let mut inline = false; // Default is lazy (not inline)
     let mut filter_transient = false;
     let mut use_default = false;
-    let mut cached_data_variant: Option<Ident> = None;
-    let mut key_fields: Option<Vec<Ident>> = None;
 
     // Find and parse the field attribute
     if let Some(attr) = field.attrs.iter().find(|attr| {
@@ -515,31 +426,6 @@ fn parse_field_storage_attributes(field: &syn::Field) -> FieldInfo {
                     });
                 }
            }
-            "variant" => {
-                if let syn::Expr::Lit(syn::ExprLit {
-                    lit: syn::Lit::Str(lit_str),
-                    ..
-                }) = &nv.value
-                {
-                    cached_data_variant =
-                        Some(Ident::new(&lit_str.value(), lit_str.span()));
-                }
-            }
-            "key_field" => {
-                if let syn::Expr::Lit(syn::ExprLit {
-                    lit: syn::Lit::Str(lit_str),
-                    ..
-                }) = &nv.value
-                {
-                    let fields: Vec<Ident> = lit_str
-                        .value()
-                        .split(',')
-                        .map(|s| Ident::new(s.trim(), lit_str.span()))
-                        .collect();
-                    key_fields = Some(fields);
-                }
-            }
-
             other => {
                 meta.span()
                     .unwrap()
@@ -643,8 +529,6 @@ fn parse_field_storage_attributes(field: &syn::Field) -> FieldInfo {
         lazy: !inline, // Default is lazy; inline = true means lazy = false
         filter_transient,
        use_default,
-        cached_data_variant,
-        key_fields,
     }
 }
@@ -734,21 +618,6 @@ impl GroupedFields {
             .iter()
             .filter(move |f| !f.is_flag() && f.lazy && !f.is_transient() && f.category == category)
     }
-
-    // =========================================================================
-    // CachedDataItem adapter iterators
-    // =========================================================================
-
-    /// Returns an iterator over all fields that have a CachedDataItem variant.
-    /// These fields will generate adapter code for the CachedDataItem API.
-    fn fields_with_variant(&self) -> impl Iterator<Item = &FieldInfo> {
-        self.fields.iter().filter(|f| f.has_cached_data_variant())
-    }
-
-    /// Returns true if any fields have a CachedDataItem variant.
-    fn has_fields_with_variant(&self) -> bool {
-        self.fields.iter().any(|f| f.has_cached_data_variant())
-    }
 }
 
 // =============================================================================
@@ -886,9 +755,9 @@ fn generate_task_flags_bitfield(grouped_fields: &GroupedFields) -> TokenStream {
     quote! {
         bitfield::bitfield! {
-            /// Combined bitfield for task flags.
-            /// Persisted flags are in the lower bits (0 to N-1).
-            /// Transient flags are in the higher bits (N and above).
+            #[doc = "Combined bitfield for task flags."]
+            #[doc = "Persisted flags are in the lower bits (0 to N-1)."]
+            #[doc = "Transient flags are in the higher bits (N and above)."]
             #[derive(Clone, Default, PartialEq, Eq)]
             pub struct TaskFlags(u16);
             impl Debug;
@@ -898,25 +767,25 @@ fn generate_task_flags_bitfield(grouped_fields: &GroupedFields) -> TokenStream {
 
         #[automatically_derived]
         impl TaskFlags {
-            /// Mask for persisted flags (lower bits only)
+            #[doc = "Mask for persisted flags (lower bits only)"]
            pub const PERSISTED_MASK: u16 = #persisted_mask;
 
-            /// Get the raw bits value
+            #[doc = "Get the raw bits value"]
             pub fn bits(&self) -> u16 {
                 self.0
             }
 
-            /// Get only the persisted bits (for serialization)
+            #[doc = "Get only the persisted bits (for serialization)"]
             pub fn persisted_bits(&self) -> u16 {
                 self.0 & Self::PERSISTED_MASK
             }
 
-            /// Set bits from a raw value, preserving transient flags
+            #[doc = "Set bits from a raw value, preserving transient flags"]
             pub fn set_persisted_bits(&mut self, bits: u16) {
                 self.0 = (self.0 & !Self::PERSISTED_MASK) | (bits & Self::PERSISTED_MASK);
             }
 
-            /// Create from raw bits (for deserialization)
+            #[doc = "Create from raw bits (for deserialization)"]
             pub fn from_bits(bits: u16) -> Self {
                 Self(bits)
             }
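The generated `TaskFlags` bitfield packs persisted and transient booleans into one `u16`, with a mask so serialization only round-trips the persisted low bits. A hand-rolled, runnable equivalent of that masking logic (the mask here assumes two persisted flags; the real mask is computed from the schema's flag count):

```rust
// Sketch of the persisted/transient split generated for TaskFlags.
#[derive(Debug, Default, Clone, Copy, PartialEq)]
struct Flags(u16);

impl Flags {
    const PERSISTED_MASK: u16 = 0b11; // assumption: two persisted flags

    fn persisted_bits(self) -> u16 {
        self.0 & Self::PERSISTED_MASK
    }

    /// Restore persisted bits from storage without clobbering transient bits.
    fn set_persisted_bits(&mut self, bits: u16) {
        self.0 = (self.0 & !Self::PERSISTED_MASK) | (bits & Self::PERSISTED_MASK);
    }
}

fn main() {
    let mut flags = Flags(0b0100); // one transient flag already set
    flags.set_persisted_bits(0b0111); // only the low two bits are taken
    assert_eq!(flags.0, 0b0111);
    assert_eq!(flags.persisted_bits(), 0b11);
}
```

Keeping both kinds of flags in one word keeps the hot-path struct small, while the mask makes the persistence boundary explicit at serialization time.
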
{ - /// Lazily-allocated fields stored in a single Vec for memory efficiency + #[doc = "Lazily-allocated fields stored in a single Vec for memory efficiency"] lazy: Vec, } } else { @@ -1080,8 +949,8 @@ fn generate_typed_storage_struct(grouped_fields: &GroupedFields) -> TokenStream // Note: We don't derive bincode::Encode/Decode here since serialization // will be handled manually via encode_data/encode_meta/decode_data/decode_meta methods quote! { - /// Unified typed storage containing all task fields. - /// This is designed to be embedded in the actual InnerStorage for incremental migration. + #[doc = "Unified typed storage containing all task fields."] + #[doc = "This is designed to be embedded in the actual InnerStorage for incremental migration."] #[automatically_derived] #[derive(Debug, Default, PartialEq, turbo_tasks::ShrinkToFit)] #[shrink_to_fit(crate = "turbo_tasks::macro_helpers::shrink_to_fit")] @@ -1208,7 +1077,7 @@ fn generate_direct_field_accessors(field: &FieldInfo) -> TokenStream { self.find_lazy(#extractor) } - /// Set the field value, returning the old value if present. + #[doc = "Set the field value, returning the old value if present."] fn #set_name(&mut self, value: #field_type) -> Option<#field_type> { self.set_lazy(#matches_closure, #unwrap_owned, #constructor) } @@ -1217,10 +1086,10 @@ fn generate_direct_field_accessors(field: &FieldInfo) -> TokenStream { self.take_lazy(#matches_closure, #unwrap_owned) } - /// Get a mutable reference to the field value (if present). - /// - /// Unlike `get_or_create_lazy` for collections, this does NOT allocate - /// if the field is absent - it returns None instead. + #[doc = "Get a mutable reference to the field value (if present)."] + #[doc = ""] + #[doc = "Unlike `get_or_create_lazy` for collections, this does NOT allocate"] + #[doc = "if the field is absent - it returns None instead."] fn #get_mut_name(&mut self) -> Option<&mut #field_type> { self.find_lazy_mut(#extractor) } @@ -1236,6 +1105,7 @@ fn generate_collection_field_accessors( ) -> TokenStream { let ref_name = field.ref_ident(); let mut_name = field.mut_ident(); + let take_name = field.take_ident(); if field.is_inline() { // Inline: direct field access @@ -1247,6 +1117,10 @@ fn generate_collection_field_accessors( fn #mut_name(&mut self) -> &mut #field_type { &mut self.#field_name } + + fn #take_name(&mut self) -> #field_type { + std::mem::take(&mut self.#field_name) + } } } else { // Lazy: use find_lazy / get_or_create_lazy @@ -1267,6 +1141,13 @@ fn generate_collection_field_accessors( || #constructor, ) } + + fn #take_name(&mut self) -> Option<#field_type> { + self.take_lazy( + #matches_closure, + #unwrap_closure, + ) + } } } } @@ -1292,62 +1173,57 @@ fn generate_task_storage_accessors_trait(grouped_fields: &GroupedFields) -> Toke trait_methods.extend(generate_flag_trait_accessor_methods(field)); } - // Generate CachedDataItem adapter methods - let adapter_methods = generate_cached_data_adapter_trait_methods(grouped_fields); - quote! { - /// Trait for typed storage accessors. - /// - /// This trait is auto-generated by the TaskStorage macro. - /// Implementors only need to provide `typed()`, `typed_mut()`, `track_modification()`, - /// and `check_access()` methods, and all accessor methods are provided automatically. - /// - /// This is designed to work with TaskGuard. 
+ #[doc = "Trait for typed storage accessors."] + #[doc = ""] + #[doc = "This trait is auto-generated by the TaskStorage macro."] + #[doc = "Implementors only need to provide `typed()`, `typed_mut()`, `track_modification()`,"] + #[doc = "and `check_access()` methods, and all accessor methods are provided automatically."] + #[doc = ""] + #[doc = "This is designed to work with TaskGuard."] #[automatically_derived] pub trait TaskStorageAccessors { - /// Access the typed storage (read-only) + #[doc = "Access the typed storage (read-only)"] fn typed(&self) -> &TaskStorage; - /// Access the typed storage (mutable). - /// - /// Note: This does NOT track modifications. Call `track_modification()` separately - /// when the data actually changes. This split allows generated accessors to - /// only track modifications when actual changes occur. + #[doc = "Access the typed storage (mutable)."] + #[doc = ""] + #[doc = "Note: This does NOT track modifications. Call `track_modification()` separately"] + #[doc = "when the data actually changes. This split allows generated accessors to"] + #[doc = "only track modifications when actual changes occur."] fn typed_mut(&mut self) -> &mut TaskStorage; - /// Track that a modification occurred for the given category. - /// - /// Should be called after confirming that data actually changed. - /// This is separate from `typed_mut()` to allow optimizations where - /// we only track modifications when something actually changes. + #[doc = "Track that a modification occurred for the given category."] + #[doc = ""] + #[doc = "Should be called after confirming that data actually changed."] + #[doc = "This is separate from `typed_mut()` to allow optimizations where"] + #[doc = "we only track modifications when something actually changes."] fn track_modification(&mut self, category: crate::backend::storage::SpecificTaskDataCategory); - /// Verify that the task was accessed with the correct category before reading/writing. - /// - /// This is a debug assertion that catches bugs where code tries to access data - /// without having restored it from storage first. - /// - /// The category parameter uses `TaskDataCategory`: - /// - `Data` or `Meta`: Checks that the task was accessed with that category - /// - `All`: Used for transient data - no check is performed - /// - /// Implementors should check that the provided category matches how the task was accessed. - fn check_access(&self, category: crate::backend::TaskDataCategory); - - /// Shrink all collection fields to fit their current contents. - /// - /// This releases excess memory from hash maps and hash sets that may have - /// grown larger than needed during task execution. - /// - /// Note: This does NOT track modifications since shrink_to_fit doesn't - /// semantically change the data - it only reduces memory usage. 
+ #[doc = "Verify that the task was accessed with the correct category before reading/writing."] + #[doc = ""] + #[doc = "This is a debug assertion that catches bugs where code tries to access data"] + #[doc = "without having restored it from storage first."] + #[doc = ""] + #[doc = "The category parameter uses `SpecificTaskDataCategory`:"] + #[doc = "- `Data` or `Meta`: Checks that the task was accessed with that category"] + #[doc = ""] + #[doc = "Implementors should check that the provided category matches how the task was accessed."] + fn check_access(&self, category: crate::backend::storage::SpecificTaskDataCategory); + + #[doc = "Shrink all collection fields to fit their current contents."] + #[doc = ""] + #[doc = "This releases excess memory from hash maps and hash sets that may have"] + #[doc = "grown larger than needed during task execution."] + #[doc = ""] + #[doc = "Note: This does NOT track modifications since shrink_to_fit doesn't"] + #[doc = "semantically change the data - it only reduces memory usage."] fn shrink_to_fit(&mut self) { self.typed_mut().shrink_to_fit(); } #trait_methods - #adapter_methods } } } @@ -1361,7 +1237,6 @@ fn generate_trait_accessor_methods(field: &FieldInfo) -> TokenStream { let field_type = &field.field_type; let check_access = field.check_access_call(); let ref_expr = field.collection_ref_expr(); - let mut_expr = field.collection_mut_expr(); let is_option = field.is_option_ref(); match field.storage_type { @@ -1373,7 +1248,6 @@ fn generate_trait_accessor_methods(field: &FieldInfo) -> TokenStream { // For AutoSet types, generate read-only accessor, mutable accessor, and // add/remove/has/iter/len/is_empty let ref_name = field.ref_ident(); - let mut_name = field.mut_ident(); let (return_type, doc_comment) = if is_option { ( @@ -1388,24 +1262,12 @@ fn generate_trait_accessor_methods(field: &FieldInfo) -> TokenStream { ) }; - let track_modification = field.track_modification_call(); - let base_accessor = quote! { #[doc = #doc_comment] fn #ref_name(&self) -> #return_type { #check_access #ref_expr } - - /// Get a mutable reference to the collection (allocates if needed for lazy fields). - /// - /// Tracks modification pessimistically - assumes caller will mutate. - /// TODO: try to remove this method, tracking mutations before the mutation interferes with snapshotting logic - fn #mut_name(&mut self) -> &mut #field_type { - #check_access - #track_modification - #mut_expr - } }; let set_ops = generate_autoset_ops(field); @@ -1421,7 +1283,6 @@ fn generate_trait_accessor_methods(field: &FieldInfo) -> TokenStream { // For CounterMap types, generate read-only accessor, mutable accessor, and typed // mutation methods let ref_name = field.ref_ident(); - let mut_name = field.mut_ident(); let (return_type, doc_comment) = if is_option { ( @@ -1436,24 +1297,12 @@ fn generate_trait_accessor_methods(field: &FieldInfo) -> TokenStream { ) }; - let track_modification = field.track_modification_call(); - let base_accessor = quote! { #[doc = #doc_comment] fn #ref_name(&self) -> #return_type { #check_access #ref_expr } - - /// Get a mutable reference to the collection (allocates if needed for lazy fields). - /// - /// Tracks modification pessimistically - assumes caller will mutate. 
- /// TODO: try to remove this method, tracking mutations before the mutation interferes with snapshotting logic - fn #mut_name(&mut self) -> &mut #field_type { - #check_access - #track_modification - #mut_expr - } }; let countermap_ops = generate_countermap_ops(field); @@ -1468,7 +1317,6 @@ fn generate_trait_accessor_methods(field: &FieldInfo) -> TokenStream { StorageType::AutoMap => { // For AutoMap types, generate immutable and mutable accessors plus operation methods let ref_name = field.ref_ident(); - let mut_name = field.mut_ident(); let (return_type, ref_doc) = if is_option { ( @@ -1482,8 +1330,6 @@ fn generate_trait_accessor_methods(field: &FieldInfo) -> TokenStream { ) }; - let track_modification = field.track_modification_call(); - let base_accessor = quote! { #[doc = #ref_doc] fn #ref_name(&self) -> #return_type { @@ -1491,15 +1337,6 @@ fn generate_trait_accessor_methods(field: &FieldInfo) -> TokenStream { #ref_expr } - /// Get a mutable reference to the collection (allocates if needed for lazy fields). - /// - /// Tracks modification pessimistically - assumes caller will mutate. - /// TODO: try to remove this method, tracking mutations before the mutation interferes with snapshotting logic - fn #mut_name(&mut self) -> &mut #field_type { - #check_access - #track_modification - #mut_expr - } }; let automap_ops = generate_automap_ops(field); @@ -1553,79 +1390,80 @@ fn generate_direct_accessors(field: &FieldInfo) -> TokenStream { quote! { #field_type } }; - // Generate get_mut accessor for all direct fields + // Generate get_mut accessor for all direct transient fields + // We don't allow direct mutable access to persistent fields because it can interfere with + // mutation tracking and snapshotting let get_mut_accessor = { - let get_mut_name = field.get_mut_ident(); - if field.is_inline() { - // For inline fields, access the field directly - let field_name = &field.field_name; - if field.use_default { - // For fields with default semantics, always return Some(&mut self.field) - quote! { - /// Get a mutable reference to the field value. - /// - /// Tracks modification pessimistically - assumes caller will mutate. - /// TODO: try to remove this method, tracking mutations before the mutation interferes with snapshotting logic. - /// If this is needed then we need to use a guard struct that tracks `DerefMut` calls and calls track_modification - /// in Drop - fn #get_mut_name(&mut self) -> Option<&mut #value_type> { - #check_access - #track_modification - Some(&mut self.typed_mut().#field_name) + if field.is_transient() { + let get_mut_name = field.get_mut_ident(); + if field.is_inline() { + // For inline fields, access the field directly + let field_name = &field.field_name; + if field.use_default { + // For fields with default semantics, always return Some(&mut self.field) + quote! { + #[doc = "Get a mutable reference to the field value."] + #[doc = ""] + #[doc = "Tracks modification pessimistically - assumes caller will mutate."] + fn #get_mut_name(&mut self) -> &mut #value_type { + #check_access + #track_modification + &mut self.typed_mut().#field_name + } + } + } else { + // For Option fields, return as_mut() + quote! 
{ + #[doc = "Get a mutable reference to the field value (if present)."] + fn #get_mut_name(&mut self) -> Option<&mut #value_type> { + #check_access + #track_modification + self.typed_mut().#field_name.as_mut() + } } } } else { - // For Option fields, return as_mut() + // For lazy fields, use the existing get_mut expression + let get_mut_expr = field.direct_get_mut_expr(); quote! { - /// Get a mutable reference to the field value (if present). - /// - /// Tracks modification pessimistically - assumes caller will mutate. + #[doc = "Get a mutable reference to the field value (if present)."] fn #get_mut_name(&mut self) -> Option<&mut #value_type> { #check_access #track_modification - self.typed_mut().#field_name.as_mut() + #get_mut_expr } } } } else { - // For lazy fields, use the existing get_mut expression - let get_mut_expr = field.direct_get_mut_expr(); - quote! { - /// Get a mutable reference to the field value (if present). - /// - /// Tracks modification pessimistically - assumes caller will mutate. - fn #get_mut_name(&mut self) -> Option<&mut #value_type> { - #check_access - #track_modification - #get_mut_expr - } - } + // Persistent fields don't allow direct mutable access as it interferes with mutation + // tracking + quote! {} } }; quote! { - /// Get a reference to the field value (if present) + #[doc = "Get a reference to the field value (if present)"] fn #get_name(&self) -> Option<&#value_type> { #check_access #get_expr } - /// Check if this field has a value + #[doc = "Check if this field has a value"] fn #has_name(&self) -> bool { #check_access #get_expr.is_some() } - /// Set the field value, returning the old value if present + #[doc = "Set the field value, returning the old value if present"] fn #set_name(&mut self, value: #value_type) -> Option<#value_type> { #check_access #track_modification #set_expr(value) } - /// Take the field value, clearing it - /// - /// Only tracks modification if there was a value to take. + #[doc = "Take the field value, clearing it"] + #[doc = ""] + #[doc = "Only tracks modification if there was a value to take."] fn #take_name(&mut self) -> Option<#value_type> { #check_access let value = #take_expr; @@ -1658,12 +1496,14 @@ fn generate_autoset_ops(field: &FieldInfo) -> TokenStream { let track_modification = field.track_modification_call(); let mut_expr = field.collection_mut_expr(); let ref_expr = field.collection_ref_expr(); + let take_expr = field.direct_take_expr(); let is_option = field.is_option_ref(); let add_name = field.prefixed_ident("add"); - let add_items_name = field.suffixed_ident("extend"); + let extend_name = field.prefixed_ident("extend"); let remove_name = field.prefixed_ident("remove"); - let has_name = field.prefixed_ident("has"); + let set_name = field.prefixed_ident("set"); + let has_name = field.suffixed_ident("contains"); let iter_name = field.iter_ident(); let len_name = field.len_ident(); let is_empty_name = field.is_empty_ident(); @@ -1718,15 +1558,30 @@ fn generate_autoset_ops(field: &FieldInfo) -> TokenStream { } }; + let set_body = if is_option { + let unwraper = field.lazy_unwrap_closure(); + let matches = field.lazy_matches_closure(); + let ctor = field.lazy_constructor(quote! {set}); + quote! { + self.typed_mut().set_lazy(#matches, #unwraper, #ctor) + } + } else { + quote! { + let old = #take_expr; + *#mut_expr = set; + Some(old) + } + }; + quote! 
{ - /// Check if the set contains an item + #[doc = "Check if the set contains an item"] fn #has_name(&self, item: &#element_type) -> bool { #check_access #has_body } - /// Add an item to the set. - /// Returns true if the item was newly added, false if it already existed. + #[doc = "Add an item to the set."] + #[doc = "Returns true if the item was newly added, false if it already existed."] #[must_use] fn #add_name(&mut self, item: #element_type) -> bool { #check_access @@ -1737,9 +1592,9 @@ fn generate_autoset_ops(field: &FieldInfo) -> TokenStream { added } - /// Add multiple items to the set from an iterator. - /// Only tracks modification if at least one item is actually added. - fn #add_items_name(&mut self, items: impl Iterator) { + #[doc = "Add multiple items to the set from an iterator."] + #[doc = "Only tracks modification if at least one item is actually added."] + fn #extend_name(&mut self, items: impl IntoIterator) { #check_access let set = #mut_expr; let mut any_added = false; @@ -1753,26 +1608,35 @@ fn generate_autoset_ops(field: &FieldInfo) -> TokenStream { } } - /// Remove an item from the set. - /// Returns true if the item was present and removed, false if it wasn't present. + #[doc = "Remove an item from the set."] + #[doc = "Returns true if the item was present and removed, false if it wasn't present."] fn #remove_name(&mut self, item: &#element_type) -> bool { #check_access #remove_body } - /// Iterate over all items in the set + #[doc = "Remove multiple items from the set."] + #[doc = "Only tracks modification if at least one item is actually removed."] + fn #set_name(&mut self, set: #field_type) -> Option<#field_type> + { + #check_access + #track_modification + #set_body + } + + #[doc = "Iterate over all items in the set"] fn #iter_name(&self) -> impl Iterator + '_ { #check_access #iter_body } - /// Get the number of items in the set + #[doc = "Get the number of items in the set"] fn #len_name(&self) -> usize { #check_access #len_body } - /// Check if the set is empty + #[doc = "Check if the set is empty"] fn #is_empty_name(&self) -> bool { #check_access #is_empty_body @@ -1793,15 +1657,35 @@ fn generate_autoset_ops(field: &FieldInfo) -> TokenStream { /// - `update_{field}(key, f)` - Closure-based update /// - `add_{field}(key, value)` - Insert new, panics if exists /// - `remove_{field}(key) -> Option` - Standard HashMap remove -/// - `update_{field}_positive_crossing(key, delta) -> bool` - For i32 types -/// - `get_{field}_entry(key) -> Option<&V>` - Single-item lookup +/// - `get_{field}(key) -> Option<&V>` - Single-item lookup +/// +/// Additionally, for i32 value types only (signed counters): +/// - `update_{field}_positive_crossing(key, delta) -> bool` - Track positive boundary crossing +/// +/// Note: CounterMap only supports `i32` and `u32` value types. Other types will produce +/// a compile error. fn generate_countermap_ops(field: &FieldInfo) -> TokenStream { let field_type = &field.field_type; - let Some((key_type, value_type)) = extract_map_types(field_type, "CounterMap") else { + let Some((key_type_raw, value_type_raw)) = extract_map_types_raw(field_type, "CounterMap") + else { return quote! {}; }; + // Enforce that value type is either i32 or u32 + let is_signed = is_type_i32(value_type_raw); + let is_unsigned = is_type_u32(value_type_raw); + if !is_signed && !is_unsigned { + return syn::Error::new( + value_type_raw.span(), + "CounterMap value type must be `i32` or `u32`", + ) + .to_compile_error(); + } + + let key_type = quote! 
{ #key_type_raw }; + let value_type = quote! { #value_type_raw }; + let check_access = field.check_access_call(); let track_modification = field.track_modification_call(); let mut_expr = field.collection_mut_expr(); @@ -1810,19 +1694,18 @@ fn generate_countermap_ops(field: &FieldInfo) -> TokenStream { // Method names - use shorter names to match existing API let update_count_name = field.infixed_ident("update", "count"); + let update_counts_name = field.infixed_ident("update", "counts"); let update_and_get_name = field.prefixed_ident("update_and_get"); let update_with_name = field.prefixed_ident("update"); let add_entry_name = field.prefixed_ident("add"); let remove_name = field.prefixed_ident("remove"); - let update_positive_crossing_name = field.infixed_ident("update", "positive_crossing"); - let get_entry_name = field.infixed_ident("get", "entry"); - let iter_entries_name = field.infixed_ident("iter", "entries"); - let iter_positive_entries_name = field.infixed_ident("iter", "positive_entries"); + let get_name = field.prefixed_ident("get"); + let iter_name = field.prefixed_ident("iter"); let len_name = field.len_ident(); let is_empty_name = field.is_empty_ident(); // Generate get_entry body based on whether ref access returns Option or not - let get_entry_body = if is_option { + let get_body = if is_option { quote! { #ref_expr.and_then(|m| m.get(key)) } } else { quote! { #ref_expr.get(key) } @@ -1867,105 +1750,108 @@ fn generate_countermap_ops(field: &FieldInfo) -> TokenStream { }; // Generate iter_entries body - let iter_entries_body = if is_option { + let iter_body = if is_option { quote! { #ref_expr.into_iter().flat_map(|m| m.iter()) } } else { quote! { #ref_expr.iter() } }; - // Generate iter_positive_entries body (entries where value > 0) - let iter_positive_entries_body = if is_option { + // Generate signed-type-specific methods only for i32 + let signed_methods = if is_signed { + let update_positive_crossing_name = field.infixed_ident("update", "positive_crossing"); + quote! { - #ref_expr.into_iter().flat_map(|m| { - m.iter().filter_map(|(k, v)| if *v > Default::default() { Some((k, v)) } else { None }) - }) + #[doc = "Update a signed counter by the given delta."] + #[doc = "Returns true if the count crossed the positive boundary (became positive or non-positive)."] + #[must_use] + fn #update_positive_crossing_name(&mut self, key: #key_type, delta: #value_type) -> bool { + #check_access + #track_modification + #mut_expr.update_positive_crossing(key, delta) + } } } else { - quote! { - #ref_expr.iter().filter_map(|(k, v)| if *v > Default::default() { Some((k, v)) } else { None }) - } + quote! {} }; quote! { - /// Get a single entry from the counter map - fn #get_entry_name(&self, key: &#key_type) -> Option<&#value_type> { + #[doc = "Get a single entry from the counter map"] + fn #get_name(&self, key: &#key_type) -> Option<&#value_type> { #check_access - #get_entry_body + #get_body } - /// Update a counter by the given delta. - /// Returns true if the count crossed zero (became zero or became non-zero). 
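Rejecting unsupported value types via `syn::Error`, as the enforcement block above does, is the standard proc-macro pattern: the spanned error is converted into a `compile_error!()` token stream and returned in place of the generated code, so the diagnostic points at the offending type instead of the macro panicking. Reduced to its core:

    use proc_macro2::TokenStream;
    use syn::spanned::Spanned;

    // Sketch: reject an unsupported type with a spanned compile error
    // instead of panicking inside the macro.
    fn reject_value_type(ty: &syn::Type) -> TokenStream {
        syn::Error::new(ty.span(), "CounterMap value type must be `i32` or `u32`")
            .to_compile_error()
    }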
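The zero-crossing contract on `update_count` can be sketched against a plain HashMap standing in for CounterMap (hypothetical helper; it also assumes CounterMap drops entries that reach zero, which would explain the non-zero guarantee on the iterator below):

    use std::collections::HashMap;

    // Sketch: returns true when the count became zero or became non-zero.
    fn update_count(map: &mut HashMap<&'static str, i32>, key: &'static str, delta: i32) -> bool {
        let entry = map.entry(key).or_insert(0);
        let was_zero = *entry == 0;
        *entry += delta;
        let is_zero = *entry == 0;
        if is_zero {
            map.remove(key); // keep only non-zero entries
        }
        was_zero != is_zero
    }

    fn main() {
        let mut m = HashMap::new();
        assert!(update_count(&mut m, "a", 1));   // 0 -> 1: crossed
        assert!(!update_count(&mut m, "a", 2));  // 1 -> 3: no crossing
        assert!(update_count(&mut m, "a", -3));  // 3 -> 0: crossed
        assert!(m.is_empty());
    }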
+ #[doc = "Update a counter by the given delta."] + #[doc = "Returns true if the count crossed zero (became zero or became non-zero)."] #[must_use] fn #update_count_name(&mut self, key: #key_type, delta: #value_type) -> bool { #check_access #track_modification - #mut_expr.update_count(key, delta) + #mut_expr.update_count(key, delta) + } + + #[doc = "Update multiple counters by the given delta."] + #[doc = "More efficient than calling update_count in a loop."] + fn #update_counts_name(&mut self, keys: impl Iterator, delta: #value_type) { + #check_access + #track_modification + let map = #mut_expr; + for key in keys { + map.update_count(key, delta); + } } - /// Update a counter by the given delta and return the new value. + #[doc = "Update a counter by the given delta and return the new value."] fn #update_and_get_name(&mut self, key: #key_type, delta: #value_type) -> #value_type { #check_access #track_modification - #mut_expr.update_and_get(key, delta) + #mut_expr.update_and_get(key, delta) } - /// Update a counter using a closure that receives the current value - /// (or None if not present) and returns the new value (or None to remove). + #[doc = "Update a counter using a closure that receives the current value"] + #[doc = "(or None if not present) and returns the new value (or None to remove)."] fn #update_with_name(&mut self, key: #key_type, f: F) where F: FnOnce(Option<#value_type>) -> Option<#value_type>, { #check_access #track_modification - #mut_expr.update_with(key, f) + #mut_expr.update_with(key, f) } - /// Add a new entry, panicking if the entry already exists. + #[doc = "Add a new entry, panicking if the entry already exists."] fn #add_entry_name(&mut self, key: #key_type, value: #value_type) { #check_access #track_modification - #mut_expr.add_entry(key, value) + #mut_expr.add_entry(key, value) } - /// Remove an entry, returning the value if present. - /// Only tracks modification if an entry was actually removed. + #[doc = "Remove an entry, returning the value if present."] + #[doc = "Only tracks modification if an entry was actually removed."] fn #remove_name(&mut self, key: &#key_type) -> Option<#value_type> { #check_access #remove_body } - /// Update a signed counter by the given delta. - /// Returns true if the count crossed the positive boundary (became positive or non-positive). - #[must_use] - fn #update_positive_crossing_name(&mut self, key: #key_type, delta: #value_type) -> bool { - #check_access - #track_modification - #mut_expr.update_positive_crossing(key, delta) - } - - /// Get the number of entries in the counter map + #[doc = "Get the number of entries in the counter map"] fn #len_name(&self) -> usize { #check_access #len_body } - /// Check if the counter map is empty + #[doc = "Check if the counter map is empty"] fn #is_empty_name(&self) -> bool { #check_access #is_empty_body } - /// Iterate over all key-value pairs in the counter map - fn #iter_entries_name(&self) -> impl Iterator + '_ { + #[doc = "Iterate over all key-value pairs in the counter map. 
Guaranteed to return non-zero values."] + fn #iter_name(&self) -> impl Iterator + '_ { #check_access - #iter_entries_body + #iter_body } - /// Iterate over key-value pairs where value > 0 - fn #iter_positive_entries_name(&self) -> impl Iterator + '_ { - #check_access - #iter_positive_entries_body - } + #signed_methods } } @@ -1996,12 +1882,12 @@ fn generate_automap_ops(field: &FieldInfo) -> TokenStream { let ref_expr = field.collection_ref_expr(); let is_option = field.is_option_ref(); - // Method names (using `_entry` suffix for consistency with CounterMap) - let get_entry_name = field.infixed_ident("get", "entry"); - let has_entry_name = field.infixed_ident("has", "entry"); - let insert_entry_name = field.infixed_ident("insert", "entry"); - let remove_entry_name = field.infixed_ident("remove", "entry"); - let iter_entries_name = field.infixed_ident("iter", "entries"); + let get_entry_name = field.prefixed_ident("get"); + let has_entry_name = field.suffixed_ident("contains"); + let insert_entry_name = field.prefixed_ident("insert"); + let remove_entry_name = field.prefixed_ident("remove"); + let iter_entries_name = field.prefixed_ident("iter"); + let take_name = field.prefixed_ident("take"); let len_name = field.len_ident(); let is_empty_name = field.is_empty_ident(); @@ -2036,6 +1922,11 @@ fn generate_automap_ops(field: &FieldInfo) -> TokenStream { quote! { #ref_expr.is_empty() } }; + let take_expression = { + let take_name = field.take_ident(); + quote! {self.typed_mut().#take_name();} + }; + // Generate remove body - for lazy fields, avoid allocation if map doesn't exist. // Using ? operator to early-return None if map doesn't exist. let remove_body = if is_option { @@ -2059,45 +1950,58 @@ fn generate_automap_ops(field: &FieldInfo) -> TokenStream { }; quote! { - /// Get an entry from the map by key + #[doc = "Get an entry from the map by key"] fn #get_entry_name(&self, key: &#key_type) -> Option<&#value_type> { #check_access #get_entry_body } - /// Check if the map contains a key + #[doc = "Check if the map contains a key"] fn #has_entry_name(&self, key: &#key_type) -> bool { #check_access #has_entry_body } - /// Insert an entry, returning the old value if present. + #[doc = "Insert an entry, returning the old value if present."] fn #insert_entry_name(&mut self, key: #key_type, value: #value_type) -> Option<#value_type> { #check_access #track_modification #mut_expr.insert(key, value) } - /// Remove an entry, returning the value if present. - /// Only tracks modification if an entry was actually removed. 
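The signed-only `update_*_positive_crossing` method generated above has the same shape, but around the `> 0` boundary rather than zero; a standalone sketch:

    // Sketch: true when the counter moved across the > 0 boundary in
    // either direction (became positive, or became zero/negative).
    fn update_positive_crossing(count: &mut i32, delta: i32) -> bool {
        let was_positive = *count > 0;
        *count += delta;
        (*count > 0) != was_positive
    }

    fn main() {
        let mut c = 0;
        assert!(update_positive_crossing(&mut c, 2));  // 0 -> 2: crossed
        assert!(!update_positive_crossing(&mut c, 3)); // 2 -> 5: no crossing
        assert!(update_positive_crossing(&mut c, -5)); // 5 -> 0: crossed
    }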
+ + #[doc = "Remove an entry, returning the value if present."] + #[doc = "Only tracks modification if an entry was actually removed."] fn #remove_entry_name(&mut self, key: &#key_type) -> Option<#value_type> { #check_access #remove_body } - /// Iterate over all key-value pairs in the map + + #[doc = "Remove the full map and return it"] + #[doc = "Only tracks modification if an entry was actually removed."] + fn #take_name(&mut self) -> Option<#field_type> { + #check_access + let value = #take_expression; + if value.is_some() { + #track_modification + } + value + } + + #[doc = "Iterate over all key-value pairs in the map"] fn #iter_entries_name(&self) -> impl Iterator + '_ { #check_access #iter_body } - /// Get the number of entries in the map + #[doc = "Get the number of entries in the map"] fn #len_name(&self) -> usize { #check_access #len_body } - /// Check if the map is empty + #[doc = "Check if the map is empty"] fn #is_empty_name(&self) -> bool { #check_access #is_empty_body @@ -2135,9 +2039,9 @@ fn generate_shrink_accessor(field: &FieldInfo) -> TokenStream { }; quote! { - /// Shrink the collection to fit its current contents, releasing excess memory. - /// - /// This does NOT track modifications since it doesn't change the data semantically. + #[doc = "Shrink the collection to fit its current contents, releasing excess memory."] + #[doc = ""] + #[doc = "This does NOT track modifications since it doesn't change the data semantically."] fn #shrink_name(&mut self) { #shrink_body } @@ -2175,6 +2079,12 @@ fn extract_set_element_type(ty: &Type) -> Option { /// Extract key and value types from a map type (e.g., AutoMap or CounterMap) fn extract_map_types(ty: &Type, expected_name: &str) -> Option<(TokenStream, TokenStream)> { + let (key_type, value_type) = extract_map_types_raw(ty, expected_name)?; + Some((quote! { #key_type }, quote! { #value_type })) +} + +/// Extract key and value types from a map type, returning the raw Type references. +fn extract_map_types_raw<'a>(ty: &'a Type, expected_name: &str) -> Option<(&'a Type, &'a Type)> { if let Type::Path(type_path) = ty && let Some(segment) = type_path.path.segments.last() && segment.ident == expected_name @@ -2184,12 +2094,36 @@ fn extract_map_types(ty: &Type, expected_name: &str) -> Option<(TokenStream, Tok if let Some(syn::GenericArgument::Type(key_type)) = args_iter.next() && let Some(syn::GenericArgument::Type(value_type)) = args_iter.next() { - return Some((quote! { #key_type }, quote! { #value_type })); + return Some((key_type, value_type)); } } None } +/// Check if a type is specifically `i32`. +fn is_type_i32(ty: &Type) -> bool { + is_primitive_type(ty, "i32") +} + +/// Check if a type is specifically `u32`. +fn is_type_u32(ty: &Type) -> bool { + is_primitive_type(ty, "u32") +} + +/// Check if a type is a specific primitive type (e.g., "i32", "u32"). +fn is_primitive_type(ty: &Type, name: &str) -> bool { + if let Type::Path(type_path) = ty + && type_path.qself.is_none() + && type_path.path.segments.len() == 1 + && let Some(segment) = type_path.path.segments.first() + && segment.ident == name + && segment.arguments.is_none() + { + return true; + } + false +} + fn capitalize(s: &str) -> String { let mut c = s.chars(); match c.next() { @@ -2214,15 +2148,15 @@ fn generate_flag_trait_accessor_methods(field: &FieldInfo) -> TokenStream { let track_modification = quote! { self.track_modification(crate::backend::storage::SpecificTaskDataCategory::Meta); }; quote! 
{ - /// Get the flag value + #[doc = "Get the flag value"] fn #field_name(&self) -> bool { #check_access self.typed().flags.#field_name() } - /// Set the flag value - /// - /// Only tracks modification if the value actually changes. + #[doc = "Set the flag value"] + #[doc = ""] + #[doc = "Only tracks modification if the value actually changes."] fn #set_name(&mut self, value: bool) { #check_access let current = self.typed().flags.#field_name(); @@ -2854,1124 +2788,3 @@ fn generate_snapshot_restore_methods(grouped_fields: &GroupedFields) -> TokenStr } } } - -// ============================================================================= -// CachedDataItem Adapter Code Generation -// ============================================================================= - -/// Generate the CachedDataItem adapter methods for the TaskStorageAccessors trait. -/// -/// These methods provide the CachedDataItem API (add, insert, get, remove, etc.) -/// using the typed accessor methods from the trait. This enables proper access -/// tracking via check_access() and track_modification(). -fn generate_cached_data_adapter_trait_methods( - grouped_fields: &GroupedFields, -) -> proc_macro2::TokenStream { - // Only generate if there are fields with variants - if !grouped_fields.has_fields_with_variant() { - return quote! {}; - } - - // Generate match arms for each method - let insert_kv_arms = generate_insert_kv_arms(grouped_fields); - let add_arms = generate_add_arms(grouped_fields); - let get_arms = generate_get_arms(grouped_fields); - let remove_arms = generate_remove_arms(grouped_fields); - let get_mut_arms = generate_get_mut_arms(grouped_fields); - let iter_arms = generate_iter_arms(grouped_fields); - let count_arms = generate_count_arms(grouped_fields); - let extend_arms = generate_extend_arms(grouped_fields); - let extract_if_arms = generate_extract_if_arms(grouped_fields); - - quote! { - // ========================================================================= - // CachedDataItem Adapter Methods - // - // These methods provide backward compatibility with the CachedDataItem API - // while using typed accessors internally for proper access tracking. - // ========================================================================= - - /// Add a CachedDataItem to storage. - /// - /// Returns `true` if the item was newly added, `false` if it already existed. - /// Does NOT overwrite if the key already exists. - fn add(&mut self, item: crate::data::CachedDataItem) -> bool { - use crate::data::{CachedDataItemKey, CachedDataItemValue}; - use turbo_tasks::KeyValuePair; - let (key, value) = item.into_key_and_value(); - match (key, value) { - #add_arms - - // Catch-all for mismatched key/value types - #[allow(unreachable_patterns)] - (key, value) => { - panic!( - "Mismatched CachedDataItem key/value types: key={key:?}, value={value:?}" - ); - } - } - } - - /// Insert a CachedDataItem, returning the old value if present. - fn insert( - &mut self, - item: crate::data::CachedDataItem, - ) -> Option { - use turbo_tasks::KeyValuePair; - let (key, value) = item.into_key_and_value(); - self.insert_kv(key, value) - } - - /// Insert a key-value pair, returning the old value if present. 
- fn insert_kv( - &mut self, - key: crate::data::CachedDataItemKey, - value: crate::data::CachedDataItemValue, - ) -> Option { - use crate::data::{CachedDataItemKey, CachedDataItemValue}; - match (key, value) { - #insert_kv_arms - - // Catch-all for mismatched key/value types - #[allow(unreachable_patterns)] - (key, value) => { - panic!( - "Mismatched CachedDataItem key/value types: key={key:?}, value={value:?}" - ); - } - } - } - - /// Check if a key exists in storage. - fn contains_key(&self, key: &crate::data::CachedDataItemKey) -> bool { - self.get(key).is_some() - } - - /// Check if a key exists in storage (alias for contains_key for backward compatibility). - fn has_key(&self, key: &crate::data::CachedDataItemKey) -> bool { - self.contains_key(key) - } - - /// Get a reference to a CachedDataItem value by key. - fn get( - &self, - key: &crate::data::CachedDataItemKey, - ) -> Option> { - use crate::data::{CachedDataItemKey, CachedDataItemValueRef}; - match key { - #get_arms - } - } - - /// Remove a CachedDataItem by key, returning the value if present. - fn remove( - &mut self, - key: &crate::data::CachedDataItemKey, - ) -> Option { - use crate::data::{CachedDataItemKey, CachedDataItemValue}; - match key { - #remove_arms - } - } - - /// Get a mutable reference to a CachedDataItem value by key. - fn get_mut( - &mut self, - key: &crate::data::CachedDataItemKey, - ) -> Option> { - use crate::data::{CachedDataItemKey, CachedDataItemValueRefMut}; - match key { - #get_mut_arms - } - } - - /// Update a value in-place, creating it if it doesn't exist. - fn update( - &mut self, - key: crate::data::CachedDataItemKey, - update: impl FnOnce(Option) -> Option, - ) { - use turbo_tasks::KeyValuePair; - let old_value = self.remove(&key); - if let Some(new_value) = update(old_value) { - let item = crate::data::CachedDataItem::from_key_and_value(key, new_value); - self.add(item); - } - } - - /// Get a mutable reference or insert a value created by the given closure. - fn get_mut_or_insert_with( - &mut self, - key: crate::data::CachedDataItemKey, - insert: impl FnOnce() -> crate::data::CachedDataItemValue, - ) -> crate::data::CachedDataItemValueRefMut<'_> { - use turbo_tasks::KeyValuePair; - if self.get(&key).is_none() { - let value = insert(); - self.insert_kv(key.clone(), value); - } - self.get_mut(&key).expect("just inserted") - } - - /// Count items of a specific type. - fn count(&self, ty: crate::data::CachedDataItemType) -> usize { - use crate::data::CachedDataItemType; - match ty { - #count_arms - } - } - - /// Iterate over items of a specific type. - fn iter( - &self, - ty: crate::data::CachedDataItemType, - ) -> Box)> + '_> { - use crate::data::{CachedDataItemKey, CachedDataItemType, CachedDataItemValueRef}; - - match ty { - #iter_arms - } - } - - /// Extend storage with items from an iterator. - /// Returns `true` if all items were newly added, `false` if any already existed. - fn extend( - &mut self, - ty: crate::data::CachedDataItemType, - items: impl IntoIterator, - ) -> bool { - use crate::data::{CachedDataItemKey, CachedDataItemType, CachedDataItemValue}; - use turbo_tasks::KeyValuePair; - - match ty { - #extend_arms - } - } - - /// Add a CachedDataItem that must not already exist. - /// Panics if the item already exists. - fn add_new(&mut self, item: crate::data::CachedDataItem) { - if !self.add(item) { - panic!("add_new: item already exists"); - } - } - - /// Extend with items that must all be new. - /// Panics if any item already exists. 
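The adapter's `update()` shown above is remove-then-add around the caller's closure; the same shape over a single Option slot (hypothetical standalone version):

    // Sketch: closure-based update, mirroring the remove -> closure -> add
    // sequence of the removed adapter method.
    fn update<T>(slot: &mut Option<T>, f: impl FnOnce(Option<T>) -> Option<T>) {
        let old = slot.take();
        *slot = f(old);
    }

    fn main() {
        let mut slot = Some(1);
        update(&mut slot, |v| v.map(|n| n + 1)); // 1 -> 2
        assert_eq!(slot, Some(2));
        update(&mut slot, |_| None); // returning None removes the value
        assert_eq!(slot, None);
    }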
- fn extend_new( - &mut self, - _ty: crate::data::CachedDataItemType, - items: impl IntoIterator, - ) { - for item in items { - self.add_new(item); - } - } - - /// Remove items matching a predicate. - fn extract_if<'a, F>( - &'a mut self, - ty: crate::data::CachedDataItemType, - mut predicate: F, - ) -> Vec - where - F: for<'b> FnMut(crate::data::CachedDataItemKey, crate::data::CachedDataItemValueRef<'b>) -> bool + 'a, - { - use crate::data::{CachedDataItem, CachedDataItemKey, CachedDataItemType, CachedDataItemValue, CachedDataItemValueRef}; - use turbo_tasks::KeyValuePair; - - match ty { - #extract_if_arms - } - } - - } -} - -/// Generate insert_kv match arms for all fields with variants. -fn generate_insert_kv_arms(grouped_fields: &GroupedFields) -> proc_macro2::TokenStream { - let arms: Vec<_> = grouped_fields - .fields_with_variant() - .map(generate_insert_kv_arm) - .collect(); - - quote! { #(#arms)* } -} - -/// Generate a single insert_kv match arm for a field. -fn generate_insert_kv_arm(field: &FieldInfo) -> proc_macro2::TokenStream { - let variant = field.cached_data_variant_ident(); - - match &field.storage_type { - StorageType::Direct => generate_insert_kv_arm_direct(field, variant), - StorageType::AutoSet => generate_insert_kv_arm_auto_set(field, variant), - StorageType::CounterMap | StorageType::AutoMap => { - generate_insert_kv_arm_map(field, variant) - } - StorageType::Flag => generate_insert_kv_arm_flag(field, variant), - } -} - -fn generate_insert_kv_arm_direct(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let set_name = field.set_ident(); - quote! { - ( - CachedDataItemKey::#variant {}, - CachedDataItemValue::#variant { value }, - ) => self - .#set_name(value) - .map(|v| CachedDataItemValue::#variant { value: v }), - } -} - -fn generate_insert_kv_arm_auto_set(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let field_mut = field.mut_ident(); - let key_pattern = field.key_pattern(); - let key_expr = field.key_expr(); - quote! { - ( - CachedDataItemKey::#variant { #key_pattern }, - CachedDataItemValue::#variant { value: () }, - ) => { - let existed = !self.#field_mut().insert(#key_expr); - if existed { - Some(CachedDataItemValue::#variant { value: () }) - } else { - None - } - }, - } -} - -fn generate_insert_kv_arm_map(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let field_mut = field.mut_ident(); - let key_pattern = field.key_pattern(); - let key_expr = field.key_expr(); - quote! { - ( - CachedDataItemKey::#variant { #key_pattern }, - CachedDataItemValue::#variant { value }, - ) => self - .#field_mut() - .insert(#key_expr, value) - .map(|v| CachedDataItemValue::#variant { value: v }), - } -} - -fn generate_insert_kv_arm_flag(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let field_name = &field.field_name; - let set_name = field.set_ident(); - quote! { - ( - CachedDataItemKey::#variant {}, - CachedDataItemValue::#variant { value: () }, - ) => { - let existed = self.typed().flags.#field_name(); - self.typed_mut().flags.#set_name(true); - if existed { - Some(CachedDataItemValue::#variant { value: () }) - } else { - None - } - }, - } -} - -/// Generate get match arms for all fields with variants. -fn generate_get_arms(grouped_fields: &GroupedFields) -> proc_macro2::TokenStream { - let arms: Vec<_> = grouped_fields - .fields_with_variant() - .map(generate_get_arm) - .collect(); - - quote! { #(#arms)* } -} - -/// Generate a single get match arm for a field. 
-fn generate_get_arm(field: &FieldInfo) -> proc_macro2::TokenStream { - let variant = field.cached_data_variant_ident(); - - match &field.storage_type { - StorageType::Direct => generate_get_arm_direct(field, variant), - StorageType::AutoSet => generate_get_arm_auto_set(field, variant), - StorageType::CounterMap | StorageType::AutoMap => generate_get_arm_map(field, variant), - StorageType::Flag => generate_get_arm_flag(field, variant), - } -} - -fn generate_get_arm_direct(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let get_name = field.get_ident(); - quote! { - CachedDataItemKey::#variant {} => self - .#get_name() - .map(|value| CachedDataItemValueRef::#variant { value }), - } -} - -fn generate_get_arm_auto_set(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let field_name = &field.field_name; - let key_pattern = field.key_pattern(); - // Use cloned key expr since the key is passed by reference - let key_expr = field.key_expr_cloned(); - - if field.is_inline() { - quote! { - CachedDataItemKey::#variant { #key_pattern } => { - if self.#field_name().contains(&#key_expr) { - Some(CachedDataItemValueRef::#variant { value: &() }) - } else { - None - } - }, - } - } else { - quote! { - CachedDataItemKey::#variant { #key_pattern } => self.#field_name().and_then(|set| { - if set.contains(&#key_expr) { - Some(CachedDataItemValueRef::#variant { value: &() }) - } else { - None - } - }), - } - } -} - -fn generate_get_arm_map(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let field_name = &field.field_name; - let key_pattern = field.key_pattern(); - // Use cloned key expr since the key is passed by reference - let key_expr = field.key_expr_cloned(); - - if field.is_inline() { - quote! { - CachedDataItemKey::#variant { #key_pattern } => self - .#field_name() - .get(&#key_expr) - .map(|value| CachedDataItemValueRef::#variant { value }), - } - } else { - quote! { - CachedDataItemKey::#variant { #key_pattern } => self - .#field_name() - .and_then(|map| map.get(&#key_expr)) - .map(|value| CachedDataItemValueRef::#variant { value }), - } - } -} - -fn generate_get_arm_flag(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let field_name = &field.field_name; - quote! { - CachedDataItemKey::#variant {} => { - if self.typed().flags.#field_name() { - Some(CachedDataItemValueRef::#variant { value: &() }) - } else { - None - } - }, - } -} - -/// Generate remove match arms for all fields with variants. -fn generate_remove_arms(grouped_fields: &GroupedFields) -> proc_macro2::TokenStream { - let arms: Vec<_> = grouped_fields - .fields_with_variant() - .map(generate_remove_arm) - .collect(); - - quote! { #(#arms)* } -} - -/// Generate a single remove match arm for a field. -fn generate_remove_arm(field: &FieldInfo) -> proc_macro2::TokenStream { - let variant = field.cached_data_variant_ident(); - - match &field.storage_type { - StorageType::Direct => generate_remove_arm_direct(field, variant), - StorageType::AutoSet => generate_remove_arm_auto_set(field, variant), - StorageType::CounterMap | StorageType::AutoMap => generate_remove_arm_map(field, variant), - StorageType::Flag => generate_remove_arm_flag(field, variant), - } -} - -fn generate_remove_arm_direct(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let take_name = field.take_ident(); - quote! 
{ - CachedDataItemKey::#variant {} => self - .#take_name() - .map(|value| CachedDataItemValue::#variant { value }), - } -} - -fn generate_remove_arm_auto_set(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let field_mut = field.mut_ident(); - let key_pattern = field.key_pattern(); - // Use cloned key expr since the key is passed by reference - let key_expr = field.key_expr_cloned(); - quote! { - CachedDataItemKey::#variant { #key_pattern } => { - if self.#field_mut().remove(&#key_expr) { - Some(CachedDataItemValue::#variant { value: () }) - } else { - None - } - }, - } -} - -fn generate_remove_arm_map(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let field_mut = field.mut_ident(); - let key_pattern = field.key_pattern(); - // Use cloned key expr since the key is passed by reference - let key_expr = field.key_expr_cloned(); - quote! { - CachedDataItemKey::#variant { #key_pattern } => self - .#field_mut() - .remove(&#key_expr) - .map(|value| CachedDataItemValue::#variant { value }), - } -} - -fn generate_remove_arm_flag(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let field_name = &field.field_name; - let set_name = field.set_ident(); - quote! { - CachedDataItemKey::#variant {} => { - let existed = self.typed().flags.#field_name(); - self.typed_mut().flags.#set_name(false); - if existed { - Some(CachedDataItemValue::#variant { value: () }) - } else { - None - } - }, - } -} - -/// Generate get_mut match arms for all fields with variants. -fn generate_get_mut_arms(grouped_fields: &GroupedFields) -> proc_macro2::TokenStream { - let arms: Vec<_> = grouped_fields - .fields_with_variant() - .map(generate_get_mut_arm) - .collect(); - - quote! { #(#arms)* } -} - -/// Generate a single get_mut match arm for a field. -/// For types that don't support mutable access (flags, sets, multimaps), generates an arm -/// that returns None. -fn generate_get_mut_arm(field: &FieldInfo) -> proc_macro2::TokenStream { - let variant = field.cached_data_variant_ident(); - - match &field.storage_type { - StorageType::Direct => generate_get_mut_arm_direct(field, variant), - StorageType::CounterMap | StorageType::AutoMap => generate_get_mut_arm_map(field, variant), - // AutoSet, and Flag don't support get_mut (value is always ()) - // But we need to include the match arm to make the match exhaustive - StorageType::AutoSet => generate_get_mut_arm_set_none(field, variant), - StorageType::Flag => generate_get_mut_arm_flag_none(field, variant), - } -} - -fn generate_get_mut_arm_direct(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let get_mut_name = field.get_mut_ident(); - quote! { - CachedDataItemKey::#variant {} => self - .#get_mut_name() - .map(|value| CachedDataItemValueRefMut::#variant { value }), - } -} - -fn generate_get_mut_arm_map(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let field_mut = field.mut_ident(); - let key_pattern = field.key_pattern(); - // Use cloned key expr since the key is passed by reference - let key_expr = field.key_expr_cloned(); - quote! { - CachedDataItemKey::#variant { #key_pattern } => self - .#field_mut() - .get_mut(&#key_expr) - .map(|value| CachedDataItemValueRefMut::#variant { value }), - } -} - -/// Generate a get_mut arm for AutoSet types that returns None. -/// AutoSet values are always () so mutable access is not meaningful. 
-fn generate_get_mut_arm_set_none(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - // Ignore all key fields with _ - let ignored_fields: Vec<_> = field - .key_fields - .as_ref() - .map(|fields| fields.iter().map(|f| quote! { #f: _ }).collect()) - .unwrap_or_default(); - quote! { - CachedDataItemKey::#variant { #(#ignored_fields),* } => None, - } -} - -/// Generate a get_mut arm for Flag types that returns None. -/// Flag values are booleans so mutable access is not meaningful. -fn generate_get_mut_arm_flag_none(_field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - quote! { - CachedDataItemKey::#variant {} => None, - } -} - -/// Generate count match arms for all fields with variants. -fn generate_count_arms(grouped_fields: &GroupedFields) -> proc_macro2::TokenStream { - let arms: Vec<_> = grouped_fields - .fields_with_variant() - .map(generate_count_arm) - .collect(); - - quote! { #(#arms)* } -} - -/// Generate a single count match arm for a field. -fn generate_count_arm(field: &FieldInfo) -> proc_macro2::TokenStream { - let variant = field.cached_data_variant_ident(); - - match &field.storage_type { - StorageType::Direct => generate_count_arm_direct(field, variant), - StorageType::AutoSet => generate_count_arm_set(field, variant), - StorageType::CounterMap | StorageType::AutoMap => generate_count_arm_map(field, variant), - StorageType::Flag => generate_count_arm_flag(field, variant), - } -} - -fn generate_count_arm_direct(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let get_name = field.get_ident(); - quote! { - CachedDataItemType::#variant => if self.#get_name().is_some() { 1 } else { 0 }, - } -} - -fn generate_count_arm_set(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let field_name = &field.field_name; - if field.is_inline() { - quote! { - CachedDataItemType::#variant => self.#field_name().len(), - } - } else { - quote! { - CachedDataItemType::#variant => self.#field_name().map(|s| s.len()).unwrap_or(0), - } - } -} - -fn generate_count_arm_map(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let field_name = &field.field_name; - if field.is_inline() { - quote! { - CachedDataItemType::#variant => self.#field_name().len(), - } - } else { - quote! { - CachedDataItemType::#variant => self.#field_name().map(|m| m.len()).unwrap_or(0), - } - } -} - -fn generate_count_arm_flag(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let field_name = &field.field_name; - quote! { - CachedDataItemType::#variant => if self.typed().flags.#field_name() { 1 } else { 0 }, - } -} - -/// Generate iter match arms for all fields with variants. -fn generate_iter_arms(grouped_fields: &GroupedFields) -> proc_macro2::TokenStream { - let arms: Vec<_> = grouped_fields - .fields_with_variant() - .map(generate_iter_arm) - .collect(); - - quote! { #(#arms)* } -} - -/// Generate a single iter match arm for a field. -fn generate_iter_arm(field: &FieldInfo) -> proc_macro2::TokenStream { - let variant = field.cached_data_variant_ident(); - - match &field.storage_type { - StorageType::Direct => generate_iter_arm_direct(field, variant), - StorageType::AutoSet => generate_iter_arm_set(field, variant), - StorageType::CounterMap | StorageType::AutoMap => generate_iter_arm_map(field, variant), - StorageType::Flag => generate_iter_arm_flag(field, variant), - } -} - -fn generate_iter_arm_direct(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let get_name = field.get_ident(); - quote! 
{ - CachedDataItemType::#variant => Box::new( - self.#get_name() - .into_iter() - .map(|value| (CachedDataItemKey::#variant {}, CachedDataItemValueRef::#variant { value })), - ), - } -} - -fn generate_iter_arm_set(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let field_name = &field.field_name; - let key_expr = field.key_expr(); - let key_construction = field.key_construction(); - - if field.is_inline() { - quote! { - CachedDataItemType::#variant => Box::new(self.#field_name().iter().map(|#key_expr| { - ( - CachedDataItemKey::#variant { #key_construction }, - CachedDataItemValueRef::#variant { value: &() }, - ) - })), - } - } else { - quote! { - CachedDataItemType::#variant => Box::new( - self.#field_name() - .into_iter() - .flat_map(|set| set.iter()) - .map(|#key_expr| { - ( - CachedDataItemKey::#variant { #key_construction }, - CachedDataItemValueRef::#variant { value: &() }, - ) - }), - ), - } - } -} - -fn generate_iter_arm_map(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let field_name = &field.field_name; - let key_expr = field.key_expr(); - let key_construction = field.key_construction(); - - if field.is_inline() { - quote! { - CachedDataItemType::#variant => Box::new(self.#field_name().iter().map(|(#key_expr, value)| { - ( - CachedDataItemKey::#variant { #key_construction }, - CachedDataItemValueRef::#variant { value }, - ) - })), - } - } else { - quote! { - CachedDataItemType::#variant => Box::new( - self.#field_name() - .into_iter() - .flat_map(|m| m.iter()) - .map(|(#key_expr, value)| { - ( - CachedDataItemKey::#variant { #key_construction }, - CachedDataItemValueRef::#variant { value }, - ) - }), - ), - } - } -} - -fn generate_iter_arm_flag(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let field_name = &field.field_name; - quote! { - CachedDataItemType::#variant => Box::new( - self.typed() - .flags - .#field_name() - .then_some(( - CachedDataItemKey::#variant {}, - CachedDataItemValueRef::#variant { value: &() }, - )) - .into_iter(), - ), - } -} - -// ============================================================================= -// Optimized add() match arms -// ============================================================================= - -/// Generate add match arms for all fields with variants. -/// Delegates to typed methods for single check_access + conditional track_modification. -fn generate_add_arms(grouped_fields: &GroupedFields) -> proc_macro2::TokenStream { - let arms: Vec<_> = grouped_fields - .fields_with_variant() - .map(generate_add_arm) - .collect(); - - quote! { #(#arms)* } -} - -fn generate_add_arm(field: &FieldInfo) -> proc_macro2::TokenStream { - let variant = field.cached_data_variant_ident(); - - match &field.storage_type { - StorageType::Direct => generate_add_arm_direct(field, variant), - StorageType::AutoSet => generate_add_arm_auto_set(field, variant), - StorageType::CounterMap | StorageType::AutoMap => generate_add_arm_map(field, variant), - StorageType::Flag => generate_add_arm_flag(field, variant), - } -} - -fn generate_add_arm_direct(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let has_name = field.has_ident(); - let set_name = field.set_ident(); - quote! 
{ - ( - CachedDataItemKey::#variant {}, - CachedDataItemValue::#variant { value }, - ) => { - if self.#has_name() { - false - } else { - self.#set_name(value); - true - } - }, - } -} - -fn generate_add_arm_auto_set(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let add_name = field.prefixed_ident("add"); - let key_pattern = field.key_pattern(); - let key_expr = field.key_expr(); - quote! { - ( - CachedDataItemKey::#variant { #key_pattern }, - CachedDataItemValue::#variant { value: () }, - ) => self.#add_name(#key_expr), - } -} - -fn generate_add_arm_map(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let field_name = &field.field_name; - let field_mut = field.mut_ident(); - let key_pattern = field.key_pattern(); - let key_expr = field.key_expr(); - - if field.is_inline() { - quote! { - ( - CachedDataItemKey::#variant { #key_pattern }, - CachedDataItemValue::#variant { value }, - ) => { - if self.#field_name().get(&#key_expr).is_some() { - false - } else { - self.#field_mut().insert(#key_expr, value); - true - } - }, - } - } else { - quote! { - ( - CachedDataItemKey::#variant { #key_pattern }, - CachedDataItemValue::#variant { value }, - ) => { - if self.#field_name().is_some_and(|m| m.get(&#key_expr).is_some()) { - false - } else { - self.#field_mut().insert(#key_expr, value); - true - } - }, - } - } -} - -fn generate_add_arm_flag(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let field_name = &field.field_name; - let set_name = field.set_ident(); - quote! { - ( - CachedDataItemKey::#variant {}, - CachedDataItemValue::#variant { value: () }, - ) => { - if self.typed().flags.#field_name() { - false - } else { - self.typed_mut().flags.#set_name(true); - true - } - }, - } -} - -// ============================================================================= -// Optimized extend() match arms -// ============================================================================= - -/// Generate extend match arms for all fields with variants. -/// Uses typed batch methods for single check_access + single track_modification. -fn generate_extend_arms(grouped_fields: &GroupedFields) -> proc_macro2::TokenStream { - let arms: Vec<_> = grouped_fields - .fields_with_variant() - .map(generate_extend_arm) - .collect(); - - quote! { #(#arms)* } -} - -fn generate_extend_arm(field: &FieldInfo) -> proc_macro2::TokenStream { - let variant = field.cached_data_variant_ident(); - - match &field.storage_type { - StorageType::Direct => generate_extend_arm_direct(field, variant), - StorageType::AutoSet => generate_extend_arm_auto_set(field, variant), - StorageType::CounterMap | StorageType::AutoMap => generate_extend_arm_map(field, variant), - StorageType::Flag => generate_extend_arm_flag(field, variant), - } -} - -fn generate_extend_arm_direct(_field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - // Direct fields can only have one value, so just delegate to add() for the first item - quote! { - CachedDataItemType::#variant => { - for item in items { - return self.add(item); - } - true - }, - } -} - -fn generate_extend_arm_auto_set(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let extend_name = field.suffixed_ident("extend"); - let key_pattern = field.key_pattern(); - let key_expr = field.key_expr(); - quote! 
{ - CachedDataItemType::#variant => { - // Delegate to typed extend method which batches check_access + track_modification - self.#extend_name(items.into_iter().filter_map(|item| { - match item.into_key_and_value() { - (CachedDataItemKey::#variant { #key_pattern }, _) => Some(#key_expr), - _ => None, - } - })); - true // AutoSet extend doesn't track per-item success - }, - } -} - -fn generate_extend_arm_map(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let field_mut = field.mut_ident(); - let key_pattern = field.key_pattern(); - let key_expr = field.key_expr(); - quote! { - CachedDataItemType::#variant => { - let mut all_new = true; - // Get mutable reference once (single check_access + track_modification) - let map = self.#field_mut(); - for item in items { - if let (CachedDataItemKey::#variant { #key_pattern }, CachedDataItemValue::#variant { value }) - = item.into_key_and_value() - { - if map.insert(#key_expr, value).is_some() { - all_new = false; - } - } - } - all_new - }, - } -} - -fn generate_extend_arm_flag(_field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - // Flags can only have one value, so just delegate to add() for the first item - quote! { - CachedDataItemType::#variant => { - for item in items { - return self.add(item); - } - true - }, - } -} - -// ============================================================================= -// Optimized extract_if() match arms -// ============================================================================= - -/// Generate extract_if match arms for all fields with variants. -/// Uses retain pattern for single check_access + single track_modification. -fn generate_extract_if_arms(grouped_fields: &GroupedFields) -> proc_macro2::TokenStream { - let arms: Vec<_> = grouped_fields - .fields_with_variant() - .map(generate_extract_if_arm) - .collect(); - - quote! { #(#arms)* } -} - -fn generate_extract_if_arm(field: &FieldInfo) -> proc_macro2::TokenStream { - let variant = field.cached_data_variant_ident(); - - match &field.storage_type { - StorageType::Direct => generate_extract_if_arm_direct(field, variant), - StorageType::AutoSet => generate_extract_if_arm_auto_set(field, variant), - StorageType::CounterMap | StorageType::AutoMap => { - generate_extract_if_arm_map(field, variant) - } - StorageType::Flag => generate_extract_if_arm_flag(field, variant), - } -} - -fn generate_extract_if_arm_direct(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream { - let take_name = field.take_ident(); - let get_name = field.get_ident(); - quote! { - CachedDataItemType::#variant => { - let key = CachedDataItemKey::#variant {}; - if let Some(value_ref) = self.#get_name() { - if predicate(key.clone(), CachedDataItemValueRef::#variant { value: value_ref }) { - if let Some(value) = self.#take_name() { - return vec![CachedDataItem::from_key_and_value(key, CachedDataItemValue::#variant { value })]; - } - } - } - Vec::new() - }, - } -} - -fn generate_extract_if_arm_auto_set( - field: &FieldInfo, - variant: &Ident, -) -> proc_macro2::TokenStream { - let field_name = &field.field_name; - let key_expr = field.key_expr(); - let key_construction = field.key_construction(); - - // AutoSet doesn't have retain, so we collect keys first and then remove them. - // The typed iter_X and remove_X methods handle check_access/track_modification efficiently. - if field.is_inline() { - quote! 
-
-fn generate_extend_arm_flag(_field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream {
-    // Flags can only have one value, so just delegate to add() for the first item
-    quote! {
-        CachedDataItemType::#variant => {
-            for item in items {
-                return self.add(item);
-            }
-            true
-        },
-    }
-}
-
-// =============================================================================
-// Optimized extract_if() match arms
-// =============================================================================
-
-/// Generate extract_if match arms for all fields with variants.
-/// Uses retain pattern for single check_access + single track_modification.
-fn generate_extract_if_arms(grouped_fields: &GroupedFields) -> proc_macro2::TokenStream {
-    let arms: Vec<_> = grouped_fields
-        .fields_with_variant()
-        .map(generate_extract_if_arm)
-        .collect();
-
-    quote! { #(#arms)* }
-}
-
-fn generate_extract_if_arm(field: &FieldInfo) -> proc_macro2::TokenStream {
-    let variant = field.cached_data_variant_ident();
-
-    match &field.storage_type {
-        StorageType::Direct => generate_extract_if_arm_direct(field, variant),
-        StorageType::AutoSet => generate_extract_if_arm_auto_set(field, variant),
-        StorageType::CounterMap | StorageType::AutoMap => {
-            generate_extract_if_arm_map(field, variant)
-        }
-        StorageType::Flag => generate_extract_if_arm_flag(field, variant),
-    }
-}
-
-fn generate_extract_if_arm_direct(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream {
-    let take_name = field.take_ident();
-    let get_name = field.get_ident();
-    quote! {
-        CachedDataItemType::#variant => {
-            let key = CachedDataItemKey::#variant {};
-            if let Some(value_ref) = self.#get_name() {
-                if predicate(key.clone(), CachedDataItemValueRef::#variant { value: value_ref }) {
-                    if let Some(value) = self.#take_name() {
-                        return vec![CachedDataItem::from_key_and_value(key, CachedDataItemValue::#variant { value })];
-                    }
-                }
-            }
-            Vec::new()
-        },
-    }
-}
-
-fn generate_extract_if_arm_auto_set(
-    field: &FieldInfo,
-    variant: &Ident,
-) -> proc_macro2::TokenStream {
-    let field_name = &field.field_name;
-    let key_expr = field.key_expr();
-    let key_construction = field.key_construction();
-
-    // AutoSet doesn't have retain, so we collect keys first and then remove them.
-    // The typed iter_X and remove_X methods handle check_access/track_modification efficiently.
-    if field.is_inline() {
-        quote! {
-            CachedDataItemType::#variant => {
-                // First pass: collect keys to remove (single check_access via iter)
-                let keys_to_remove: Vec<_> = self.#field_name().iter().filter_map(|#key_expr| {
-                    let key = CachedDataItemKey::#variant { #key_construction };
-                    let value_ref = CachedDataItemValueRef::#variant { value: &() };
-                    if predicate(key.clone(), value_ref) {
-                        Some(key)
-                    } else {
-                        None
-                    }
-                }).collect();
-
-                // Second pass: remove items using the adapter's remove method
-                keys_to_remove.into_iter().filter_map(|key| {
-                    self.remove(&key).map(|value| {
-                        CachedDataItem::from_key_and_value(key, value)
-                    })
-                }).collect()
-            },
-        }
-    } else {
-        // Lazy autoset iter methods return owned values (via .copied()), so use
-        // key_construction_owned
-        let iter_name = field.iter_ident();
-        let key_construction_owned = field.key_construction_owned();
-        quote! {
-            CachedDataItemType::#variant => {
-                // First pass: collect keys to remove (single check_access via iter)
-                let keys_to_remove: Vec<_> = self.#iter_name().filter_map(|#key_expr| {
-                    let key = CachedDataItemKey::#variant { #key_construction_owned };
-                    let value_ref = CachedDataItemValueRef::#variant { value: &() };
-                    if predicate(key.clone(), value_ref) {
-                        Some(key)
-                    } else {
-                        None
-                    }
-                }).collect();
-
-                // Second pass: remove items using the adapter's remove method
-                keys_to_remove.into_iter().filter_map(|key| {
-                    self.remove(&key).map(|value| {
-                        CachedDataItem::from_key_and_value(key, value)
-                    })
-                }).collect()
-            },
-        }
-    }
-}
-
-fn generate_extract_if_arm_map(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream {
-    let field_name = &field.field_name;
-    let key_expr = field.key_expr();
-    let key_construction = field.key_construction();
-
-    // Maps (CounterMap/AutoMap) don't have retain with the signature we need,
-    // so we collect keys first and then remove them.
-    // Use the adapter's remove method which handles track_modification.
-    if field.is_inline() {
-        quote! {
-            CachedDataItemType::#variant => {
-                // First pass: collect keys to remove
-                let keys_to_remove: Vec<_> = self.#field_name().iter().filter_map(|(#key_expr, value)| {
-                    let key = CachedDataItemKey::#variant { #key_construction };
-                    let value_ref = CachedDataItemValueRef::#variant { value };
-                    if predicate(key.clone(), value_ref) {
-                        Some(key)
-                    } else {
-                        None
-                    }
-                }).collect();
-
-                // Second pass: remove and collect using the adapter's remove method
-                keys_to_remove.into_iter().filter_map(|key| {
-                    self.remove(&key).map(|value| {
-                        CachedDataItem::from_key_and_value(key, value)
-                    })
-                }).collect()
-            },
-        }
-    } else {
-        // For lazy map fields, use iter_X_entries which iterates over (key, value) pairs
-        let iter_entries_name = field.infixed_ident("iter", "entries");
-        quote! {
-            CachedDataItemType::#variant => {
-                // First pass: collect keys to remove
-                let keys_to_remove: Vec<_> = self.#iter_entries_name().filter_map(|(#key_expr, value)| {
-                    let key = CachedDataItemKey::#variant { #key_construction };
-                    let value_ref = CachedDataItemValueRef::#variant { value };
-                    if predicate(key.clone(), value_ref) {
-                        Some(key)
-                    } else {
-                        None
-                    }
-                }).collect();
-
-                // Second pass: remove and collect using the adapter's remove method
-                keys_to_remove.into_iter().filter_map(|key| {
-                    self.remove(&key).map(|value| {
-                        CachedDataItem::from_key_and_value(key, value)
-                    })
-                }).collect()
-            },
-        }
-    }
-}
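Both the auto-set and map extract_if arms above use the same two-pass shape: collect matching keys under a shared borrow, then remove them one by one. A self-contained sketch of the pattern, with `HashMap` as a stand-in for the set- and map-backed storage:

```rust
use std::collections::HashMap;

// Two-pass extract_if over storage without a retain/extract_if API:
// pass 1 collects matching keys under a shared borrow, pass 2 removes them.
fn extract_if_two_pass(
    map: &mut HashMap<u32, i64>,
    mut predicate: impl FnMut(&u32, &i64) -> bool,
) -> Vec<(u32, i64)> {
    // First pass: iteration only needs `&map`.
    let keys: Vec<u32> = map
        .iter()
        .filter_map(|(key, value)| predicate(key, value).then_some(*key))
        .collect();

    // Second pass: remove each collected key and keep the extracted pairs.
    keys.into_iter()
        .filter_map(|key| map.remove(&key).map(|value| (key, value)))
        .collect()
}

fn main() {
    let mut map = HashMap::from([(1, 10), (2, 20), (3, 30)]);
    let extracted = extract_if_two_pass(&mut map, |_, value| *value >= 20);
    assert_eq!(extracted.len(), 2);
    assert_eq!(map.len(), 1);
}
```

The split avoids mutating the collection while iterating it, which is why the generated code collects `keys_to_remove` before calling `remove`.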
-
-fn generate_extract_if_arm_flag(field: &FieldInfo, variant: &Ident) -> proc_macro2::TokenStream {
-    let field_name = &field.field_name;
-    let set_name = field.set_ident();
-    quote! {
-        CachedDataItemType::#variant => {
-            let key = CachedDataItemKey::#variant {};
-            if self.typed().flags.#field_name() {
-                let value_ref = CachedDataItemValueRef::#variant { value: &() };
-                if predicate(key.clone(), value_ref) {
-                    self.typed_mut().flags.#set_name(false);
-                    return vec![CachedDataItem::from_key_and_value(key, CachedDataItemValue::#variant { value: () })];
-                }
-            }
-            Vec::new()
-        },
-    }
-}
diff --git a/turbopack/crates/turbo-tasks-macros/src/lib.rs b/turbopack/crates/turbo-tasks-macros/src/lib.rs
index 38f0796c0bf19..dbbaa078ce4a4 100644
--- a/turbopack/crates/turbo-tasks-macros/src/lib.rs
+++ b/turbopack/crates/turbo-tasks-macros/src/lib.rs
@@ -57,15 +57,6 @@ pub fn derive_task_input(input: TokenStream) -> TokenStream {
     derive::derive_task_input(input)
 }
 
-///
-#[proc_macro_derive(KeyValuePair)]
-pub fn derive_key_value_pair(input: TokenStream) -> TokenStream {
-    derive::derive_key_value_pair(input)
-}
-
 ///