diff --git a/CHANGELOG.md b/CHANGELOG.md index 872498d0d..9c774bfbc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -295,6 +295,7 @@ Note: `topology.plot()` now renders a Sankey diagram. The old PyVis visualizatio ### šŸ› Fixed - `temporal_weight` and `sum_temporal()` now use consistent implementation +- `FlowSystem.from_old_results()` now sets `previous_flow_rate=0` for flows of components with `status_parameters`, fixing startup cost calculation mismatch when re-optimizing migrated v4 results ### šŸ“ Docs diff --git a/flixopt/clustering/base.py b/flixopt/clustering/base.py index 5098e74a1..2bc79d6b4 100644 --- a/flixopt/clustering/base.py +++ b/flixopt/clustering/base.py @@ -31,6 +31,15 @@ from ..statistics_accessor import SelectType +def _select_dims(da: xr.DataArray, period: str | None = None, scenario: str | None = None) -> xr.DataArray: + """Select from DataArray by period/scenario if those dimensions exist.""" + if 'period' in da.dims and period is not None: + da = da.sel(period=period) + if 'scenario' in da.dims and scenario is not None: + da = da.sel(scenario=scenario) + return da + + @dataclass class ClusterStructure: """Structure information for inter-cluster storage linking. @@ -152,12 +161,7 @@ def get_cluster_order_for_slice(self, period: str | None = None, scenario: str | Returns: 1D numpy array of cluster indices for the specified slice. """ - order = self.cluster_order - if 'period' in order.dims and period is not None: - order = order.sel(period=period) - if 'scenario' in order.dims and scenario is not None: - order = order.sel(scenario=scenario) - return order.values.astype(int) + return _select_dims(self.cluster_order, period, scenario).values.astype(int) def get_cluster_occurrences_for_slice( self, period: str | None = None, scenario: str | None = None @@ -170,13 +174,18 @@ def get_cluster_occurrences_for_slice( Returns: Dict mapping cluster ID to occurrence count. + + Raises: + ValueError: If period/scenario dimensions exist but no selector was provided. """ - occurrences = self.cluster_occurrences - if 'period' in occurrences.dims and period is not None: - occurrences = occurrences.sel(period=period) - if 'scenario' in occurrences.dims and scenario is not None: - occurrences = occurrences.sel(scenario=scenario) - return {int(c): int(occurrences.sel(cluster=c).values) for c in occurrences.coords['cluster'].values} + occ = _select_dims(self.cluster_occurrences, period, scenario) + extra_dims = [d for d in occ.dims if d != 'cluster'] + if extra_dims: + raise ValueError( + f'cluster_occurrences has dimensions {extra_dims} that were not selected. ' + f"Provide 'period' and/or 'scenario' arguments to select a specific slice." + ) + return {int(c): int(occ.sel(cluster=c).values) for c in occ.coords['cluster'].values} def plot(self, colors: str | list[str] | None = None, show: bool | None = None) -> PlotResult: """Plot cluster assignment visualization. @@ -372,12 +381,7 @@ def get_timestep_mapping_for_slice(self, period: str | None = None, scenario: st Returns: 1D numpy array of representative timestep indices for the specified slice. 
""" - mapping = self.timestep_mapping - if 'period' in mapping.dims and period is not None: - mapping = mapping.sel(period=period) - if 'scenario' in mapping.dims and scenario is not None: - mapping = mapping.sel(scenario=scenario) - return mapping.values.astype(int) + return _select_dims(self.timestep_mapping, period, scenario).values.astype(int) def expand_data(self, aggregated: xr.DataArray, original_time: xr.DataArray | None = None) -> xr.DataArray: """Expand aggregated data back to original timesteps. @@ -400,89 +404,61 @@ def expand_data(self, aggregated: xr.DataArray, original_time: xr.DataArray | No >>> expanded = result.expand_data(aggregated_values) >>> len(expanded.time) == len(original_timesteps) # True """ - import pandas as pd - if original_time is None: if self.original_data is None: raise ValueError('original_time required when original_data is not available') original_time = self.original_data.coords['time'] timestep_mapping = self.timestep_mapping - has_periods = 'period' in timestep_mapping.dims - has_scenarios = 'scenario' in timestep_mapping.dims has_cluster_dim = 'cluster' in aggregated.dims - - # Simple case: no period/scenario dimensions - if not has_periods and not has_scenarios: - mapping = timestep_mapping.values + timesteps_per_cluster = self.cluster_structure.timesteps_per_cluster if has_cluster_dim else None + + def _expand_slice(mapping: np.ndarray, data: xr.DataArray) -> np.ndarray: + """Expand a single slice using the mapping.""" + # Validate that data has only expected dimensions for indexing + expected_dims = {'cluster', 'time'} if has_cluster_dim else {'time'} + actual_dims = set(data.dims) + unexpected_dims = actual_dims - expected_dims + if unexpected_dims: + raise ValueError( + f'Data slice has unexpected dimensions {unexpected_dims}. ' + f'Expected only {expected_dims}. Make sure period/scenario selections are applied.' 
+ ) if has_cluster_dim: - # 2D cluster structure: convert flat indices to (cluster, time_within) - # Use cluster_structure's timesteps_per_cluster, not aggregated.sizes['time'] - # because the solution may include extra timesteps (timesteps_extra) - timesteps_per_cluster = self.cluster_structure.timesteps_per_cluster cluster_ids = mapping // timesteps_per_cluster time_within = mapping % timesteps_per_cluster - expanded_values = aggregated.values[cluster_ids, time_within] - else: - expanded_values = aggregated.values[mapping] - return xr.DataArray( - expanded_values, - coords={'time': original_time}, - dims=['time'], - attrs=aggregated.attrs, - ) + return data.values[cluster_ids, time_within] + return data.values[mapping] - # Multi-dimensional: expand each (period, scenario) slice and recombine - periods = list(timestep_mapping.coords['period'].values) if has_periods else [None] - scenarios = list(timestep_mapping.coords['scenario'].values) if has_scenarios else [None] - - expanded_slices: dict[tuple, xr.DataArray] = {} - for p in periods: - for s in scenarios: - # Get mapping for this slice - mapping_slice = timestep_mapping - if p is not None: - mapping_slice = mapping_slice.sel(period=p) - if s is not None: - mapping_slice = mapping_slice.sel(scenario=s) - mapping = mapping_slice.values - - # Select the data slice - selector = {} - if p is not None and 'period' in aggregated.dims: - selector['period'] = p - if s is not None and 'scenario' in aggregated.dims: - selector['scenario'] = s - - slice_da = aggregated.sel(**selector, drop=True) if selector else aggregated - - if has_cluster_dim: - # 2D cluster structure: convert flat indices to (cluster, time_within) - # Use cluster_structure's timesteps_per_cluster, not slice_da.sizes['time'] - # because the solution may include extra timesteps (timesteps_extra) - timesteps_per_cluster = self.cluster_structure.timesteps_per_cluster - cluster_ids = mapping // timesteps_per_cluster - time_within = mapping % timesteps_per_cluster - expanded_values = slice_da.values[cluster_ids, time_within] - expanded = xr.DataArray(expanded_values, dims=['time']) - else: - expanded = slice_da.isel(time=xr.DataArray(mapping, dims=['time'])) - expanded_slices[(p, s)] = expanded.assign_coords(time=original_time) - - # Recombine slices using xr.concat - if has_periods and has_scenarios: - period_arrays = [] - for p in periods: - scenario_arrays = [expanded_slices[(p, s)] for s in scenarios] - period_arrays.append(xr.concat(scenario_arrays, dim=pd.Index(scenarios, name='scenario'))) - result = xr.concat(period_arrays, dim=pd.Index(periods, name='period')) - elif has_periods: - result = xr.concat([expanded_slices[(p, None)] for p in periods], dim=pd.Index(periods, name='period')) - else: - result = xr.concat( - [expanded_slices[(None, s)] for s in scenarios], dim=pd.Index(scenarios, name='scenario') + # Simple case: no period/scenario dimensions + extra_dims = [d for d in timestep_mapping.dims if d != 'original_time'] + if not extra_dims: + expanded_values = _expand_slice(timestep_mapping.values, aggregated) + return xr.DataArray(expanded_values, coords={'time': original_time}, dims=['time'], attrs=aggregated.attrs) + + # Multi-dimensional: expand each slice and recombine + dim_coords = {d: list(timestep_mapping.coords[d].values) for d in extra_dims} + expanded_slices = {} + for combo in np.ndindex(*[len(v) for v in dim_coords.values()]): + selector = {d: dim_coords[d][i] for d, i in zip(extra_dims, combo, strict=True)} + mapping = _select_dims(timestep_mapping, 
**selector).values + data_slice = ( + _select_dims(aggregated, **selector) if any(d in aggregated.dims for d in selector) else aggregated + ) + expanded_slices[tuple(selector.values())] = xr.DataArray( + _expand_slice(mapping, data_slice), coords={'time': original_time}, dims=['time'] ) + # Concatenate iteratively along each extra dimension + result_arrays = expanded_slices + for dim in reversed(extra_dims): + dim_vals = dim_coords[dim] + grouped = {} + for key, arr in result_arrays.items(): + rest_key = key[:-1] if len(key) > 1 else () + grouped.setdefault(rest_key, []).append(arr) + result_arrays = {k: xr.concat(v, dim=pd.Index(dim_vals, name=dim)) for k, v in grouped.items()} + result = list(result_arrays.values())[0] return result.transpose('time', ...).assign_attrs(aggregated.attrs) def validate(self) -> None: @@ -748,8 +724,6 @@ def heatmap( PlotResult containing the heatmap figure and cluster assignment data. The data has 'cluster' variable with time dimension, matching original timesteps. """ - import pandas as pd - from ..config import CONFIG from ..plot_result import PlotResult from ..statistics_accessor import _apply_selection @@ -760,63 +734,35 @@ def heatmap( raise ValueError('No cluster structure available') cluster_order_da = cs.cluster_order - timesteps_per_period = cs.timesteps_per_cluster + timesteps_per_cluster = cs.timesteps_per_cluster original_time = result.original_data.coords['time'] if result.original_data is not None else None # Apply selection if provided if select: cluster_order_da = _apply_selection(cluster_order_da.to_dataset(name='cluster'), select)['cluster'] - # Check for multi-dimensional data - has_periods = 'period' in cluster_order_da.dims - has_scenarios = 'scenario' in cluster_order_da.dims - - # Get dimension values - periods = list(cluster_order_da.coords['period'].values) if has_periods else [None] - scenarios = list(cluster_order_da.coords['scenario'].values) if has_scenarios else [None] - - # Build cluster assignment per timestep for each (period, scenario) slice - cluster_slices: dict[tuple, xr.DataArray] = {} - for p in periods: - for s in scenarios: - cluster_order = cs.get_cluster_order_for_slice(period=p, scenario=s) - # Expand: each cluster repeated timesteps_per_period times - cluster_per_timestep = np.repeat(cluster_order, timesteps_per_period) - cluster_slices[(p, s)] = xr.DataArray( - cluster_per_timestep, - dims=['time'], - coords={'time': original_time} if original_time is not None else None, - ) - - # Combine slices into multi-dimensional DataArray - if has_periods and has_scenarios: - period_arrays = [] - for p in periods: - scenario_arrays = [cluster_slices[(p, s)] for s in scenarios] - period_arrays.append(xr.concat(scenario_arrays, dim=pd.Index(scenarios, name='scenario'))) - cluster_da = xr.concat(period_arrays, dim=pd.Index(periods, name='period')) - elif has_periods: - cluster_da = xr.concat( - [cluster_slices[(p, None)] for p in periods], - dim=pd.Index(periods, name='period'), - ) - elif has_scenarios: - cluster_da = xr.concat( - [cluster_slices[(None, s)] for s in scenarios], - dim=pd.Index(scenarios, name='scenario'), + # Expand cluster_order to per-timestep: repeat each value timesteps_per_cluster times + # Uses np.repeat along axis=0 (original_cluster dim) + extra_dims = [d for d in cluster_order_da.dims if d != 'original_cluster'] + expanded_values = np.repeat(cluster_order_da.values, timesteps_per_cluster, axis=0) + + # Validate length consistency when using original time coordinates + if original_time is not None and 
len(original_time) != expanded_values.shape[0]: + raise ValueError( + f'Length mismatch: original_time has {len(original_time)} elements but expanded ' + f'cluster data has {expanded_values.shape[0]} elements ' + f'(n_clusters={cluster_order_da.sizes.get("original_cluster", len(cluster_order_da))} * ' + f'timesteps_per_cluster={timesteps_per_cluster})' ) - else: - cluster_da = cluster_slices[(None, None)] + + coords = {'time': original_time} if original_time is not None else {} + coords.update({d: cluster_order_da.coords[d].values for d in extra_dims}) + cluster_da = xr.DataArray(expanded_values, dims=['time'] + extra_dims, coords=coords) # Add dummy y dimension for heatmap visualization (single row) - heatmap_da = cluster_da.expand_dims('y', axis=-1) - heatmap_da = heatmap_da.assign_coords(y=['Cluster']) + heatmap_da = cluster_da.expand_dims('y', axis=-1).assign_coords(y=['Cluster']) heatmap_da.name = 'cluster_assignment' - - # Reorder dims so 'time' and 'y' are first (heatmap x/y axes) - # Other dims (period, scenario) will be used for faceting/animation - target_order = ['time', 'y'] + [d for d in heatmap_da.dims if d not in ('time', 'y')] - heatmap_da = heatmap_da.transpose(*target_order) + heatmap_da = heatmap_da.transpose('time', 'y', ...) # Use fxplot.heatmap for smart defaults fig = heatmap_da.fxplot.heatmap( diff --git a/flixopt/comparison.py b/flixopt/comparison.py index b870eec3f..2892f0124 100644 --- a/flixopt/comparison.py +++ b/flixopt/comparison.py @@ -97,6 +97,7 @@ def __init__(self, flow_systems: list[FlowSystem], names: list[str] | None = Non # Caches self._solution: xr.Dataset | None = None self._statistics: ComparisonStatistics | None = None + self._inputs: xr.Dataset | None = None # Core dimensions that must match across FlowSystems # Note: 'cluster' and 'cluster_boundary' are auxiliary dimensions from clustering @@ -190,6 +191,36 @@ def diff(self, reference: str | int = 0) -> xr.Dataset: ref_data = self.solution.isel(case=ref_idx) return self.solution - ref_data + @property + def inputs(self) -> xr.Dataset: + """Combined input data Dataset with 'case' dimension. + + Concatenates input parameters from all FlowSystems. Each FlowSystem's + ``.inputs`` Dataset is combined with a 'case' dimension. + + Returns: + xr.Dataset with all input parameters. Variable naming follows + the pattern ``{element.label_full}|{parameter_name}``. + + Examples: + ```python + comp = fx.Comparison([fs1, fs2], names=['Base', 'Modified']) + comp.inputs # All inputs with 'case' dimension + comp.inputs['Boiler(Q_th)|relative_minimum'] # Specific parameter + ``` + """ + if self._inputs is None: + self._inputs = xr.concat( + [ + fs.to_dataset(include_solution=False).expand_dims(case=[name]) + for fs, name in zip(self._systems, self._names, strict=True) + ], + dim='case', + join='outer', + fill_value=float('nan'), + ) + return self._inputs + class ComparisonStatistics: """Combined statistics accessor for comparing FlowSystems. 
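A minimal usage sketch of the new `Comparison.inputs` property (assumes two already-built FlowSystems `fs1` and `fs2`; the parameter key mirrors the docstring example above):

```python
import flixopt as fx

comp = fx.Comparison([fs1, fs2], names=['Base', 'Modified'])

inputs = comp.inputs  # xr.Dataset with a 'case' dimension, cached after first access
# join='outer' with NaN fill means a parameter present in only one case is NaN in the other:
rel_min = inputs['Boiler(Q_th)|relative_minimum']
print(rel_min.sel(case='Base').values)
print(rel_min.sel(case='Modified').values)
```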
diff --git a/flixopt/config.py b/flixopt/config.py index 3bc3d5ebf..c0029b609 100644 --- a/flixopt/config.py +++ b/flixopt/config.py @@ -164,7 +164,16 @@ def format(self, record): 'default_sequential_colorscale': 'turbo', 'default_qualitative_colorscale': 'plotly', 'default_line_shape': 'hv', - 'dim_priority': ('time', 'duration', 'duration_pct', 'variable', 'cluster', 'period', 'scenario'), + 'dim_priority': ( + 'time', + 'duration', + 'duration_pct', + 'case', + 'variable', + 'cluster', + 'period', + 'scenario', + ), 'slot_priority': ('x', 'color', 'facet_col', 'facet_row', 'animation_frame'), } ), diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index 17f74f428..1f4c9b9a9 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -944,6 +944,15 @@ def from_old_results(cls, folder: str | pathlib.Path, name: str) -> FlowSystem: flow_system = cls.from_dataset(flow_system_data) flow_system.name = name + # Set previous_flow_rate=0 for flows of components with status_parameters + # In v4 API, previous_flow_rate=None defaulted to previous_status=0 (off) + # Now previous_flow_rate=None means relaxed (no constraint at t=0) + for comp in flow_system.components.values(): + if getattr(comp, 'status_parameters', None) is not None: + for flow in comp.inputs + comp.outputs: + if flow.previous_flow_rate is None: + flow.previous_flow_rate = 0 + # Attach solution (convert attrs from dicts to JSON strings for consistency) for key in ['Components', 'Buses', 'Effects', 'Flows']: if key in solution.attrs and isinstance(solution.attrs[key], dict): diff --git a/flixopt/modeling.py b/flixopt/modeling.py index 6b81a0a4a..f22ffbc04 100644 --- a/flixopt/modeling.py +++ b/flixopt/modeling.py @@ -251,7 +251,7 @@ def consecutive_duration_tracking( maximum_duration: xr.DataArray | None = None, duration_dim: str = 'time', duration_per_step: int | float | xr.DataArray = None, - previous_duration: xr.DataArray = 0, + previous_duration: xr.DataArray | float | int | None = None, ) -> tuple[dict[str, linopy.Variable], dict[str, linopy.Constraint]]: """Creates consecutive duration tracking for a binary state variable. @@ -278,16 +278,23 @@ def consecutive_duration_tracking( maximum_duration: Optional maximum consecutive duration (upper bound on duration variable) duration_dim: Dimension name to track duration along (default 'time') duration_per_step: Time increment per step in duration_dim - previous_duration: Initial duration value before first timestep (default 0) + previous_duration: Initial duration value before first timestep. If None (default), + no initial constraint is added (relaxed initial state). Returns: - Tuple of (duration_variable, constraints_dict) - where constraints_dict contains: 'ub', 'forward', 'backward', 'initial', and optionally 'lb', 'initial_lb' + Tuple of (variables_dict, constraints_dict). + variables_dict contains: 'duration'. + constraints_dict always contains: 'ub', 'forward', 'backward'. + When previous_duration is not None, also contains: 'initial'. + When minimum_duration is provided, also contains: 'lb'. + When minimum_duration is provided and previous_duration is not None and + 0 < previous_duration < minimum_duration[0], also contains: 'initial_lb'. 
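+
+        Examples:
+            A sketch of the relaxed default (``model``, ``state`` and ``duration_per_step``
+            as documented above; other arguments keep their defaults and are omitted):
+
+            >>> variables, constraints = ModelingPrimitives.consecutive_duration_tracking(
+            ...     model=model, state=state, duration_per_step=duration_per_step,
+            ...     previous_duration=None,
+            ... )
+            >>> 'initial' in constraints  # skipped: the initial duration is unconstrained
+            False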
""" if not isinstance(model, Submodel): raise ValueError('ModelingPrimitives.consecutive_duration_tracking() can only be used with a Submodel') - mega = duration_per_step.sum(duration_dim) + previous_duration # Big-M value + # Big-M value (use 0 for previous_duration if None) + mega = duration_per_step.sum(duration_dim) + (previous_duration if previous_duration is not None else 0) # Duration variable duration = model.add_variables( @@ -320,11 +327,13 @@ def consecutive_duration_tracking( ) # Initial condition: duration[0] = (duration_per_step[0] + previous_duration) * state[0] - constraints['initial'] = model.add_constraints( - duration.isel({duration_dim: 0}) - == (duration_per_step.isel({duration_dim: 0}) + previous_duration) * state.isel({duration_dim: 0}), - name=f'{duration.name}|initial', - ) + # Skipped if previous_duration is None (unconstrained initial state) + if previous_duration is not None: + constraints['initial'] = model.add_constraints( + duration.isel({duration_dim: 0}) + == (duration_per_step.isel({duration_dim: 0}) + previous_duration) * state.isel({duration_dim: 0}), + name=f'{duration.name}|initial', + ) # Minimum duration constraint if provided if minimum_duration is not None: @@ -335,17 +344,18 @@ def consecutive_duration_tracking( name=f'{duration.name}|lb', ) - # Handle initial condition for minimum duration - prev = ( - float(previous_duration) - if not isinstance(previous_duration, xr.DataArray) - else float(previous_duration.max().item()) - ) - min0 = float(minimum_duration.isel({duration_dim: 0}).max().item()) - if prev > 0 and prev < min0: - constraints['initial_lb'] = model.add_constraints( - state.isel({duration_dim: 0}) == 1, name=f'{duration.name}|initial_lb' + # Handle initial condition for minimum duration (skip if previous_duration is None) + if previous_duration is not None: + prev = ( + float(previous_duration) + if not isinstance(previous_duration, xr.DataArray) + else float(previous_duration.max().item()) ) + min0 = float(minimum_duration.isel({duration_dim: 0}).max().item()) + if prev > 0 and prev < min0: + constraints['initial_lb'] = model.add_constraints( + state.isel({duration_dim: 0}) == 1, name=f'{duration.name}|initial_lb' + ) variables = {'duration': duration} @@ -578,9 +588,9 @@ def state_transition_bounds( activate: linopy.Variable, deactivate: linopy.Variable, name: str, - previous_state: float | xr.DataArray = 0, + previous_state: float | xr.DataArray | None = 0, coord: str = 'time', - ) -> tuple[linopy.Constraint, linopy.Constraint, linopy.Constraint]: + ) -> tuple[linopy.Constraint, linopy.Constraint | None, linopy.Constraint]: """Creates state transition constraints for binary state variables. Tracks transitions between active (1) and inactive (0) states using @@ -598,11 +608,13 @@ def state_transition_bounds( activate: Binary variable for transitions from inactive to active (0→1) deactivate: Binary variable for transitions from active to inactive (1→0) name: Base name for constraints - previous_state: State value before first timestep (default 0) + previous_state: State value before first timestep (default 0). If None, + no initial constraint is added (relaxed initial state). coord: Time dimension name (default 'time') Returns: - Tuple of (transition_constraint, initial_constraint, mutex_constraint) + Tuple of (transition_constraint, initial_constraint, mutex_constraint). + initial_constraint is None when previous_state is None. 
""" if not isinstance(model, Submodel): raise ValueError('BoundingPatterns.state_transition_bounds() can only be used with a Submodel') @@ -614,11 +626,14 @@ def state_transition_bounds( name=f'{name}|transition', ) - # Initial state transition for t = 0 - initial = model.add_constraints( - activate.isel({coord: 0}) - deactivate.isel({coord: 0}) == state.isel({coord: 0}) - previous_state, - name=f'{name}|initial', - ) + # Initial state transition for t = 0 (skipped if previous_state is None for unconstrained) + if previous_state is not None: + initial = model.add_constraints( + activate.isel({coord: 0}) - deactivate.isel({coord: 0}) == state.isel({coord: 0}) - previous_state, + name=f'{name}|initial', + ) + else: + initial = None # At most one transition per timestep (mutual exclusivity) mutex = model.add_constraints(activate + deactivate <= 1, name=f'{name}|mutex') diff --git a/flixopt/statistics_accessor.py b/flixopt/statistics_accessor.py index 3e3e1a876..0b1e581da 100644 --- a/flixopt/statistics_accessor.py +++ b/flixopt/statistics_accessor.py @@ -625,7 +625,12 @@ def _create_template_for_mode(self, mode: Literal['temporal', 'periodic', 'total """Create a template DataArray with the correct dimensions for a given mode.""" coords = {} if mode == 'temporal': - coords['time'] = self._fs.timesteps + # Use solution's time coordinates if available (handles expanded solutions with extra timestep) + solution = self._fs.solution + if solution is not None and 'time' in solution.dims: + coords['time'] = solution.coords['time'].values + else: + coords['time'] = self._fs.timesteps if self._fs.periods is not None: coords['period'] = self._fs.periods if self._fs.scenarios is not None: diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 969ce78ae..ce0fbec47 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -828,22 +828,29 @@ def cluster( # First, get the metric columns from any non-empty DataFrame sample_df = next(iter(non_empty_metrics.values())) metric_names = list(sample_df.columns) - time_series_names = list(sample_df.index) # Build DataArrays for each metric data_vars = {} for metric in metric_names: # Shape: (time_series, period?, scenario?) 
+ # Each slice needs its own coordinates since different periods/scenarios + # may have different time series (after drop_constant_arrays) slices = {} for (p, s), df in clustering_metrics_all.items(): if df.empty: - # Use NaN for failed metrics - slices[(p, s)] = xr.DataArray(np.full(len(time_series_names), np.nan), dims=['time_series']) + # Use NaN for failed metrics - use sample_df index as fallback + slices[(p, s)] = xr.DataArray( + np.full(len(sample_df.index), np.nan), + dims=['time_series'], + coords={'time_series': list(sample_df.index)}, + ) else: - slices[(p, s)] = xr.DataArray(df[metric].values, dims=['time_series']) + # Use this DataFrame's own index as coordinates + slices[(p, s)] = xr.DataArray( + df[metric].values, dims=['time_series'], coords={'time_series': list(df.index)} + ) da = self._combine_slices_to_dataarray_generic(slices, ['time_series'], periods, scenarios, metric) - da = da.assign_coords(time_series=time_series_names) data_vars[metric] = da clustering_metrics = xr.Dataset(data_vars) @@ -1137,17 +1144,33 @@ def _combine_slices_to_dataarray_generic( return slices[first_key].rename(name) # Multi-dimensional: use xr.concat to stack along period/scenario dims + # Use join='outer' to handle cases where different periods/scenarios have different + # coordinate values (e.g., different time_series after drop_constant_arrays) if has_periods and has_scenarios: # Stack scenarios first, then periods period_arrays = [] for p in periods: scenario_arrays = [slices[(p, s)] for s in scenarios] - period_arrays.append(xr.concat(scenario_arrays, dim=pd.Index(scenarios, name='scenario'))) - result = xr.concat(period_arrays, dim=pd.Index(periods, name='period')) + period_arrays.append( + xr.concat( + scenario_arrays, dim=pd.Index(scenarios, name='scenario'), join='outer', fill_value=np.nan + ) + ) + result = xr.concat(period_arrays, dim=pd.Index(periods, name='period'), join='outer', fill_value=np.nan) elif has_periods: - result = xr.concat([slices[(p, None)] for p in periods], dim=pd.Index(periods, name='period')) + result = xr.concat( + [slices[(p, None)] for p in periods], + dim=pd.Index(periods, name='period'), + join='outer', + fill_value=np.nan, + ) else: - result = xr.concat([slices[(None, s)] for s in scenarios], dim=pd.Index(scenarios, name='scenario')) + result = xr.concat( + [slices[(None, s)] for s in scenarios], + dim=pd.Index(scenarios, name='scenario'), + join='outer', + fill_value=np.nan, + ) # Put base dimension first (standard order) result = result.transpose(base_dims[0], ...) 
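A self-contained sketch of the `join='outer'` alignment these hunks rely on (period labels and time series names are illustrative):

```python
import numpy as np
import pandas as pd
import xarray as xr

# Two period slices whose time_series coordinates differ,
# as can happen after drop_constant_arrays:
p1 = xr.DataArray([1.0, 2.0], dims=['time_series'], coords={'time_series': ['load', 'price']})
p2 = xr.DataArray([3.0], dims=['time_series'], coords={'time_series': ['load']})

combined = xr.concat(
    [p1, p2], dim=pd.Index(['2020', '2030'], name='period'), join='outer', fill_value=np.nan
)
# 'price' is NaN for period 2030 instead of raising an alignment error:
print(combined.sel(period='2030', time_series='price').item())  # nan
```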
diff --git a/tests/deprecated/conftest.py b/tests/deprecated/conftest.py index 65434f04c..efa9fa119 100644 --- a/tests/deprecated/conftest.py +++ b/tests/deprecated/conftest.py @@ -616,7 +616,9 @@ def flow_system_long(): thermal_efficiency=(eta_th := 0.58), electrical_efficiency=(eta_el := 0.22), status_parameters=fx.StatusParameters(effects_per_startup=24000), - fuel_flow=fx.Flow('Q_fu', bus='Kohle', size=(fuel_size := 288), relative_minimum=87 / fuel_size), + fuel_flow=fx.Flow( + 'Q_fu', bus='Kohle', size=(fuel_size := 288), relative_minimum=87 / fuel_size, previous_flow_rate=0 + ), electrical_flow=fx.Flow('P_el', bus='Strom', size=fuel_size * eta_el), thermal_flow=fx.Flow('Q_th', bus='FernwƤrme', size=fuel_size * eta_th), ), diff --git a/tests/deprecated/test_component.py b/tests/deprecated/test_component.py index 497a5c3aa..765a08c5a 100644 --- a/tests/deprecated/test_component.py +++ b/tests/deprecated/test_component.py @@ -409,11 +409,18 @@ def test_previous_states_with_multiple_flows_parameterized( flow_system.add_elements(comp) create_linopy_model(flow_system) - assert_conequal( - comp.submodel.constraints['TestComponent|uptime|initial'], - comp.submodel.variables['TestComponent|uptime'].isel(time=0) - == comp.submodel.variables['TestComponent|status'].isel(time=0) * (previous_on_hours + 1), - ) + # Check if any flow has previous_flow_rate set (determines if initial constraint exists) + has_previous = any( + x is not None for x in [in1_previous_flow_rate, out1_previous_flow_rate, out2_previous_flow_rate] + ) + if has_previous: + assert_conequal( + comp.submodel.constraints['TestComponent|uptime|initial'], + comp.submodel.variables['TestComponent|uptime'].isel(time=0) + == comp.submodel.variables['TestComponent|status'].isel(time=0) * (previous_on_hours + 1), + ) + else: + assert 'TestComponent|uptime|initial' not in comp.submodel.constraints class TestTransmissionModel: diff --git a/tests/deprecated/test_flow.py b/tests/deprecated/test_flow.py index 8e1ce1f53..487a96c25 100644 --- a/tests/deprecated/test_flow.py +++ b/tests/deprecated/test_flow.py @@ -652,6 +652,7 @@ def test_consecutive_on_hours(self, basic_flow_system_linopy_coords, coords_conf 'WƤrme', bus='FernwƤrme', size=100, + previous_flow_rate=0, # Required to get initial constraint status_parameters=fx.StatusParameters( min_uptime=2, # Must run for at least 2 hours when turned on max_uptime=8, # Can't run more than 8 consecutive hours @@ -815,6 +816,7 @@ def test_consecutive_off_hours(self, basic_flow_system_linopy_coords, coords_con 'WƤrme', bus='FernwƤrme', size=100, + previous_flow_rate=0, # System was OFF for 1 hour before start - required for initial constraint status_parameters=fx.StatusParameters( min_downtime=4, # Must stay inactive for at least 4 hours when shut down max_downtime=12, # Can't be inactive for more than 12 consecutive hours @@ -982,6 +984,7 @@ def test_switch_on_constraints(self, basic_flow_system_linopy_coords, coords_con 'WƤrme', bus='FernwƤrme', size=100, + previous_flow_rate=0, # Required for initial constraint status_parameters=fx.StatusParameters( startup_limit=5, # Maximum 5 startups effects_per_startup={'costs': 100}, # 100 EUR startup cost diff --git a/tests/deprecated/test_functional.py b/tests/deprecated/test_functional.py index 409e20a5f..14be26a4c 100644 --- a/tests/deprecated/test_functional.py +++ b/tests/deprecated/test_functional.py @@ -623,6 +623,7 @@ def test_consecutive_uptime_downtime(solver_fixture, time_steps_fixture): 'Q_th', bus='FernwƤrme', size=100, + previous_flow_rate=0, 
# Required for initial constraint status_parameters=fx.StatusParameters(max_uptime=2, min_uptime=2), ), ), diff --git a/tests/test_component.py b/tests/test_component.py index 66d09aaee..626a64272 100644 --- a/tests/test_component.py +++ b/tests/test_component.py @@ -409,11 +409,18 @@ def test_previous_states_with_multiple_flows_parameterized( flow_system.add_elements(comp) create_linopy_model(flow_system) - assert_conequal( - comp.submodel.constraints['TestComponent|uptime|initial'], - comp.submodel.variables['TestComponent|uptime'].isel(time=0) - == comp.submodel.variables['TestComponent|status'].isel(time=0) * (previous_on_hours + 1), - ) + # Initial constraint only exists when at least one flow has previous_flow_rate set + has_previous = any( + x is not None for x in [in1_previous_flow_rate, out1_previous_flow_rate, out2_previous_flow_rate] + ) + if has_previous: + assert_conequal( + comp.submodel.constraints['TestComponent|uptime|initial'], + comp.submodel.variables['TestComponent|uptime'].isel(time=0) + == comp.submodel.variables['TestComponent|status'].isel(time=0) * (previous_on_hours + 1), + ) + else: + assert 'TestComponent|uptime|initial' not in comp.submodel.constraints class TestTransmissionModel: diff --git a/tests/test_flow.py b/tests/test_flow.py index 8e1ce1f53..275963348 100644 --- a/tests/test_flow.py +++ b/tests/test_flow.py @@ -652,6 +652,7 @@ def test_consecutive_on_hours(self, basic_flow_system_linopy_coords, coords_conf 'WƤrme', bus='FernwƤrme', size=100, + previous_flow_rate=0, # Required to get initial constraint status_parameters=fx.StatusParameters( min_uptime=2, # Must run for at least 2 hours when turned on max_uptime=8, # Can't run more than 8 consecutive hours @@ -815,6 +816,7 @@ def test_consecutive_off_hours(self, basic_flow_system_linopy_coords, coords_con 'WƤrme', bus='FernwƤrme', size=100, + previous_flow_rate=0, # Required to get initial constraint (was OFF for 1h, so previous_downtime=1) status_parameters=fx.StatusParameters( min_downtime=4, # Must stay inactive for at least 4 hours when shut down max_downtime=12, # Can't be inactive for more than 12 consecutive hours @@ -982,6 +984,7 @@ def test_switch_on_constraints(self, basic_flow_system_linopy_coords, coords_con 'WƤrme', bus='FernwƤrme', size=100, + previous_flow_rate=0, # Required to get initial constraint status_parameters=fx.StatusParameters( startup_limit=5, # Maximum 5 startups effects_per_startup={'costs': 100}, # 100 EUR startup cost diff --git a/tests/test_functional.py b/tests/test_functional.py index 6d0f8a8fc..68f6b9e84 100644 --- a/tests/test_functional.py +++ b/tests/test_functional.py @@ -604,6 +604,7 @@ def test_consecutive_uptime_downtime(solver_fixture, time_steps_fixture): 'Q_th', bus='FernwƤrme', size=100, + previous_flow_rate=0, # Required for initial uptime constraint status_parameters=fx.StatusParameters(max_uptime=2, min_uptime=2), ), ),
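
The convention these test updates encode, as a minimal sketch (component wiring and effect values mirror the fixtures above; not a complete model):

```python
import flixopt as fx

# New default: previous_flow_rate=None leaves t=0 unconstrained, so no |initial
# constraints are built and the solver may treat the unit as already running
# (no startup cost at the first timestep).
relaxed = fx.Flow(
    'Q_th', bus='FernwƤrme', size=100,
    status_parameters=fx.StatusParameters(effects_per_startup={'costs': 100}),
)

# Explicit off-state: switching on at t=0 counts as a startup, and the
# uptime/downtime |initial constraints are created.
explicit_off = fx.Flow(
    'Q_th', bus='FernwƤrme', size=100, previous_flow_rate=0,
    status_parameters=fx.StatusParameters(effects_per_startup={'costs': 100}),
)
```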