From e663d359f655503489f26da33876d0319ebaea75 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 29 Sep 2025 17:22:51 +0200 Subject: [PATCH 01/15] Update deprecated properties to use new aggregation attributes in `core.py`. --- flixopt/core.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flixopt/core.py b/flixopt/core.py index dea56ffb2..f7ff2a1df 100644 --- a/flixopt/core.py +++ b/flixopt/core.py @@ -142,12 +142,12 @@ def __repr__(self): @property def agg_group(self): warnings.warn('agg_group is deprecated, use aggregation_group instead', DeprecationWarning, stacklevel=2) - return self._aggregation_group + return self.aggregation_group @property def agg_weight(self): warnings.warn('agg_weight is deprecated, use aggregation_weight instead', DeprecationWarning, stacklevel=2) - return self._aggregation_weight + return self.aggregation_weight TemporalDataUser = ( From a6741942a62e3c5636237f0fca03901f8c2ba5f2 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 29 Sep 2025 17:23:34 +0200 Subject: [PATCH 02/15] Refactor `drop_constant_arrays` in `core.py` to improve clarity, add type hints, and enhance logging for dropped variables. --- flixopt/core.py | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/flixopt/core.py b/flixopt/core.py index f7ff2a1df..262732644 100644 --- a/flixopt/core.py +++ b/flixopt/core.py @@ -606,19 +606,35 @@ def get_dataarray_stats(arr: xr.DataArray) -> dict: return stats -def drop_constant_arrays(ds: xr.Dataset, dim='time', drop_arrays_without_dim: bool = True): - """Drop variables with very low variance (near-constant).""" +def drop_constant_arrays(ds: xr.Dataset, dim: str = 'time', drop_arrays_without_dim: bool = True) -> xr.Dataset: + """Drop variables with constant values along a dimension. + + Args: + ds: Input dataset to filter. + dim: Dimension along which to check for constant values. + drop_arrays_without_dim: If True, also drop variables that don't have the specified dimension. + + Returns: + Dataset with constant variables removed. + """ drop_vars = [] for name, da in ds.data_vars.items(): - if dim in da.dims: - if da.max(dim) == da.min(dim): + # Skip variables without the dimension + if dim not in da.dims: + if drop_arrays_without_dim: drop_vars.append(name) continue - elif drop_arrays_without_dim: + + # Check if variable is constant along the dimension + if (da.max(dim) == da.min(dim)).all(): drop_vars.append(name) - logger.debug(f'Dropping {len(drop_vars)} arrays with constant values') + if drop_vars: + logger.debug( + f'Dropping {len(drop_vars)} constant/dimension-less arrays: {drop_vars[:5]}{"..." 
if len(drop_vars) > 5 else ""}' + ) + return ds.drop_vars(drop_vars) From 7d20b9494c28f6776ba56ec401f67f3ad2ce9d11 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 29 Sep 2025 17:24:08 +0200 Subject: [PATCH 03/15] Bugfix example_calculation_types.py and two_stage_optimization.py --- examples/03_Calculation_types/example_calculation_types.py | 2 +- examples/05_Two-stage-optimization/two_stage_optimization.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/03_Calculation_types/example_calculation_types.py b/examples/03_Calculation_types/example_calculation_types.py index c4f30be46..c23e14d0a 100644 --- a/examples/03_Calculation_types/example_calculation_types.py +++ b/examples/03_Calculation_types/example_calculation_types.py @@ -46,7 +46,7 @@ # TimeSeriesData objects TS_heat_demand = fx.TimeSeriesData(heat_demand) TS_electricity_demand = fx.TimeSeriesData(electricity_demand, aggregation_weight=0.7) - TS_electricity_price_sell = fx.TimeSeriesData(-(electricity_demand - 0.5), aggregation_group='p_el') + TS_electricity_price_sell = fx.TimeSeriesData(-(electricity_price - 0.5), aggregation_group='p_el') TS_electricity_price_buy = fx.TimeSeriesData(electricity_price + 0.5, aggregation_group='p_el') flow_system = fx.FlowSystem(timesteps) diff --git a/examples/05_Two-stage-optimization/two_stage_optimization.py b/examples/05_Two-stage-optimization/two_stage_optimization.py index 77bd74a3b..52c47f006 100644 --- a/examples/05_Two-stage-optimization/two_stage_optimization.py +++ b/examples/05_Two-stage-optimization/two_stage_optimization.py @@ -118,6 +118,7 @@ calculation_sizing.solve(fx.solvers.HighsSolver(0.1 / 100, 600)) timer_sizing = timeit.default_timer() - start + start = timeit.default_timer() calculation_dispatch = fx.FullCalculation('Sizing', flow_system) calculation_dispatch.do_modeling() calculation_dispatch.fix_sizes(calculation_sizing.results.solution) From d4d0dda99efc21589305049a4187454700167e7b Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 29 Sep 2025 17:24:20 +0200 Subject: [PATCH 04/15] Use time selection more explicitly --- flixopt/calculation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flixopt/calculation.py b/flixopt/calculation.py index aa35e72f0..5ac0f6cd0 100644 --- a/flixopt/calculation.py +++ b/flixopt/calculation.py @@ -556,7 +556,7 @@ def _create_sub_calculations(self): for i, (segment_name, timesteps_of_segment) in enumerate( zip(self.segment_names, self._timesteps_per_segment, strict=True) ): - calc = FullCalculation(f'{self.name}-{segment_name}', self.flow_system.sel(timesteps_of_segment)) + calc = FullCalculation(f'{self.name}-{segment_name}', self.flow_system.sel(time=timesteps_of_segment)) calc.flow_system._connect_network() # Connect to have Correct names of Flows! self.sub_calculations.append(calc) From 12f120386461db36e5607df393be2743376f5b53 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 29 Sep 2025 17:40:17 +0200 Subject: [PATCH 05/15] Refactor plausibility checks in `components.py` to handle string-based `initial_charge_state` more robustly and simplify capacity bounds retrieval using `InvestParameters`. 
--- flixopt/components.py | 40 ++++++++++++++++++++++------------------ 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/flixopt/components.py b/flixopt/components.py index f35e193bb..6ad1098cb 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -472,17 +472,19 @@ def _plausibility_checks(self) -> None: Check for infeasible or uncommon combinations of parameters """ super()._plausibility_checks() + + # Validate string values and set flag + initial_is_last = False if isinstance(self.initial_charge_state, str): - if self.initial_charge_state != 'lastValueOfSim': + if self.initial_charge_state == 'lastValueOfSim': + initial_is_last = True + else: raise PlausibilityError(f'initial_charge_state has undefined value: {self.initial_charge_state}') - return + + # Use new InvestParameters methods to get capacity bounds if isinstance(self.capacity_in_flow_hours, InvestParameters): - if self.capacity_in_flow_hours.fixed_size is None: - maximum_capacity = self.capacity_in_flow_hours.maximum_size - minimum_capacity = self.capacity_in_flow_hours.minimum_size - else: - maximum_capacity = self.capacity_in_flow_hours.fixed_size - minimum_capacity = self.capacity_in_flow_hours.fixed_size + minimum_capacity = self.capacity_in_flow_hours.minimum_or_fixed_size + maximum_capacity = self.capacity_in_flow_hours.maximum_or_fixed_size else: maximum_capacity = self.capacity_in_flow_hours minimum_capacity = self.capacity_in_flow_hours @@ -492,16 +494,18 @@ def _plausibility_checks(self) -> None: # initial capacity <= allowed max for minimum_size: maximum_initial_capacity = minimum_capacity * self.relative_maximum_charge_state.isel(time=0) - if (self.initial_charge_state > maximum_initial_capacity).any(): - raise ValueError( - f'{self.label_full}: {self.initial_charge_state=} ' - f'is above allowed maximum charge_state {maximum_initial_capacity}' - ) - if (self.initial_charge_state < minimum_initial_capacity).any(): - raise ValueError( - f'{self.label_full}: {self.initial_charge_state=} ' - f'is below allowed minimum charge_state {minimum_initial_capacity}' - ) + # Only perform numeric comparisons if not using 'lastValueOfSim' + if not initial_is_last: + if (self.initial_charge_state > maximum_initial_capacity).any(): + raise PlausibilityError( + f'{self.label_full}: {self.initial_charge_state=} ' + f'is above allowed maximum charge_state {maximum_initial_capacity}' + ) + if (self.initial_charge_state < minimum_initial_capacity).any(): + raise PlausibilityError( + f'{self.label_full}: {self.initial_charge_state=} ' + f'is below allowed minimum charge_state {minimum_initial_capacity}' + ) if self.balanced: if not isinstance(self.charging.size, InvestParameters) or not isinstance( From 85d499ac9eb0650e1de720f114a48393f68ceba1 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 29 Sep 2025 17:41:03 +0200 Subject: [PATCH 06/15] Refactor `create_transmission_equation` in `components.py` to handle `relative_losses` gracefully when unset and simplify the constraint definition. 
--- flixopt/components.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/flixopt/components.py b/flixopt/components.py index 6ad1098cb..687bf61e8 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -740,8 +740,9 @@ def _do_modeling(self): def create_transmission_equation(self, name: str, in_flow: Flow, out_flow: Flow) -> linopy.Constraint: """Creates an Equation for the Transmission efficiency and adds it to the model""" # eq: out(t) + on(t)*loss_abs(t) = in(t)*(1 - loss_rel(t)) + rel_losses = 0 if self.element.relative_losses is None else self.element.relative_losses con_transmission = self.add_constraints( - out_flow.submodel.flow_rate == -in_flow.submodel.flow_rate * (self.element.relative_losses - 1), + out_flow.submodel.flow_rate == in_flow.submodel.flow_rate * (1 - rel_losses), short_name=name, ) From c2ecc28059b129d91aaf01d6506f61a9223d873f Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 29 Sep 2025 17:42:26 +0200 Subject: [PATCH 07/15] Update pytest `addopts` formatting in `pyproject.toml` to work with both unix and windows --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 902694a82..7b2aef6c7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -182,7 +182,7 @@ markers = [ "slow: marks tests as slow", "examples: marks example tests (run only on releases)", ] -addopts = "-m 'not examples'" # Skip examples by default +addopts = '-m "not examples"' # Skip examples by default [tool.bandit] skips = ["B101", "B506"] # assert_used and yaml_load From 4248f81ae37f4f746599269892fb4e05011bf908 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 29 Sep 2025 17:43:05 +0200 Subject: [PATCH 08/15] Refine null value handling when resolving dataarrays` to check for 'time' dimension before dropping all-null values. --- flixopt/structure.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/flixopt/structure.py b/flixopt/structure.py index 3dffeb5c3..91d5a53c1 100644 --- a/flixopt/structure.py +++ b/flixopt/structure.py @@ -422,8 +422,9 @@ def _resolve_dataarray_reference( # Handle null values with warning if array.isnull().any(): - logger.warning(f"DataArray '{array_name}' contains null values. Dropping them.") - array = array.dropna(dim='time', how='all') + logger.warning(f"DataArray '{array_name}' contains null values. Dropping all-null along present dims.") + if 'time' in array.dims: + array = array.dropna(dim='time', how='all') # Check if this should be restored as TimeSeriesData if TimeSeriesData.is_timeseries_data(array): From 487cce4a3a83b84965582008b74f0a827adf9d4a Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 29 Sep 2025 17:45:47 +0200 Subject: [PATCH 09/15] Refactor flow system restoration to improve exception handling and ensure logger state resets. --- flixopt/results.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/flixopt/results.py b/flixopt/results.py index f4ddc0071..44c7dad41 100644 --- a/flixopt/results.py +++ b/flixopt/results.py @@ -291,17 +291,18 @@ def flow_system(self) -> FlowSystem: """The restored flow_system that was used to create the calculation. 
Contains all input parameters.""" if self._flow_system is None: + current_logger_level = logger.getEffectiveLevel() + logger.setLevel(logging.CRITICAL) try: - current_logger_level = logger.getEffectiveLevel() - logger.setLevel(logging.CRITICAL) self._flow_system = FlowSystem.from_dataset(self.flow_system_data) self._flow_system._connect_network() - logger.setLevel(current_logger_level) except Exception as e: logger.critical( f'Not able to restore FlowSystem from dataset. Some functionality is not availlable. {e}' ) raise _FlowSystemRestorationError(f'Not able to restore FlowSystem from dataset. {e}') from e + finally: + logger.setLevel(current_logger_level) return self._flow_system def filter_solution( From 97c258d9dcf245611069fb31d15ed2893d24f030 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 29 Sep 2025 17:46:06 +0200 Subject: [PATCH 10/15] Refactor imports in `elements.py` to remove unused `ModelingPrimitives` from `features` and include it from `modeling` instead. --- flixopt/elements.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flixopt/elements.py b/flixopt/elements.py index 6ae1c2cf1..d094ed9e0 100644 --- a/flixopt/elements.py +++ b/flixopt/elements.py @@ -13,9 +13,9 @@ from .config import CONFIG from .core import PlausibilityError, Scalar, TemporalData, TemporalDataUser -from .features import InvestmentModel, ModelingPrimitives, OnOffModel +from .features import InvestmentModel, OnOffModel from .interface import InvestParameters, OnOffParameters -from .modeling import BoundingPatterns, ModelingUtilitiesAbstract +from .modeling import BoundingPatterns, ModelingPrimitives, ModelingUtilitiesAbstract from .structure import Element, ElementModel, FlowSystemModel, register_class_for_io if TYPE_CHECKING: From 37ff43a45e01f8ae5d91ecc0f1d614ad3bb4af5f Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 29 Sep 2025 17:48:19 +0200 Subject: [PATCH 11/15] Refactor `count_consecutive_states` in `modeling.py` to enhance documentation, improve edge case handling, and simplify array processing. --- flixopt/modeling.py | 75 +++++++++++++++++++++++++++------------------ 1 file changed, 46 insertions(+), 29 deletions(-) diff --git a/flixopt/modeling.py b/flixopt/modeling.py index b03d431ba..cd1818da4 100644 --- a/flixopt/modeling.py +++ b/flixopt/modeling.py @@ -55,47 +55,58 @@ def to_binary( def count_consecutive_states( binary_values: xr.DataArray, dim: str = 'time', - epsilon: float = None, + epsilon: float | None = None, ) -> float: - """ - Counts the number of consecutive states in a binary time series. + """Count consecutive steps in the final active state of a binary time series. + + This function counts how many consecutive time steps the series remains "on" + (non-zero) at the end of the time series. If the final state is "off", returns 0. Args: - binary_values: Binary DataArray - dim: Dimension to count consecutive states over - epsilon: Tolerance for zero detection (uses CONFIG.modeling.EPSILON if None) + binary_values: Binary DataArray with values close to 0 (off) or 1 (on). + dim: Dimension along which to count consecutive states. + epsilon: Tolerance for zero detection. Uses CONFIG.modeling.EPSILON if None. Returns: - The consecutive number of steps spent in the final state of the timeseries + Sum of values in the final consecutive "on" period. Returns 0.0 if the + final state is "off". 
+ + Examples: + >>> arr = xr.DataArray([0, 0, 1, 1, 1, 0, 1, 1], dims=['time']) + >>> count_consecutive_states(arr) + 2.0 # Two consecutive 1s at the end """ - if epsilon is None: - epsilon = CONFIG.modeling.EPSILON + epsilon = epsilon or CONFIG.modeling.EPSILON + + # Reduce to target dimension by taking any() over other dimensions + other_dims = [d for d in binary_values.dims if d != dim] + if other_dims: + binary_values = binary_values.any(dim=other_dims) - binary_values = binary_values.any(dim=[d for d in binary_values.dims if d != dim]) + # Convert to numpy array + arr = binary_values.values if hasattr(binary_values, 'values') else np.asarray(binary_values) - # Handle scalar case - if binary_values.ndim == 0: - return float(binary_values.item()) + # Flatten to 1D if needed + arr = arr.ravel() if arr.ndim > 1 else arr - # Check if final state is off - if np.isclose(binary_values.isel({dim: -1}), 0, atol=epsilon).all(): + # Handle edge cases + if arr.size == 0: return 0.0 + if arr.size == 1: + return float(arr[0]) if not np.isclose(arr[0], 0, atol=epsilon) else 0.0 - # Find consecutive 'on' period from the end - is_zero = np.isclose(binary_values, 0, atol=epsilon) + # Return 0 if final state is off + if np.isclose(arr[-1], 0, atol=epsilon): + return 0.0 - # Find the last zero, then sum everything after it + # Find the last zero position + is_zero = np.isclose(arr, 0, atol=epsilon) zero_indices = np.where(is_zero)[0] - if len(zero_indices) == 0: - # All 'on' - sum everything - start_idx = 0 - else: - # Start after last zero - start_idx = zero_indices[-1] + 1 - consecutive_values = binary_values.isel({dim: slice(start_idx, None)}) + # Calculate sum from last zero to end + start_idx = zero_indices[-1] + 1 if zero_indices.size > 0 else 0 - return float(consecutive_values.sum().item()) # TODO: Som only over one dim? 
+ return float(np.sum(arr[start_idx:])) class ModelingUtilities: @@ -308,7 +319,13 @@ def consecutive_duration_tracking( ) # Handle initial condition for minimum duration - if previous_duration > 0 and previous_duration < minimum_duration.isel({duration_dim: 0}).max(): + prev = ( + float(previous_duration) + if not isinstance(previous_duration, xr.DataArray) + else float(previous_duration.max().item()) + ) + min0 = float(minimum_duration.isel(time=0).max().item()) + if prev > 0 and prev < min0: constraints['initial_lb'] = model.add_constraints( state_variable.isel({duration_dim: 0}) == 1, name=f'{duration.name}|initial_lb' ) @@ -435,7 +452,7 @@ def bounds_with_state( lower_bound, upper_bound = bounds name = name or f'{variable.name}' - if np.all(lower_bound - upper_bound) < 1e-10: + if np.all(np.isclose(lower_bound, upper_bound, atol=1e-10)): fix_constraint = model.add_constraints(variable == variable_state * upper_bound, name=f'{name}|fix') return [fix_constraint] @@ -481,7 +498,7 @@ def scaled_bounds( rel_lower, rel_upper = relative_bounds name = name or f'{variable.name}' - if np.abs(rel_lower - rel_upper).all() < 10e-10: + if np.all(np.isclose(rel_lower, rel_upper, atol=1e-10)): return [model.add_constraints(variable == scaling_variable * rel_lower, name=f'{name}|fixed')] upper_constraint = model.add_constraints(variable <= scaling_variable * rel_upper, name=f'{name}|ub') From b6b8ac30770fbd66cbe423c5edc5085afec25fc0 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 30 Sep 2025 14:16:27 +0200 Subject: [PATCH 12/15] Refactor `drop_constant_arrays` to handle NaN cases with `skipna` and sort dropped variables for better logging; streamline logger state restoration in `results.py`. --- flixopt/core.py | 3 ++- flixopt/results.py | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/flixopt/core.py b/flixopt/core.py index 262732644..c163de554 100644 --- a/flixopt/core.py +++ b/flixopt/core.py @@ -627,10 +627,11 @@ def drop_constant_arrays(ds: xr.Dataset, dim: str = 'time', drop_arrays_without_ continue # Check if variable is constant along the dimension - if (da.max(dim) == da.min(dim)).all(): + if (da.max(dim, skipna=True) == da.min(dim, skipna=True)).all().item(): drop_vars.append(name) if drop_vars: + drop_vars = sorted(drop_vars) logger.debug( f'Dropping {len(drop_vars)} constant/dimension-less arrays: {drop_vars[:5]}{"..." if len(drop_vars) > 5 else ""}' ) diff --git a/flixopt/results.py b/flixopt/results.py index 44c7dad41..294039924 100644 --- a/flixopt/results.py +++ b/flixopt/results.py @@ -291,8 +291,8 @@ def flow_system(self) -> FlowSystem: """The restored flow_system that was used to create the calculation. Contains all input parameters.""" if self._flow_system is None: - current_logger_level = logger.getEffectiveLevel() - logger.setLevel(logging.CRITICAL) + old_level = logger.level + logger.level = logging.CRITICAL try: self._flow_system = FlowSystem.from_dataset(self.flow_system_data) self._flow_system._connect_network() @@ -302,7 +302,7 @@ def flow_system(self) -> FlowSystem: ) raise _FlowSystemRestorationError(f'Not able to restore FlowSystem from dataset. 
{e}') from e finally: - logger.setLevel(current_logger_level) + logger.level = old_level return self._flow_system def filter_solution( From e69c504972e4fce3b09ada3e0c4ae4063f702cb3 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 30 Sep 2025 14:16:44 +0200 Subject: [PATCH 13/15] Temp --- flixopt/modeling.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/flixopt/modeling.py b/flixopt/modeling.py index cd1818da4..45a814afb 100644 --- a/flixopt/modeling.py +++ b/flixopt/modeling.py @@ -78,13 +78,17 @@ def count_consecutive_states( """ epsilon = epsilon or CONFIG.modeling.EPSILON - # Reduce to target dimension by taking any() over other dimensions - other_dims = [d for d in binary_values.dims if d != dim] - if other_dims: - binary_values = binary_values.any(dim=other_dims) - - # Convert to numpy array - arr = binary_values.values if hasattr(binary_values, 'values') else np.asarray(binary_values) + # Check if input is an xarray DataArray + if hasattr(binary_values, 'dims'): + # Reduce to target dimension by taking any() over other dimensions + other_dims = [d for d in binary_values.dims if d != dim] + if other_dims: + binary_values = binary_values.any(dim=other_dims) + # Convert xarray to numpy + arr = binary_values.values + else: + # Convert non-xarray input directly to numpy array + arr = np.asarray(binary_values) # Flatten to 1D if needed arr = arr.ravel() if arr.ndim > 1 else arr From 36f4986b62becca26b82bbe3eeac54b24fd0acce Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 30 Sep 2025 14:21:14 +0200 Subject: [PATCH 14/15] Improve NAN handling in count_consecutive_states() --- flixopt/modeling.py | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/flixopt/modeling.py b/flixopt/modeling.py index 45a814afb..300da1589 100644 --- a/flixopt/modeling.py +++ b/flixopt/modeling.py @@ -53,7 +53,7 @@ def to_binary( @staticmethod def count_consecutive_states( - binary_values: xr.DataArray, + binary_values: xr.DataArray | np.ndarray | list[int, float], dim: str = 'time', epsilon: float | None = None, ) -> float: @@ -73,21 +73,23 @@ def count_consecutive_states( Examples: >>> arr = xr.DataArray([0, 0, 1, 1, 1, 0, 1, 1], dims=['time']) - >>> count_consecutive_states(arr) - 2.0 # Two consecutive 1s at the end + >>> ModelingUtilitiesAbstract.count_consecutive_states(arr) + 2.0 + + >>> arr = [0, 0, 1, 0, 1, 1, 1, 1] + >>> ModelingUtilitiesAbstract.count_consecutive_states(arr) + 4.0 """ epsilon = epsilon or CONFIG.modeling.EPSILON - # Check if input is an xarray DataArray - if hasattr(binary_values, 'dims'): - # Reduce to target dimension by taking any() over other dimensions + if isinstance(binary_values, xr.DataArray): + # xarray path other_dims = [d for d in binary_values.dims if d != dim] if other_dims: binary_values = binary_values.any(dim=other_dims) - # Convert xarray to numpy arr = binary_values.values else: - # Convert non-xarray input directly to numpy array + # numpy/array-like path arr = np.asarray(binary_values) # Flatten to 1D if needed @@ -103,7 +105,8 @@ def count_consecutive_states( if np.isclose(arr[-1], 0, atol=epsilon): return 0.0 - # Find the last zero position + # Find the last zero position (treat NaNs as off) + arr = np.nan_to_num(arr, nan=0.0) is_zero = np.isclose(arr, 0, atol=epsilon) zero_indices = np.where(is_zero)[0] @@ -328,7 +331,7 @@ def consecutive_duration_tracking( if not isinstance(previous_duration, 
xr.DataArray) else float(previous_duration.max().item()) ) - min0 = float(minimum_duration.isel(time=0).max().item()) + min0 = float(minimum_duration.isel({duration_dim: 0}).max().item()) if prev > 0 and prev < min0: constraints['initial_lb'] = model.add_constraints( state_variable.isel({duration_dim: 0}) == 1, name=f'{duration.name}|initial_lb' ) @@ -456,7 +459,7 @@ def bounds_with_state( lower_bound, upper_bound = bounds name = name or f'{variable.name}' - if np.all(np.isclose(lower_bound, upper_bound, atol=1e-10)): + if np.allclose(lower_bound, upper_bound, atol=1e-10, equal_nan=True): fix_constraint = model.add_constraints(variable == variable_state * upper_bound, name=f'{name}|fix') return [fix_constraint] @@ -502,7 +505,7 @@ def scaled_bounds( rel_lower, rel_upper = relative_bounds name = name or f'{variable.name}' - if np.all(np.isclose(rel_lower, rel_upper, atol=1e-10)): + if np.allclose(rel_lower, rel_upper, atol=1e-10, equal_nan=True): return [model.add_constraints(variable == scaling_variable * rel_lower, name=f'{name}|fixed')] upper_constraint = model.add_constraints(variable <= scaling_variable * rel_upper, name=f'{name}|ub') From c4b57cfff96807f29549cb86829691165332a6ee Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 30 Sep 2025 14:26:24 +0200 Subject: [PATCH 15/15] Refactor plausibility checks in `components.py` to prevent initial capacity from constraining investment decisions and improve error messaging. --- flixopt/components.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/flixopt/components.py b/flixopt/components.py index 687bf61e8..01b6864e3 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -489,9 +489,8 @@ def _plausibility_checks(self) -> None: maximum_capacity = self.capacity_in_flow_hours minimum_capacity = self.capacity_in_flow_hours - # initial capacity >= allowed min for maximum_size: + # Initial capacity should not constrain the investment decision minimum_initial_capacity = maximum_capacity * self.relative_minimum_charge_state.isel(time=0) - # initial capacity <= allowed max for minimum_size: maximum_initial_capacity = minimum_capacity * self.relative_maximum_charge_state.isel(time=0) # Only perform numeric comparisons if not using 'lastValueOfSim' @@ -499,12 +498,12 @@ def _plausibility_checks(self) -> None: if (self.initial_charge_state > maximum_initial_capacity).any(): raise PlausibilityError( f'{self.label_full}: {self.initial_charge_state=} ' - f'is above allowed maximum charge_state {maximum_initial_capacity}' + f'is constraining the investment decision. Choose a value below {maximum_initial_capacity}' ) if (self.initial_charge_state < minimum_initial_capacity).any(): raise PlausibilityError( f'{self.label_full}: {self.initial_charge_state=} ' - f'is below allowed minimum charge_state {minimum_initial_capacity}' + f'is constraining the investment decision. Choose a value above {minimum_initial_capacity}' ) if self.balanced:
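Note: the following is a minimal, self-contained sketch (numpy only) of the final-run counting semantics that PATCH 11 introduces and PATCHes 13-14 refine in `count_consecutive_states`. The helper name `count_final_on_run` and the `epsilon` default are illustrative and not part of flixopt's API.

```python
import numpy as np

def count_final_on_run(values, epsilon: float = 1e-10) -> float:
    """Sum the trailing run of 'on' (non-zero) entries; return 0.0 if the series ends 'off'."""
    arr = np.nan_to_num(np.asarray(values, dtype=float).ravel(), nan=0.0)  # NaN counts as 'off'
    if arr.size == 0 or np.isclose(arr[-1], 0, atol=epsilon):
        return 0.0
    zero_indices = np.where(np.isclose(arr, 0, atol=epsilon))[0]
    start_idx = zero_indices[-1] + 1 if zero_indices.size > 0 else 0  # first step after the last 'off'
    return float(arr[start_idx:].sum())

assert count_final_on_run([0, 0, 1, 1, 1, 0, 1, 1]) == 2.0  # trailing run of two 1s
assert count_final_on_run([0, 0, 1, 0, 1, 1, 1, 1]) == 4.0  # trailing run of four 1s
assert count_final_on_run([1, 1, 0]) == 0.0                 # series ends 'off'
```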