diff --git a/examples/03_Calculation_types/example_calculation_types.py b/examples/03_Calculation_types/example_calculation_types.py
index c4f30be46..c23e14d0a 100644
--- a/examples/03_Calculation_types/example_calculation_types.py
+++ b/examples/03_Calculation_types/example_calculation_types.py
@@ -46,7 +46,7 @@
 # TimeSeriesData objects
 TS_heat_demand = fx.TimeSeriesData(heat_demand)
 TS_electricity_demand = fx.TimeSeriesData(electricity_demand, aggregation_weight=0.7)
-TS_electricity_price_sell = fx.TimeSeriesData(-(electricity_demand - 0.5), aggregation_group='p_el')
+TS_electricity_price_sell = fx.TimeSeriesData(-(electricity_price - 0.5), aggregation_group='p_el')
 TS_electricity_price_buy = fx.TimeSeriesData(electricity_price + 0.5, aggregation_group='p_el')
 
 flow_system = fx.FlowSystem(timesteps)
diff --git a/examples/05_Two-stage-optimization/two_stage_optimization.py b/examples/05_Two-stage-optimization/two_stage_optimization.py
index 77bd74a3b..52c47f006 100644
--- a/examples/05_Two-stage-optimization/two_stage_optimization.py
+++ b/examples/05_Two-stage-optimization/two_stage_optimization.py
@@ -118,6 +118,7 @@
 calculation_sizing.solve(fx.solvers.HighsSolver(0.1 / 100, 600))
 timer_sizing = timeit.default_timer() - start
 
+start = timeit.default_timer()
 calculation_dispatch = fx.FullCalculation('Sizing', flow_system)
 calculation_dispatch.do_modeling()
 calculation_dispatch.fix_sizes(calculation_sizing.results.solution)
diff --git a/flixopt/calculation.py b/flixopt/calculation.py
index aa35e72f0..5ac0f6cd0 100644
--- a/flixopt/calculation.py
+++ b/flixopt/calculation.py
@@ -556,7 +556,7 @@ def _create_sub_calculations(self):
         for i, (segment_name, timesteps_of_segment) in enumerate(
             zip(self.segment_names, self._timesteps_per_segment, strict=True)
         ):
-            calc = FullCalculation(f'{self.name}-{segment_name}', self.flow_system.sel(timesteps_of_segment))
+            calc = FullCalculation(f'{self.name}-{segment_name}', self.flow_system.sel(time=timesteps_of_segment))
             calc.flow_system._connect_network()  # Connect to have Correct names of Flows!
             self.sub_calculations.append(calc)
 
diff --git a/flixopt/components.py b/flixopt/components.py
index f35e193bb..01b6864e3 100644
--- a/flixopt/components.py
+++ b/flixopt/components.py
@@ -472,36 +472,39 @@ def _plausibility_checks(self) -> None:
         Check for infeasible or uncommon combinations of parameters
         """
         super()._plausibility_checks()
+
+        # Validate string values and set flag
+        initial_is_last = False
         if isinstance(self.initial_charge_state, str):
-            if self.initial_charge_state != 'lastValueOfSim':
+            if self.initial_charge_state == 'lastValueOfSim':
+                initial_is_last = True
+            else:
                 raise PlausibilityError(f'initial_charge_state has undefined value: {self.initial_charge_state}')
-            return
+
+        # Use new InvestParameters methods to get capacity bounds
         if isinstance(self.capacity_in_flow_hours, InvestParameters):
-            if self.capacity_in_flow_hours.fixed_size is None:
-                maximum_capacity = self.capacity_in_flow_hours.maximum_size
-                minimum_capacity = self.capacity_in_flow_hours.minimum_size
-            else:
-                maximum_capacity = self.capacity_in_flow_hours.fixed_size
-                minimum_capacity = self.capacity_in_flow_hours.fixed_size
+            minimum_capacity = self.capacity_in_flow_hours.minimum_or_fixed_size
+            maximum_capacity = self.capacity_in_flow_hours.maximum_or_fixed_size
         else:
             maximum_capacity = self.capacity_in_flow_hours
             minimum_capacity = self.capacity_in_flow_hours
 
-        # initial capacity >= allowed min for maximum_size:
+        # Initial capacity should not constrain the investment decision
         minimum_initial_capacity = maximum_capacity * self.relative_minimum_charge_state.isel(time=0)
-        # initial capacity <= allowed max for minimum_size:
         maximum_initial_capacity = minimum_capacity * self.relative_maximum_charge_state.isel(time=0)
 
-        if (self.initial_charge_state > maximum_initial_capacity).any():
-            raise ValueError(
-                f'{self.label_full}: {self.initial_charge_state=} '
-                f'is above allowed maximum charge_state {maximum_initial_capacity}'
-            )
-        if (self.initial_charge_state < minimum_initial_capacity).any():
-            raise ValueError(
-                f'{self.label_full}: {self.initial_charge_state=} '
-                f'is below allowed minimum charge_state {minimum_initial_capacity}'
-            )
+        # Only perform numeric comparisons if not using 'lastValueOfSim'
+        if not initial_is_last:
+            if (self.initial_charge_state > maximum_initial_capacity).any():
+                raise PlausibilityError(
+                    f'{self.label_full}: {self.initial_charge_state=} '
+                    f'is constraining the investment decision. Choose a value below {maximum_initial_capacity}'
+                )
+            if (self.initial_charge_state < minimum_initial_capacity).any():
+                raise PlausibilityError(
+                    f'{self.label_full}: {self.initial_charge_state=} '
+                    f'is constraining the investment decision. Choose a value above {minimum_initial_capacity}'
+                )
 
         if self.balanced:
             if not isinstance(self.charging.size, InvestParameters) or not isinstance(
@@ -736,8 +739,9 @@ def _do_modeling(self):
 
     def create_transmission_equation(self, name: str, in_flow: Flow, out_flow: Flow) -> linopy.Constraint:
         """Creates an Equation for the Transmission efficiency and adds it to the model"""
         # eq: out(t) + on(t)*loss_abs(t) = in(t)*(1 - loss_rel(t))
+        rel_losses = 0 if self.element.relative_losses is None else self.element.relative_losses
         con_transmission = self.add_constraints(
-            out_flow.submodel.flow_rate == -in_flow.submodel.flow_rate * (self.element.relative_losses - 1),
+            out_flow.submodel.flow_rate == in_flow.submodel.flow_rate * (1 - rel_losses),
             short_name=name,
         )
diff --git a/flixopt/core.py b/flixopt/core.py
index dea56ffb2..c163de554 100644
--- a/flixopt/core.py
+++ b/flixopt/core.py
@@ -142,12 +142,12 @@ def __repr__(self):
     @property
     def agg_group(self):
         warnings.warn('agg_group is deprecated, use aggregation_group instead', DeprecationWarning, stacklevel=2)
-        return self._aggregation_group
+        return self.aggregation_group
 
     @property
     def agg_weight(self):
         warnings.warn('agg_weight is deprecated, use aggregation_weight instead', DeprecationWarning, stacklevel=2)
-        return self._aggregation_weight
+        return self.aggregation_weight
 
 
 TemporalDataUser = (
@@ -606,19 +606,36 @@ def get_dataarray_stats(arr: xr.DataArray) -> dict:
     return stats
 
 
-def drop_constant_arrays(ds: xr.Dataset, dim='time', drop_arrays_without_dim: bool = True):
-    """Drop variables with very low variance (near-constant)."""
+def drop_constant_arrays(ds: xr.Dataset, dim: str = 'time', drop_arrays_without_dim: bool = True) -> xr.Dataset:
+    """Drop variables with constant values along a dimension.
+
+    Args:
+        ds: Input dataset to filter.
+        dim: Dimension along which to check for constant values.
+        drop_arrays_without_dim: If True, also drop variables that don't have the specified dimension.
+
+    Returns:
+        Dataset with constant variables removed.
+    """
     drop_vars = []
     for name, da in ds.data_vars.items():
-        if dim in da.dims:
-            if da.max(dim) == da.min(dim):
+        # Skip variables without the dimension
+        if dim not in da.dims:
+            if drop_arrays_without_dim:
                 drop_vars.append(name)
             continue
-        elif drop_arrays_without_dim:
+
+        # Check if variable is constant along the dimension
+        if (da.max(dim, skipna=True) == da.min(dim, skipna=True)).all().item():
             drop_vars.append(name)
 
-    logger.debug(f'Dropping {len(drop_vars)} arrays with constant values')
+    if drop_vars:
+        drop_vars = sorted(drop_vars)
+        logger.debug(
+            f'Dropping {len(drop_vars)} constant/dimension-less arrays: {drop_vars[:5]}{"..." if len(drop_vars) > 5 else ""}'
+        )
+    return ds.drop_vars(drop_vars)
diff --git a/flixopt/elements.py b/flixopt/elements.py
index 6ae1c2cf1..d094ed9e0 100644
--- a/flixopt/elements.py
+++ b/flixopt/elements.py
@@ -13,9 +13,9 @@
 from .config import CONFIG
 from .core import PlausibilityError, Scalar, TemporalData, TemporalDataUser
-from .features import InvestmentModel, ModelingPrimitives, OnOffModel
+from .features import InvestmentModel, OnOffModel
 from .interface import InvestParameters, OnOffParameters
-from .modeling import BoundingPatterns, ModelingUtilitiesAbstract
+from .modeling import BoundingPatterns, ModelingPrimitives, ModelingUtilitiesAbstract
 from .structure import Element, ElementModel, FlowSystemModel, register_class_for_io
 
 if TYPE_CHECKING:
diff --git a/flixopt/modeling.py b/flixopt/modeling.py
index b03d431ba..300da1589 100644
--- a/flixopt/modeling.py
+++ b/flixopt/modeling.py
@@ -53,49 +53,67 @@ def to_binary(
 
     @staticmethod
     def count_consecutive_states(
-        binary_values: xr.DataArray,
+        binary_values: xr.DataArray | np.ndarray | list[int | float],
         dim: str = 'time',
-        epsilon: float = None,
+        epsilon: float | None = None,
     ) -> float:
-        """
-        Counts the number of consecutive states in a binary time series.
+        """Count consecutive steps in the final active state of a binary time series.
+
+        This function counts how many consecutive time steps the series remains "on"
+        (non-zero) at the end of the time series. If the final state is "off", returns 0.
 
         Args:
-            binary_values: Binary DataArray
-            dim: Dimension to count consecutive states over
-            epsilon: Tolerance for zero detection (uses CONFIG.modeling.EPSILON if None)
+            binary_values: Binary DataArray with values close to 0 (off) or 1 (on).
+            dim: Dimension along which to count consecutive states.
+            epsilon: Tolerance for zero detection. Uses CONFIG.modeling.EPSILON if None.
 
         Returns:
-            The consecutive number of steps spent in the final state of the timeseries
-        """
-        if epsilon is None:
-            epsilon = CONFIG.modeling.EPSILON
+            Sum of values in the final consecutive "on" period. Returns 0.0 if the
+            final state is "off".
+
+        Examples:
+            >>> arr = xr.DataArray([0, 0, 1, 1, 1, 0, 1, 1], dims=['time'])
+            >>> ModelingUtilitiesAbstract.count_consecutive_states(arr)
+            2.0
 
-        binary_values = binary_values.any(dim=[d for d in binary_values.dims if d != dim])
+            >>> arr = [0, 0, 1, 0, 1, 1, 1, 1]
+            >>> ModelingUtilitiesAbstract.count_consecutive_states(arr)
+            4.0
+        """
+        epsilon = epsilon or CONFIG.modeling.EPSILON
+
+        if isinstance(binary_values, xr.DataArray):
+            # xarray path
+            other_dims = [d for d in binary_values.dims if d != dim]
+            if other_dims:
+                binary_values = binary_values.any(dim=other_dims)
+            arr = binary_values.values
+        else:
+            # numpy/array-like path
+            arr = np.asarray(binary_values)
 
-        # Handle scalar case
-        if binary_values.ndim == 0:
-            return float(binary_values.item())
+        # Flatten to 1D if needed
+        arr = arr.ravel() if arr.ndim > 1 else arr
 
-        # Check if final state is off
-        if np.isclose(binary_values.isel({dim: -1}), 0, atol=epsilon).all():
+        # Handle edge cases
+        if arr.size == 0:
             return 0.0
+        if arr.size == 1:
+            return float(arr[0]) if not np.isclose(arr[0], 0, atol=epsilon) else 0.0
 
-        # Find consecutive 'on' period from the end
-        is_zero = np.isclose(binary_values, 0, atol=epsilon)
+        # Return 0 if final state is off
+        if np.isclose(arr[-1], 0, atol=epsilon):
+            return 0.0
 
-        # Find the last zero, then sum everything after it
+        # Find the last zero position (treat NaNs as off)
+        arr = np.nan_to_num(arr, nan=0.0)
+        is_zero = np.isclose(arr, 0, atol=epsilon)
         zero_indices = np.where(is_zero)[0]
-        if len(zero_indices) == 0:
-            # All 'on' - sum everything
-            start_idx = 0
-        else:
-            # Start after last zero
-            start_idx = zero_indices[-1] + 1
-        consecutive_values = binary_values.isel({dim: slice(start_idx, None)})
+
+        # Calculate sum from last zero to end
+        start_idx = zero_indices[-1] + 1 if zero_indices.size > 0 else 0
 
-        return float(consecutive_values.sum().item())  # TODO: Som only over one dim?
+        return float(np.sum(arr[start_idx:]))
 
 
 class ModelingUtilities:
@@ -308,7 +326,13 @@ def consecutive_duration_tracking(
         )
 
         # Handle initial condition for minimum duration
-        if previous_duration > 0 and previous_duration < minimum_duration.isel({duration_dim: 0}).max():
+        prev = (
+            float(previous_duration)
+            if not isinstance(previous_duration, xr.DataArray)
+            else float(previous_duration.max().item())
+        )
+        min0 = float(minimum_duration.isel({duration_dim: 0}).max().item())
+        if prev > 0 and prev < min0:
             constraints['initial_lb'] = model.add_constraints(
                 state_variable.isel({duration_dim: 0}) == 1, name=f'{duration.name}|initial_lb'
             )
@@ -435,7 +459,7 @@ def bounds_with_state(
         lower_bound, upper_bound = bounds
         name = name or f'{variable.name}'
 
-        if np.all(lower_bound - upper_bound) < 1e-10:
+        if np.allclose(lower_bound, upper_bound, atol=1e-10, equal_nan=True):
             fix_constraint = model.add_constraints(variable == variable_state * upper_bound, name=f'{name}|fix')
             return [fix_constraint]
 
@@ -481,7 +505,7 @@ def scaled_bounds(
         rel_lower, rel_upper = relative_bounds
         name = name or f'{variable.name}'
 
-        if np.abs(rel_lower - rel_upper).all() < 10e-10:
+        if np.allclose(rel_lower, rel_upper, atol=1e-10, equal_nan=True):
             return [model.add_constraints(variable == scaling_variable * rel_lower, name=f'{name}|fixed')]
 
         upper_constraint = model.add_constraints(variable <= scaling_variable * rel_upper, name=f'{name}|ub')
diff --git a/flixopt/results.py b/flixopt/results.py
index f4ddc0071..294039924 100644
--- a/flixopt/results.py
+++ b/flixopt/results.py
@@ -291,17 +291,18 @@ def flow_system(self) -> FlowSystem:
         """The restored flow_system that was used to create the calculation. Contains all input parameters."""
         if self._flow_system is None:
+            old_level = logger.level
+            logger.level = logging.CRITICAL
             try:
-                current_logger_level = logger.getEffectiveLevel()
-                logger.setLevel(logging.CRITICAL)
                 self._flow_system = FlowSystem.from_dataset(self.flow_system_data)
                 self._flow_system._connect_network()
-                logger.setLevel(current_logger_level)
             except Exception as e:
                 logger.critical(
                     f'Not able to restore FlowSystem from dataset. Some functionality is not availlable. {e}'
                 )
                 raise _FlowSystemRestorationError(f'Not able to restore FlowSystem from dataset. {e}') from e
+            finally:
+                logger.level = old_level
         return self._flow_system
 
     def filter_solution(
diff --git a/flixopt/structure.py b/flixopt/structure.py
index 3dffeb5c3..91d5a53c1 100644
--- a/flixopt/structure.py
+++ b/flixopt/structure.py
@@ -422,8 +422,9 @@ def _resolve_dataarray_reference(
 
         # Handle null values with warning
         if array.isnull().any():
-            logger.warning(f"DataArray '{array_name}' contains null values. Dropping them.")
-            array = array.dropna(dim='time', how='all')
+            logger.warning(f"DataArray '{array_name}' contains null values. Dropping all-null entries along the time dimension.")
+            if 'time' in array.dims:
+                array = array.dropna(dim='time', how='all')
 
         # Check if this should be restored as TimeSeriesData
         if TimeSeriesData.is_timeseries_data(array):
diff --git a/pyproject.toml b/pyproject.toml
index 902694a82..7b2aef6c7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -182,7 +182,7 @@ markers = [
     "slow: marks tests as slow",
     "examples: marks example tests (run only on releases)",
 ]
-addopts = "-m 'not examples'"  # Skip examples by default
+addopts = '-m "not examples"'  # Skip examples by default
 
 [tool.bandit]
 skips = ["B101", "B506"]  # assert_used and yaml_load
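Reviewer note, not part of the patch: a minimal sketch of why the tolerance checks in bounds_with_state and scaled_bounds were switched to np.allclose. The array values below are invented for illustration only.

```python
import numpy as np

# Hypothetical bounds that clearly differ in the first element.
lower = np.array([5.0, 3.0])
upper = np.array([2.0, 3.0])

# Old check: np.all(...) collapses the differences to a single bool *before*
# the tolerance comparison, so `False < 1e-10` evaluates to True and unequal
# bounds are wrongly treated as fixed.
old_check = np.all(lower - upper) < 1e-10
print(old_check)  # True, even though the bounds differ

# New check: element-wise comparison within the tolerance.
new_check = np.allclose(lower, upper, atol=1e-10, equal_nan=True)
print(new_check)  # False
```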