From d6eaadefbaca1f7bc2e1d079a98a463eb7ccf539 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 14 Mar 2025 07:30:29 +0100 Subject: [PATCH 01/87] Got docs running --- docs/index.md | 17 ++++++++++ docs/javascripts/mathjax.js | 12 +++++++ mkdocs.yml | 68 +++++++++++++++++++++++++++++++++++++ 3 files changed, 97 insertions(+) create mode 100644 docs/index.md create mode 100644 docs/javascripts/mathjax.js create mode 100644 mkdocs.yml diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 000000000..000ea3455 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,17 @@ +# Welcome to MkDocs + +For full documentation visit [mkdocs.org](https://www.mkdocs.org). + +## Commands + +* `mkdocs new [dir-name]` - Create a new project. +* `mkdocs serve` - Start the live-reloading docs server. +* `mkdocs build` - Build the documentation site. +* `mkdocs -h` - Print help message and exit. + +## Project layout + + mkdocs.yml # The configuration file. + docs/ + index.md # The documentation homepage. + ... # Other markdown pages, images and other files. 
diff --git a/docs/javascripts/mathjax.js b/docs/javascripts/mathjax.js new file mode 100644 index 000000000..95d619efc --- /dev/null +++ b/docs/javascripts/mathjax.js @@ -0,0 +1,12 @@ +window.MathJax = { + tex: { + inlineMath: [["\\(", "\\)"]], + displayMath: [["\\[", "\\]"]], + processEscapes: true, + processEnvironments: true + }, + options: { + ignoreHtmlClass: ".*|", + processHtmlClass: "arithmatex" + } +}; \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 000000000..8669aa925 --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,68 @@ +site_name: flixOpt +site_description: Energy and Material Flow Optimization Framework +site_url: https://flixopt.github.io/flixopt/ +repo_url: https://github.com/flixOpt/flixopt +repo_name: flixOpt/flixopt + +nav: + - Home: index.md + - Getting Started: getting-started.md + - User Guide: + - Installation: guide/installation.md + - Concepts: guide/concepts.md + - Components: guide/components.md + - Examples: + - Basic Example: examples/basic.md + - Advanced Example: examples/advanced.md + - API Reference: + - FlowSystem: api/flow_system.md + - Components: api/components.md + - Effects: api/effects.md + - Calculations: api/calculation.md + +theme: + name: material + palette: + primary: indigo + accent: indigo + logo: images/logo.png + favicon: images/favicon.png + icon: + repo: fontawesome/brands/github + features: + - navigation.tabs + - navigation.sections + - toc.integrate + - search.suggest + - search.highlight + - content.tabs.link + - content.code.annotation + - content.code.copy + +markdown_extensions: + - pymdownx.highlight: + anchor_linenums: true + - pymdownx.inlinehilite + - pymdownx.snippets + - pymdownx.superfences + - pymdownx.arithmatex: + generic: true + - admonition + - footnotes + - attr_list + - md_in_html + - def_list + - tables + +extra_javascript: + - javascripts/mathjax.js + - https://polyfill.io/v3/polyfill.min.js?features=es6 + - 
https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js + +plugins: + - search + - mkdocstrings: + handlers: + python: + options: + show_source: true \ No newline at end of file From 40fa55cf2ed9d2f26ce989a8691fc03782b52c6a Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 14 Mar 2025 08:28:49 +0100 Subject: [PATCH 02/87] Improve docs --- docs/api/calculation.md | 123 +++++++++++++++++++++ docs/api/components.md | 159 +++++++++++++++++++++++++++ docs/api/flow-system.md | 71 ++++++++++++ docs/api/linear-converters.md | 150 +++++++++++++++++++++++++ docs/concepts/overview.md | 103 +++++++++++++++++ docs/getting-started.md | 143 ++++++++++++++++++++++++ docs/images/architecture_flixOpt.png | Bin 0 -> 70605 bytes docs/images/flixopt-icon.svg | 1 + docs/index.md | 17 --- docs/latex-example.md | 106 ++++++++++++++++++ docs/readme.md | 107 ++++++++++++++++++ mkdocs.yml | 22 ++-- pics/flixopt-icon.svg | 1 + 13 files changed, 976 insertions(+), 27 deletions(-) create mode 100644 docs/api/calculation.md create mode 100644 docs/api/components.md create mode 100644 docs/api/flow-system.md create mode 100644 docs/api/linear-converters.md create mode 100644 docs/concepts/overview.md create mode 100644 docs/getting-started.md create mode 100644 docs/images/architecture_flixOpt.png create mode 100644 docs/images/flixopt-icon.svg delete mode 100644 docs/index.md create mode 100644 docs/latex-example.md create mode 100644 docs/readme.md create mode 100644 pics/flixopt-icon.svg diff --git a/docs/api/calculation.md b/docs/api/calculation.md new file mode 100644 index 000000000..f72de0a67 --- /dev/null +++ b/docs/api/calculation.md @@ -0,0 +1,123 @@ +# Calculation API Reference + +The calculation module contains classes for solving optimization problems in different ways. flixOpt offers three main calculation modes, each with different performance characteristics and use cases. 
+ +## Calculation Base Class + +::: flixOpt.calculation.Calculation + options: + members: true + show_root_heading: true + show_source: true + +## Full Calculation + +The `FullCalculation` class solves the entire optimization problem at once: + +::: flixOpt.calculation.FullCalculation + options: + members: true + show_root_heading: true + show_source: true + +## Segmented Calculation + +The `SegmentedCalculation` class splits the problem into segments to improve performance: + +::: flixOpt.calculation.SegmentedCalculation + options: + members: true + show_root_heading: true + show_source: true + +## Aggregated Calculation + +The `AggregatedCalculation` class uses typical periods to reduce computational requirements: + +::: flixOpt.calculation.AggregatedCalculation + options: + members: true + show_root_heading: true + show_source: true + +## Aggregation Parameters + +::: flixOpt.aggregation.AggregationParameters + options: + members: true + show_root_heading: true + show_source: true + +## Examples + +### Full Calculation Example + +```python +import flixOpt as fo + +# Create system and add components +system = fo.FlowSystem(time_series) +# ... add components, buses, etc. + +# Create a full calculation +calculation = fo.FullCalculation("Example", system) + +# Choose a solver +solver = fo.HighsSolver() + +# Run the calculation +calculation.do_modeling() +calculation.solve(solver, save_results=True) + +# Access results +results = calculation.results() +``` + +### Segmented Calculation Example + +```python +import flixOpt as fo + +# Create system and add components +system = fo.FlowSystem(time_series) +# ... add components, buses, etc. 
+ +# Create a segmented calculation +segment_length = 24 # 24 time steps per segment +overlap_length = 6 # 6 time steps overlap between segments +calculation = fo.SegmentedCalculation( + "Segmented_Example", + system, + segment_length=segment_length, + overlap_length=overlap_length +) + +# Choose a solver +solver = fo.HighsSolver() + +# Run the calculation +calculation.do_modeling_and_solve(solver, save_results=True) + +# Access results - combining arrays from all segments +results = calculation.results(combined_arrays=True) +``` + +### Aggregated Calculation Example + +```python +import flixOpt as fo + +# Create system and add components +system = fo.FlowSystem(time_series) +# ... add components, buses, etc. + +# Define aggregation parameters +aggregation_params = fo.AggregationParameters( + hours_per_period=24, # 24 hours per typical period + nr_of_periods=10, # 10 typical periods + fix_storage_flows=False, # Don't fix storage flows + aggregate_data_and_fix_non_binary_vars=True # Aggregate all time series data +) + +# Create an aggregated calculation +calculation = fo.A \ No newline at end of file diff --git a/docs/api/components.md b/docs/api/components.md new file mode 100644 index 000000000..8bd4b554e --- /dev/null +++ b/docs/api/components.md @@ -0,0 +1,159 @@ +# Components API Reference + +Components in flixOpt represent physical entities that consume, produce, or transform energy and material flows. This page documents the core component classes available in flixOpt. 
+ +## Base Component + +The `Component` class is the base class for all components in flixOpt: + +::: flixOpt.elements.Component + options: + members: true + show_root_heading: true + show_source: true + +## Storage + +The `Storage` class represents energy or material storage components: + +::: flixOpt.components.Storage + options: + members: true + show_root_heading: true + show_source: true + +### Storage Model + +::: flixOpt.components.StorageModel + options: + members: true + show_root_heading: true + show_source: true + +## LinearConverter + +The `LinearConverter` class handles linear conversion between flows: + +::: flixOpt.components.LinearConverter + options: + members: true + show_root_heading: true + show_source: true + +### LinearConverter Model + +::: flixOpt.components.LinearConverterModel + options: + members: true + show_root_heading: true + show_source: true + +## Transmission + +The `Transmission` class models the flows between two sides with potential losses: + +::: flixOpt.components.Transmission + options: + members: true + show_root_heading: true + show_source: true + +### Transmission Model + +::: flixOpt.components.TransmissionModel + options: + members: true + show_root_heading: true + show_source: true + +## Source, Sink, and SourceAndSink + +Classes for sources and sinks in the system: + +::: flixOpt.components.Source + options: + members: true + show_root_heading: true + show_source: true + +::: flixOpt.components.Sink + options: + members: true + show_root_heading: true + show_source: true + +::: flixOpt.components.SourceAndSink + options: + members: true + show_root_heading: true + show_source: true + +## Examples + +### Creating a LinearConverter + +```python +import flixOpt as fo + +# Create buses +electricity_bus = fo.Bus("Electricity") +heat_bus = fo.Bus("Heat") + +# Create flows +power_input = fo.Flow("power_in", electricity_bus) +heat_output = fo.Flow("heat_out", heat_bus) + +# Create a heat pump with COP = 3 +heat_pump = 
fo.components.LinearConverter( + label="HeatPump", + inputs=[power_input], + outputs=[heat_output], + conversion_factors=[{power_input: 3, heat_output: 1}] +) +``` + +### Creating a Storage + +```python +import flixOpt as fo + +# Create a bus +heat_bus = fo.Bus("Heat") + +# Create charging and discharging flows +charging = fo.Flow("charging", heat_bus) +discharging = fo.Flow("discharging", heat_bus) + +# Create a thermal storage +thermal_storage = fo.components.Storage( + label="ThermalStorage", + charging=charging, + discharging=discharging, + capacity_in_flow_hours=1000, # 1000 kWh capacity + relative_loss_per_hour=0.01, # 1% loss per hour + eta_charge=0.95, # 95% charging efficiency + eta_discharge=0.95 # 95% discharging efficiency +) +``` + +### Creating a Transmission Component + +```python +import flixOpt as fo + +# Create buses +bus_a = fo.Bus("Location_A") +bus_b = fo.Bus("Location_B") + +# Create flows +flow_a_to_b = fo.Flow("flow_a_to_b", bus_a) +flow_b_to_a = fo.Flow("flow_b_to_a", bus_b) + +# Create a transmission component with 5% losses +transmission = fo.components.Transmission( + label="Transmission_Line", + in1=flow_a_to_b, + out1=flow_b_to_a, + relative_losses=0.05 +) +``` diff --git a/docs/api/flow-system.md b/docs/api/flow-system.md new file mode 100644 index 000000000..a89ac21c5 --- /dev/null +++ b/docs/api/flow-system.md @@ -0,0 +1,71 @@ +# FlowSystem API Reference + +The FlowSystem is the central organizing component in flixOpt, responsible for managing the time series, components, buses, and effects that make up your energy system model. 
+ +## FlowSystem Class + +::: flixOpt.flow_system.FlowSystem + options: + members: true + show_root_heading: true + show_source: true + +## Examples + +### Creating a FlowSystem + +```python +import flixOpt as fx +import pandas as pd + +# Create the timesteps with hourly steps for one day +timesteps = pd.date_range('2020-01-01', periods=24, freq='h') + +# Initialize the FlowSystem with the timesteps +flow_system = fx.FlowSystem(timesteps=timesteps) + +# Add components, buses, and effects +heat_bus = fx.Bus("Heat") +flow_system.add_elements(heat_bus) + +# Visualize the network +flow_system.plot_network(show=True) +``` + +### Accessing FlowSystem Components + +```python +# Get a list of all components +components = flow_system.components + +# Get a specific component by label +if "Boiler" in flow_system.components: + boiler = flow_system.components["Boiler"] + +# Get all flows in the flow_system +flows = flow_system.flows + +# Get all buses in the flow_system +buses = flow_system.buses +``` + +### Time Series and Indices + +```python +# Get the full time series +full_time = flow_system.time_series + +# Get a subset of the time series +indices = range(12) # First 12 hours +time_subset, time_with_end, dt_hours, total_hours = flow_system.get_time_data_from_indices(indices) +``` + +### Saving System Information + +```python +# Save flow_system information to a JSON file +flow_system.to_json("system_info.json") + +# Save flow_system visualization +flow_system.visualize_network(path="system_network.html", show=False) +``` diff --git a/docs/api/linear-converters.md b/docs/api/linear-converters.md new file mode 100644 index 000000000..87a0c8885 --- /dev/null +++ b/docs/api/linear-converters.md @@ -0,0 +1,150 @@ +# Linear Converters API Reference + +The `linear_converters` module provides pre-defined specialized converters that extend the base `LinearConverter` class. These components make it easier to create common energy system elements like boilers, heat pumps, and CHPs. 
+ +## Boiler + +::: flixOpt.linear_converters.Boiler + options: + members: true + show_root_heading: true + show_source: true + +## Power2Heat + +::: flixOpt.linear_converters.Power2Heat + options: + members: true + show_root_heading: true + show_source: true + +## HeatPump + +::: flixOpt.linear_converters.HeatPump + options: + members: true + show_root_heading: true + show_source: true + +## HeatPumpWithSource + +::: flixOpt.linear_converters.HeatPumpWithSource + options: + members: true + show_root_heading: true + show_source: true + +## CoolingTower + +::: flixOpt.linear_converters.CoolingTower + options: + members: true + show_root_heading: true + show_source: true + +## CHP (Combined Heat and Power) + +::: flixOpt.linear_converters.CHP + options: + members: true + show_root_heading: true + show_source: true + +## Examples + +### Creating a Boiler + +```python +import flixOpt as fo + +# Create buses +fuel_bus = fo.Bus("Fuel") +heat_bus = fo.Bus("Heat") + +# Create flows +fuel_flow = fo.Flow("fuel", fuel_bus) +heat_flow = fo.Flow("heat", heat_bus) + +# Create a boiler with 90% efficiency +boiler = fo.linear_converters.Boiler( + label="Boiler", + eta=0.9, # 90% thermal efficiency + Q_fu=fuel_flow, # Fuel input flow + Q_th=heat_flow # Thermal output flow +) +``` + +### Creating a Heat Pump + +```python +import flixOpt as fo + +# Create buses +electricity_bus = fo.Bus("Electricity") +heat_bus = fo.Bus("Heat") + +# Create flows +power_flow = fo.Flow("power", electricity_bus) +heat_flow = fo.Flow("heat", heat_bus) + +# Create a heat pump with COP of 3 +heat_pump = fo.linear_converters.HeatPump( + label="HeatPump", + COP=3.0, # Coefficient of Performance + P_el=power_flow, # Electrical input flow + Q_th=heat_flow # Thermal output flow +) +``` + +### Creating a CHP Unit + +```python +import flixOpt as fo + +# Create buses +fuel_bus = fo.Bus("Fuel") +electricity_bus = fo.Bus("Electricity") +heat_bus = fo.Bus("Heat") + +# Create flows +fuel_flow = fo.Flow("fuel", 
fuel_bus) +power_flow = fo.Flow("power", electricity_bus) +heat_flow = fo.Flow("heat", heat_bus) + +# Create a CHP unit +chp = fo.linear_converters.CHP( + label="CHP_Unit", + eta_th=0.45, # 45% thermal efficiency + eta_el=0.35, # 35% electrical efficiency + Q_fu=fuel_flow, # Fuel input flow + P_el=power_flow, # Electrical output flow + Q_th=heat_flow # Thermal output flow +) +``` + +### Creating a Heat Pump with Source + +```python +import flixOpt as fo + +# Create buses +electricity_bus = fo.Bus("Electricity") +heat_source_bus = fo.Bus("HeatSource") +heat_output_bus = fo.Bus("Heat") + +# Create flows +power_flow = fo.Flow("power", electricity_bus) +source_flow = fo.Flow("source", heat_source_bus) +heat_flow = fo.Flow("heat", heat_output_bus) + +# Create a heat pump with source +hp_with_source = fo.linear_converters.HeatPumpWithSource( + label="HeatPump", + COP=3.5, # Coefficient of Performance + P_el=power_flow, # Electrical input flow + Q_ab=source_flow, # Heat source input flow + Q_th=heat_flow # Thermal output flow +) +``` + +These pre-defined components simplify the process of building energy system models by providing specialized implementations of common energy converters. diff --git a/docs/concepts/overview.md b/docs/concepts/overview.md new file mode 100644 index 000000000..2515a7407 --- /dev/null +++ b/docs/concepts/overview.md @@ -0,0 +1,103 @@ +# flixOpt Concepts Overview + +flixOpt is built around a set of core concepts that work together to represent and optimize energy and material flow systems. This page provides a high-level overview of these concepts and how they interact. + +## Core Concepts + +![Architecture](../images/architecture_flixOpt.png) + +### FlowSystem + +The `FlowSystem` is the central organizing unit in flixOpt. 
It: + +- Defines the time series for the simulation +- Contains all components, buses, and flows +- Manages the effects (objectives and constraints) +- Coordinates the optimization process + +Every flixOpt model starts with creating a FlowSystem. + +### Buses + +`Bus` objects represent nodes or connection points in your system. They: + +- Balance incoming and outgoing flows +- Can represent physical networks like heat, electricity, or gas +- Handle infeasible balances gently by allowing the balance to be closed in return for a big Penalty (optional) + +### Flows + +`Flow` objects represent the movement of energy or material between components and buses. They: + +- Have a size (fixed or optimized as an investment decision) +- Can have fixed profiles (for demands or renewable generation) +- Can have constraints (min/max, total flow hours, etc.) +- Can have associated [Effects](#Effects) + +### Components + +`Component` objects represent physical entities in your system that interact with flows. They include: + +- `LinearConverter` - Converts input flows to output flows with (piecewise) linear relationships +- `Storage` - Stores energy or material over time +- `Source` / `Sink` - Produce or consume flows. They are usually used to model external demands or supplies. +- `Transmission` - Moves flows between locations with possible losses +- Specialized converters like `Boiler`, `HeatPump`, `CHP`, etc. + +### Effects + +`Effect` objects represent impacts or metrics related to your system, such as: + +- Costs (investment, operation) +- Emissions (CO2, NOx, etc.) +- Resource consumption + +These can be freely defined and crosslink to each other (CO2-Emissions ---(specific CO2-costs)---> Costs). +One effect is designated as the optimization objective (typically costs), while others can have constraints. +This effect can incorporate several other effects, which would result in a weighted objective from multiple effects.
+ +### Calculation Modes + +flixOpt offers different calculation approaches: + +- `FullCalculation` - Solves the entire problem at once +- `SegmentedCalculation` - Solves the problem in segments (with optional overlap), improving performance for large problems +- `AggregatedCalculation` - Uses typical periods to reduce computational requirements + +## How These Concepts Work Together + +1. You create a `FlowSystem` with a specified time series +2. You add elements to the FlowSystem: + - `Bus` objects as connection points + - `Component` objects like Boilers, Storages, etc. They include `Flow` objects which define the connection to a Bus. + - `Effect` objects to represent costs, emissions, etc. +3. You choose a calculation mode and solver +4. flixOpt converts your model into a mathematical optimization problem +5. The solver finds the optimal solution +6. You analyze the results with built-in or external tools + +## Mathematical Foundation + +Behind the scenes, flixOpt converts your Flow System into a mixed-integer linear programming (MILP) problem. +This is done using the [linopy package](https://github.com/PyPSA/linopy). + +- Variables represent flow rates, storage levels, on/off states, etc. +- Constraints ensure physical validity (energy balance, etc.) +- The objective function represents the effect to be minimized (usually cost) + +The mathematical formulation is flexible and can incorporate: + +- Time-dependent parameters +- Investment decisions +- Binary decision variables (on/off decisions, piecewise linear relationships, ...) +- Runtime or downtime constraints +- and many more... + + + + + + + + + diff --git a/docs/getting-started.md b/docs/getting-started.md new file mode 100644 index 000000000..fef6a5e67 --- /dev/null +++ b/docs/getting-started.md @@ -0,0 +1,143 @@ +# Getting Started with flixOpt + +This guide will help you install flixOpt, understand its basic concepts, and run your first optimization model.
+ +## Installation + +### Basic Installation + +Install flixOpt directly into your environment using pip: + +```bash +pip install git+https://github.com/flixOpt/flixOpt.git +``` + +This provides the core functionality with the HiGHS solver included. + +### Full Installation + +For all features including interactive network visualizations and time series aggregation: + +```bash +pip install "flixOpt[full] @ git+https://github.com/flixOpt/flixOpt.git" +``` + +### Development Installation + +For development purposes, clone the repository and install in editable mode: + +```bash +git clone https://github.com/flixOpt/flixOpt.git +cd flixOpt +pip install -e ".[full]" +``` + +## Basic Workflow + +Working with flixOpt follows a general pattern: + +1. **Create a FlowSystem** with a time series +2. **Define Buses** as connection points in your system +3. **Create Flows** to represent energy/material streams +4. **Add Components** like converters, storage, sources/sinks +5. **Define Effects** (costs, emissions, etc.) +6. **Run Calculations** to optimize your system +7. 
**Analyze Results** using built-in visualization tools + +## Simple Example + +Here's a minimal example of a simple system with a heat demand and a boiler: + +```python +import flixOpt as fo +import numpy as np + +# Create time steps - hourly for one day +time_series = fo.create_datetime_array('2023-01-01', steps=24, freq='1h') +system = fo.FlowSystem(time_series) + +# Create buses as connection points +heat_bus = fo.Bus("Heat") +fuel_bus = fo.Bus("Fuel") + +# Create a demand profile (sine wave + base load) +heat_demand_profile = 100 * np.sin(np.linspace(0, 2*np.pi, 24))**2 + 50 + +# Create flows connecting to buses +heat_demand = fo.Flow( + label="heat_demand", + bus=heat_bus, + fixed_relative_profile=heat_demand_profile # Fixed demand profile +) + +fuel_supply = fo.Flow( + label="fuel_supply", + bus=fuel_bus +) + +heat_output = fo.Flow( + label="heat_output", + bus=heat_bus +) + +# Create a boiler component +boiler = fo.linear_converters.Boiler( + label="Boiler", + eta=0.9, # 90% efficiency + Q_fu=fuel_supply, + Q_th=heat_output +) + +# Create a sink for the heat demand +heat_sink = fo.Sink( + label="Heat Demand", + sink=heat_demand +) + +# Add effects (costs) +fuel_cost = fo.Effect( + label="costs", + unit="€", + description="Operational costs", + is_objective=True # This effect will be minimized +) + +# Add elements to the system +system.add_effects(fuel_cost) +system.add_components(boiler, heat_sink) + +# Run optimization +calculation = fo.FullCalculation("Simple_Example", system) +solver = fo.HighsSolver() # Using the default solver + +# Optimize the system +calculation.do_modeling() +calculation.solve(solver, save_results=True) + +# Print results summary +print(f"Objective value: {calculation.system_model.result_of_objective}") +``` + +## Visualization + +flixOpt includes tools to visualize your results. 
Here's a simple example to plot flow rates: + +```python +import flixOpt.results as results + +# Load results from a previous calculation +result = results.CalculationResults("Simple_Example", "results") + +# Plot heat flows +result.plot_operation("Heat", mode="area", show=True) +``` + +## Next Steps + +Now that you've installed flixOpt and understand the basic workflow, you can: + +- Learn about the [core concepts](concepts/overview.md) +- Explore more complex [examples](examples/basic.md) +- Check the [API reference](api/flow-system.md) for detailed documentation + +For more in-depth guidance, continue to the [Concepts](concepts/overview.md) section. diff --git a/docs/images/architecture_flixOpt.png b/docs/images/architecture_flixOpt.png new file mode 100644 index 0000000000000000000000000000000000000000..1469a9de8f1f77a98a2221ef45cb87728dfef214 GIT binary patch literal 70605 zcmaI7cU;p=^EQft6zL*0R6$TdLQm*7N|mOlfRxZgKp=qh7Lcwq5$RQmh?IbI2sISx zO=@T%^xjKI;Dq~m-rsx9`#IrT0>n~kBEqP zk%;JKFF7d@(T&^Prcii&5ARA9&7?(EL*L4tGu9r=X#n zZ+ed^|CkG8d^F+ zCMCs^HGaY~>rfsyP~YsoBYm1#(>Qo~A(z!K0deweygHG&nDK{<{nL9->2Bf$ul9PI z8Gge+>09I^T=vndtO+zo=eO(X0A1UT^L%}O3czc>eX?$^jdJ+hy(a@LDrDlf-%)kw zzBz6^3Fg#H6mr2oURyfNboWnoTX}7z?G zY3qy)Dcf12HwsOlOxbKI7tp6!8+}?ZlW^(bG8>kGB9WU+?nu>~? 
z5cHXn%E(#Gn1h#Td$r3)Q3J*5GH99wviyD@dAS2+p#`+h!sAsr<{uRQ%+`sUbHmQm zxi)x|#Fe=h_|74`s}6^SchBT_9nHS3&f7W9syoRv`s|yWMzM--EPCEC4v1CF7&thX zfql6JYV)ehvs>?6bx%>R&S%U+J@gpfe(hbFI0!6H$o)N8iK!ifH73 zvRoQ1vg=tDmDR4>544_Xe=XBMKDa+^e{$W~YTZVTGtOy_X@KVIu+aJGtf!eN{vvL8 z;C<}-vt!jOq?G9Vq}&5WN%^zBl7N7k85 z-ClUi*uxv&L;W^7b&nPX!^LJ>2roz_&7$$B)6cpd<&$>x|)?`2<~)q*o+@32z` z#+vBLZdcaVX0%_wI;L+|fQ&8?c*ESX+KWlVlVmD`Fv!_tb)UGVxzkizcx(Pawv_T-LwM zSTwS3{=Ssr#I40j_|=muwp+;4t2)owF63Fq{JkRP6<;SikZH(ZcmMmsJ$mH!*+6>I zs-by!4TJH@WHXRz^XPp`%a`%S*C#Wsx}bTO1s;3sz8npc9J_UN(~o3|cKLxga!wVk zvcZI#G3A(l^GH5pZ4XK7$?Hk%QP0fZNt$@}o*DGY5aCsxrd05}peAXo$iSxPUB2wa ztF5DrLJ4xc0^01q>%_q?zEoH0v!ZV%kYxaUeC;Kt*LLTA7uRlB{+^lZq3v@0+i!Ny;LM26vF%6e zqniU(3X;WKb1@_ckUdoXCi;-Qg0Yn|oV6z(Z3ZY0cm2Sw$iW6-w*0MUf+iEsW3M_Ikgi!NEFPB& zc=yW^5edAk#RCr?FB}jxy?#ckixUJM@a}arZwnh$+8W}%vw}jt>q0no%<$%u)&4ds ztEQlpmp=K7oa2Uysj}j@lhUV1l#{=(+|f|a-db+#JGHQR&)vqV!`DTeJuyWFCBH&- z!M;k9jXOx5Vuea8@kUnu%!>>$ zsUw>kd2LWkC4&hPB2Ei@%vzYONY!aFsl$=2fqhdCxg~tb5F=AphgI zV2X~eiXy-Rb_`ayc?CBpw`5O?A?|L7`RgdT=Jt%^wUp%Wx?V~<4cwz%2KGFUX7~4= zOWvvIGWVS{OeC&0&g83x8VIEQUzZ-$m5EiE#KD1o z2O4WFw_Y{W-2(>QS9O`S`pAKs6ECHCYUB5mWA4`Pff%FZTSk`YR0#MO7Q+DtFLIO~ zuJrfLFbL=rM7xZllXY*RbNBtES}w&z3IuHF4{vO7(FM0iF|x7qp|~BsS7%VhuNi+* zB+t2h-wsa!(r)_FwMdY6r;5uPQF*0(60Y#G_ zMJwqp4$#7g<8Dg{V4*+xB)PuCE--C9&NJ1ZqOt^8-ac(TCA{fYpwuTN`*Ew(f0vg^-TaO^mm_GlSh>DOX6ie}|5 zDBKW_E3U^U=yiS!@E=A$AW@V?(ytz^4w|3=qEYi2BiNdtBs0P6reayt$AwX~%3JfH z;znPR9=i_8rZaAxFmtIBn%g_DzcnuJ4mYV2yRammvBFp0@tzj5*U;UVzYT%m!(`-m zO0N5GNtBB8cm7KuGmbX->hd;!G?z@U?B;Fa=II&(=+=q3{1i2qma+iumTCq)(r9)3F8w`7Af2;p9!dOrt9TI|jeh&M%5a zM9lt^G5f}HVxah_4343ZUZMk86g&^F$$F_HeTxG>`z+}sWfDF1vT#c;oHqF{nwa1H z%>3b|gFDt-BV(4BBiu>Xr{>IBVBEiq6ma4Ic}m4Lw(_8H=k$@{&sV5C#KEK4i1pC| z8%^#(W?_@$R@aN&iric0HHRuvC0`_;r7XX$rx(@OCLa>5c3tqq?RVK+t@hQDI02vG zlXZPEo7Vr-vxZIBi7M1?{qB7a4M9z~NN8$ZY^r+`e#vU7H_<$6-tnhFAFI0`VT18; za^d{+o|AxU*OmCCKFJ_VE5$ID!@uuOQ*QFrLdQj{2~TwH0N=Pe5g>N5VoV5OU-)bv z^b{R&Go&eVD5$sdzX+$jQ?BtB9jxqd$K8upr|Dq7sa4Rd==upL$CP>&`0uwT91xLw 
zMx85A;cUC+xG6BUVYdmTJ+I@;;kV<|O9GnCue4P8iwRDhp@=3VvCW;9>qVn5qlrG!#;830oy0T3@U^Y4_>#&m+QitMdlRsCOka+(XkTZ zsD4bYr>L^6I*we19d}mRfc*8Eb_~bmV{2XO+Woap`zjg$3cK(53fGdjdLZ3luzS{rqc_BkE*t5twAJQIkiV&?U;H zAS0VoaW!w9WWCeH6(G0ieCB_3F%OS)(txjvrrzhMJh;Q&zZWqfvd!Q4Lro|(t(sol z$}9=pcoP|PHsF47$h9|}Uu8cr{(90r+2eN?r|)0FEObONH>{(&qd{}vt@<$nFdKPB z`$$l~m?erPc4G>_jSW@Wa471I>#L@wTdtn+|5b6^Y#p3iToHf8+3}1RU+c3KB9l_{ zR~^?FyJn)lU$`E#A)0VJXw0g>rhy#l-n;M_dwl$aT1HOSJ0pD+cpR@fjMn?OJmp-t zG-B$*?4ER5j^Y0Oe(cu)tqgoISj*w7c0%IALc}xA+j8BDlt(vkI@i#i*W3{JGqL1) ze>^^U4EeUlX?q-6UD?T+LDTTz75pPf91+3%cie+U&r~?=w*kx09 zG{lGJ^G$&|zysya9D;6#0wp6QNo%x2AbRZ&FK9bM@X<2ZsZS1y&oe+ynf%^aL6@JR znG~Ws2Ms|s!>qNa4uLaYBh&lLb98FhnnsM&ELBJxvGRp1{%O2~a{Gr_ny zJ&XrCIx3j!A1laQ?9a)gb>P;dn3n~DwCxKmZc9-7Y+5k$lz1-B0W>$sMlqv+iTISZ zxGBhwcUQH$~M6b+_tq{ z@o6Ml{(RKMS5xz+_D5rWRm}wW<{Z10J6_`Cq4w6V; zio(n;^7aXnB81UZ@>deg5;~xt)vp7zKD5|d=sABFiB;3OsY7HeRlD7;KI#oM362Lv z5ArgzI9+Ymm2*$tii{*dI%~mOb4}GpRPtZqc6KjIQM)V5Yx_BF0Q$=PEdWY z^4)l#|GMTB(H_=!NU31~`Gm7TD`l}$@aE5_${da1OpmyZC$d_aoi$!e>Q z{1@gq8W2l|uA9J^2LfqQj<=ajoRw5%y7$k?k%&rs6u)-@0d|eK|GH%XVSAy2DBnAV z2h2trlSp(kIFQ+Yh!pFK{vjZ}=(i%C<)WC+*s=5@4cdcsC(tC&EKg#StBoSKN3TGt zRo;2z-k(2lh&~nR+QR|aS=g?GlmDji>_No1okTbp^lPs0*-fgZZ;NA;FdXZ)QrqUK z&L}PBwzXlU?F)}1$eo)3WgnJ%EG+WOHqcd~KgTV~_@!e!zG9N=9Sg*S@%88Kqfqp* zZ-GVH=^fPWiSeiL`3u!N{Iqy%BZ>9iGz+(o7&zYK%nLtfCs8`BmMa}q7Qug>jUoJG zxb{6)^omH^y^f5fl5dakWbvwbSL`Ew?w|btyM*U<)w^f3Pe}ZU6^{fC)nlu#7HqC4 zhb|AF&nYTR8GCG=EKYmL`M8V({CXD4T@p`iPqOIkNcHO+@%{(1dTQg4VHq{;q&Z1O zPwrz~BdJW@%xzmY{zL8)HanIBG|_^zxf%~FmDJ+M-XqSu$!&BV(g3&?^I)2fwzJFd zV_Kxt^o;OARQRR2VhfPNipiKMuiOe*zwJKi42fNp()~uiIzC8@&lHyH53Gd%f;ZgF zj(-t2lm_~iy46g8jw0R-NZ1c!XQgZ-1^Ns7d=EUhzSoi1MCsoo<1;YVO#@sXGo;K^rTl8sG~j{ zxjpRkA3W*K2`YI^U=3Zw1$o!`P8Z3Cbz8W*yl>~d`=)d7tAFcWx zc3(C1q$T6Q@dzfSIKIM#Uug?t8p8I%8u!2P*Svymmv?dQ9D+r`h!sirlqm;|UdmAz z2+_LjELwZ*%SJe~-?$MRTz!TR3CuQIMl0{;rY2;yI^HWf6`;8K*|m(o`A*DuTbxZ9 zhKWZBnbel8C@eZT6FxmHssQRRUn~^H}23#rVL|aX4yQ*>3tUxe~6J 
zamU!gd4j(RKSzUpuj>f#BMoUcotPgchNF+VhK+5#GOn?3b9}T)|K*A##v}E8-F3@` zJmj6~)<4x%5>VyVWaHC^%*a-jLMq0>pC4qag|cC_jMcs-oPau)qUb{j>R1!LHG_B0 zyCPnh*Krt=mc>>b2LmpQY8y3fgHDDpozX?-H-4aDlVM49c=8RFxZ6TCR-xQ1yb0g$ zF}{X&-BuD&ee_i2(XW{p#vt>kS7D!Dy{h97B>CSx8mId$V^|@R+@rdVL85xBkMv$F zdpOh#@tqFyB~3r9v3g7hFa%`lzc@_&?kJfs1aB5sw8g@Xa<>ZJZ&jTgNC?AJCBn!C)#a3O7yRH~FHmx1XX7_+v zuU3Eb%Xx(OzsP#OuipaItoR!6mv^j!QT2WNyH>dQLDKWM)nPTR435)jd$kSCb0|tS5hjCpzpsf_1AuDxQT-oR=kM}sU+Iv?ask?`0$AEoX+z4e zc%EXpTHbhEicKb~ewe=WcID`AE9}df6I+x5UfK$f_~DQi6nagf?e#5}%g5Zd*tX!2 zV@EB0e~PT$mVLkfoR%QO*zf<*#j?vv)Gh&8{g%D-G(4I7(Ran+1Yp)L_7a(?d#mBK zTDygmmCli8H`<{^T7GC_sJGr;_bU-Z*6i&Sq>A3xhylz z>Z`4u`V*r5YKldbe;0F*vuvQ;j%fhY>$Ei~6nVMSlRCfXt`;H*Xim}|Ko%er)6Z4| z;H5mDL-QqP*eR>D#y%(J zmI{*njq4J~JnSuM{KrvlKe5EnGJZN?4M8@MF4|eT}>|v(_0vlP|kBugnR8&!{ znq>5=JB_`YZ$)d{Mau)MZPl$`ope`E?B^h|2Zv7+c(^r|$EN71-83G39~h_Qe5NOT zoTz?s*zR?ip>CiA5^RTCW0vNQ4xaR86dV+*`dygi;U zl5;dUPt-Y*+5u(;=e%F&_kl36&(ur7-6bQgw=bnG&o=0TxcHOgpGeWPADQqu>$Gx- zP&4uBylb~&`y+gh4lby%JpO=ST{Uh!7p&R*%ExGkUC{nYs_YZOpP-A|$fe^Pv$)4- z2<`ferh0GueQruU?zUi!RV2;Et+O6JTrdCT`%MQt#lZf1{@Q(b_E~@jZh{53|A?Om zbfx`m-9_1CDDf3tQzzeENk=8-<+!*G&K4 z1edM6FtgP0%tMc+;#ZqMM#CB(f}#3kwN%>kJnnO`{5%2mgb}iUbs0jWv{ARBK7pk! 
zUs^|~2j@`oo+*Hw&>?iQ#I8vz}>Rech1d4IrkrVaG>Ezlxmvd-0p1K5cBaE1=c3RjboXLvFxFErEp zGy-;i0L_t5xb>4kv2WgKvfFpJz5tU)W3d)*5dK{pPW5!Oltrq$Q7&Z7X7N>1qgZY< zzjo@9DzyFUea$a#NCybn$OsFUJ#ppCE87M4&YS$(F&whLF`hgAa=1%@QkvZij|BeO z2aL(1`e_JtUi#57?LC6`n3x&Q`q%#$);e2wq=TFwD>Z)S2SV&HqDaz#>qO_4x6sVB z7wG6ZFD%Gp6K>f0-N&`cZ06^}6ju2s>yk6MQj^PHhAJb#isfI{VHN8`c{!6q$LV7> z`JF=eHr2}0yqs&QEl%|we~E0H-harF*~%pd7+GAU1{RPTY~0x`J=Gd- znR#kO9M%iRB!$zdpkH6Z9^#(Q8HnV*V+PzBb}!;T_cLu$Gz6SUi#;ssJ|%T=kM<8b zv`@0uQV1K7%`0W2U|mGIs%zuzySfssDleUHq6pcb*(=*1N@hbF*^i?X1FqT!mp=+A zVs!la?I8?1b*r4GjEXsi?A}Xpsma7`5e)3atpL89Guy98S@Z&D-m4j_G&C=02&jMe z#P>5(u;qwz5Z8x4JUiKR%REktBlDc2wP##`s_2|kiS&gv5vS*OS^s`&j?wGHRy~`` zYggQ!3?RCB;=y1wWy?fAu?}|DFwFE8ux=F`c##psaGRQtkG47r)KcqWZs2(J zHU@d&_W1P`$psyBfa;=I^7=%f3bc~yh{Oi(tQD{lv4cNB0X2iOsD!P2vvnZzH8&so zW>(oN%fxx3vasR`GbYZdDGMjfch|G9^qYfaZb1dBg}xGwLmCpTMO~4?^$*heqPj#n zRqDm6$u^db{c9F#(B|FyiJXYBF0AD5(g<1*6H8#No!nAKQizw?NNZf)<)7GzG8cr{ z{+}~^TZ^{yOb|*Qy!i)100yQP{w{%p*}w_cnv(Pok-cj;@3T?BU?X?LoEZ?Wd$3kl zc`q5*&xMVv`=}XapECHhC~*yD%FE99Y?e66TYd;8aMyKKMQ^qWj-vmOAJIJzJvdq$-2RX5-jU0c!o}80PX(sE@9C@aPT8@I8dK1@ zqaUE<7}3G3NexO$M~pE@;fAgNI56v&OX4=D)iGs3VLdNi45R=?=9cDbCcRsAwrcwk zpNJ-6m{|$0O;6s_gxh(Pq=91S;Cq+^m1RJ`kNSMqAkkBA=( zetY<`ZM`6CZXA=;bd>M#fj7X(;nPmGycaY2NsCGz*VU;L zntOgxC@kFd4mt!l@s;$dyK9bu+F(o~O!c#K`B@A7eqxz5y1J*scD`P|yYE;vr#(it zZzPSZVb6x5=C`Y%K@>eU^T~lya0#=_LtDj=;56?;V(Bw+r}hd4cI$m6vb^t#hrwKM zJA8x6yp}VJ$XbyPDJ_t4YUhPbrsSp;aKL_NTIA)r(2yC}HZVvLh>_x)kpc#4r0V>Y zh`D7!H1Sp@6B%vaGGAm+&~n_^v`aQZkqKjq`HeD$jCbe=hwaZ`EyV80$NW)@P6M?{ ze;P$Ou22UM2;2%()*2{%VLfky)FVieZOvA~SRvgtXwu7$gKn_?qbVH=DY%uM%(~p} zMrj8wjtuU&OMdo|tu2{rByX8-7V7%9d((HAZJie(M^FZ}z!~;<6G=d7GarYc;7?m5 z1jZ6? 
zoHtXM*?Nr!iSJfl(7XwGQ%H8NVc|a zj1^U&x+)KHywU?M&!JqAJtVr#?>9RHZ)@xBDG>#mnZel|Mkrze{ErtIZ)xv6N}o}V z_Y;Y`1>{*@;!j-)4j1z3ZsE(6aep7SZHTq|O3U8lBQ z#9|%G@n#Pk_i5X3b2p!18AEAy_GFf&%xga~P{@lfgV3z+-dXI`z9J2Ptm0k&2Mp5< zLU-$Y6HA;XTZRt~mPk_Hh+}JwyFVH|WGf#rfCbObQy}pyTu_cT)r4}VgY#=|TENlsl zQ#2oH*Br0XUtJ7L2z@`?Z&;K<2xTik<~>s(HN2&|UIP=>tr<-*g30R8%P+ecJSzc)xWJy1hXS z60k8M-K7LZ*yB?Y)c|1#N0?MV`%?ii4kgwX?>bL=hM>+I@Lkzx$28TYj;F@u{tK#d zfF%DxSPdGI>2NA{R@l%lj^|V8NUJ1)o&4FvM2O1kc?+rUR*8DO&P`gVAj)Vj%{#K{ z8io)5qn2KoE~NnYobgr1+4pf;>_p2tNzEmATEr;`)e&J~#|~kJ0>&RYQxkgBu5ho| z-wImOb|&x%7j$;(Str!$gC)(>7?K)T^vDN#ZuP-#sDOO$XI{KRy()LG<8a@eugsTOqHq{F*U*u%R_w z+O>qf1X;X6JL!F&^Z0;Cw}8m-V54ILa2Q@4==%legjGhkwlgr34o}`G z$PX?0Kp=vC77?%;8w0;qi=jMmyt~l!9f~BA4;r>dv&Z*>v35a`uy+KoI)wou_*pUH z2h$OM^TM?EK3}s%U-)bJ5)4xpzP_@w3)0m4lutomhZ6svHz_15K7(3W`8D)jjIA|y z|9Ziv@#y!6MrrqBjr$A&b^JAa39rUH-)ZUFH ze?3)xR91Q9>1fDAFd^y9+W6FBU z<@{h7*;edJsC}4{w*97fUH{NWUBQsP@4%i(tmV9!)L9M<{E&a?c36KT@F18#SWPx{ zY(7(YUBDIDp-JUx)w1|U!yG9xWQQLS0d!I3rk$hCz#|zegNFonIy$K1%=v~)ND3^l z*Tq5EHsWUZrEG18wk&FWu}gyr^bdG+@+cz*JE`^P7edETfaOnZM`!kmqZ`6uw#Bpt zYU)_#!$7Czo0h4=qQj*1KnI+b86OS>1}t^^8FI3Vy3|$KO0;_G5CTLynM~K;i&Cht zic)9e0QSf0IU^4jtMYStHivYAZ|s~iCrn07Sq^Q%SoXI?X|>#hgbmq8%i=}|oZjQY zp9eCp$Ti&w9ND*t?@tVW5eTdt1kGdFZCIYZ$N##bEcbbQzWVV@E1t9W#5+i>xaid@ zZw@0S%j20(8X$%CMS<}%5qs>>dKDO#m$8rVbN)+`gf)VMCsOGAMX2ezp@7IC+PybY zEjP`Pf&8_9Ck|vPh0OI1hZ*_rozBTEN=uYTE$&YtW^=pvf4LT1cq;G$#i#y>`}`38 zG_>qh_~%#3-W-oY@?~D--Rs$W&LcEN;M`t7!f#s?(l8cA{J)NYu}zQTkskfB`%k*W z4MJG^0RFV0yW2oP_`}|G5%YiBoYuzLai3DAF6g0_+-*U_BwR5B@ua*trnMzn!*eNw ztF$5E9t_%h=p)M@kk3b}|7t4HCd;3R!^9Th6P*Bu9T%ay1+x0eCf<)Z+1mbG@x&ib zfTB>vX5!wzInO+eaLgJ0HGa>Il0H9zKYc9v%rWpTzf>W4$@>b+ zhP>A7X27l>v+a?xxcT!Zy=+f z{@<~};Yh(pyeg0K5H$BiYoU92xDPAeEe<&hLJM{>xz4Kr1*6Uju;aurG@wTDPS^nM z9eBa1$q1DFfc+BjbgyrTU;8@HIsx(s(>*>41R=?S&J8n@HUq*2(zwC~VuGcO_K<^V%xzD1DN(TfU?H0S5zE1t-LB3%UC#T1 z$C7e*b9Af~C$QhR_J=P&mnCX>|cKSkB=M8`5^o5s;Bv-*K)$)Ow$9Ak(ips76xGy)a++{(z 
z{N+!XR$2;Fz@dYw2^`A#-_58&RSKYsu0`+aiZS`w^j~c^pH}Z7mwS=2UCr{c0R(n* zYX6!q-KoXLI1gXp{W=2tI=J?MSicThmzI>#AECRmey;trYC$)stFJT>kGn|BF2V_f ztkYo8WyF6ZIleG)G`xS1wzYdU&PVF?^ktM5kGtu38>6$y|lh zYkwjK@gfeW-w*0*CUA6Q$9o$fqpjnM==zhhe#P+ZK|hd#ubqLZFWvt!a0T5EYeA+E zPcFHJm6~^$Md$fJK?Wv_ zR%JxJ_gi9>M?bDN=_CisLpAmb?TM>Pu7H`0FDFufe+w=17S%Kd-N*xuhSe%u0dU4^ zb~?ACK~;94V1+1lxVU+nvNmlyP1Vc&Fk<5CwK>tC@*qZHkO>D)hdbT?_+2oi`KpW;xYw zjRPBF?u*WFaI4&O-`kKff?Q!I8YUjz>95v^I^B$T^%fn_)HrbKtfq}6f5_{YO^`tO zP^)pczuIQI=7ndMcuKI1$0bfB=kE6gCGE#URQ}v{{_`*S=L(nlZv*G z>VxbXDt;E)1Ph)74X90I>rG6y7<-=BMlzw^t(F7W)6Wym9`wooi8YIQa5g1x+C7so zm2Nq$1!Q^A8Djwfg&ouRb68Op-Y1+nH#I=(uS;ieR90S+@~*l@1tV1v3IMcy@lOS*<{CaE)eb(PoUJ&+**5`uJ;mf&RlHQnDs~J;^l#xSy=BSJd63%Cr1Kk zgM>3>7?z|~g#v~$boLSi4aVDUz{LAHYt*!y>w$#uC{=d1>qhUB;zkv>{V&V9Ov(43Xj0UsEZ|*}Q8clT&eBwb^CZSkDW4=(;mgHzjSs_SML0v)c0( z*eG&m`8Csj-u(Xs2P)rFR!4X{K52i6Q@DwQu&ep~iop<$UTn6-b!LiZGV8lp`v2P< zOn?b>j10H61M~j^bvv>H6?L2!LJKBoXL?kNldeUc-gUlLpdWwf@nPkSbNlH``^qrY zAKt11Yo2j!0;Hp1R4A`A35eBSJdwS&orV8akr&*)QQCxkue}#^otz}A+Ce+AR!0>Tx`~?;4oV+iQ6z}zVn2f=Nau5 z=hi%7pR&ft?$qvKCz>-G?m1h(9aPt?dU)quf9kUSKjb_ibmww}<0BThvwevzu3(Gp zpAf=vpsRNyc^XiQ5V|MiqCWa?h<5PYxY=4VJB4wzgA4ExHY-Ec(}QZ)htN$YsX|a! 
zXPsr3TEP{Qi#T&fn|Kou7CHEsieaFZpZ%=czAdk!lqP-v@rd26GXb4F3Mbz^--L(G zBWh>0H4&Q>JR!EU1z9i|j}>FFqM2fIyi?QpJJ8b?avocgXtssjOY^7TwRES@)qrD5 z@D3$`@Fvu{hYi#}qRQ~LzAm7$u$^YSK6#|}vKggK`=1tpWAkD1KP~eCz5Uq?>@Mx} zQ>#~U;TMaMz|zRkf6F562#5wM=(tOOl zsO+4fm~y|qSVGE)kUKCse0uJz;Ii0mWHRSgM&lL>013PDQg1?MTObv0u4C5PlsVRu ztR@jE|3@D#qX}@tvu()x5IIygY@6lk&mwa%ot|+v@x%-Ig1qQQ$llO*LK+mj<#i&( z>cSJUc4MsOQh||`AEYGED!J{BK8dQ0)+E+wcAIDZ zA6oc?O#QwSf;2CfuwZrMjEY6j4fy2SsS!tQr)J3kXk@N_e5TLZ|4iT2wN94qZkqf} zo*|2wKjXY#WSx-G9jSB*xsu%UMdvefQlQbcEz$JzjT^xhtEJq$!Xb3>+1PYHoxC5> zv~qiafV%kh&3e@+D4S|+op06UOt;Zi4D=HsL_#FhLI~(AY#|(;k8JOdGCSmKkzbgu(<$>>D6tbZ9y#p zR3d+Cg=e<67+6qczMkNHu3f69s|HbLzJn0DHh5B4**@=MwtZscK+@&o-j}K-}}gV`_BtbIgBwc+Zu=QE~RY15okF1%6qc$vbM~AR;?{YK#|d->Bujt z!rqH5*i)Wh1v1oB{r=gDa%$Ks6$lBvlA)-aBOg|H+a5~C(ON3#tvVBlypp4T5=FK0 zT$wN|QIFyqCsb}5yCw2si%%qISDxoJs^SiMfi|5_A{4CJLMSW@QI6B4|R=t1ib{VSpn9fV;t z`{6KLgDX1~g9-$6%A*`0{*G6{eq4yvndf#Np+qIE-@*wNo9MAB5VF^55*Y<++a}c8 z5Tv=A{%(Tk0`rup*`{S)f~ZeG-UsH@C}QU5aEN9`(S^-}0*%Sh!RL3qt#hiLNfLWn z(BW@jZ3x)LVmL=-Dh~e|FWq3V|MI$6)JQR26WI7SIxVP{P_N*QRK5y?9jthA;=E^% z(w6$VR>GZr7-I%SSnMvrbTJZZ5wYNP>ZkN&$JJ2EjrGvhSjQonArQe#QRHa`-2FxG z?S79pG;%oDBRZAyJOLL59jJvS|PU7*vK0`Z10qhgT z*zEA<;Lw1Hu(8hwC#=HH>6bDfy^&6XtiVC_Q6P1JEPM-mPkVz2sdS+n27*<*5_e`M z9p=goFi%E9D;ZYnjx`5M6g}45MzasoCp3}D0YQY3pK=G*qjc{&>jd}f5*I7A>QW%H z9K&+m358&f$FtvnV!*4qd>C@1DsPuq{`vH8!y23RI5ON9AdHC>1~Fo{NIzUe)*im& zm0Xlkz_ZW6nR-)wp!4TdN;f3cpd!dlugzaJ6AU*y}>ci zHL&&?RXl!-j8VX@37KI(T@vG$`gdXZw#$q;FU^IO-?n$GDP7sum^yU+COx|O&2zfa zE>y6FZkyDV`yQ`wJ9n_OoORE)s8L--dAYq7Vt@ab144dDh`5ve>OQ7K_sO@6U(|{8 z%$8qT*?+tc5^EG&@e|$({ir`-coCHuWu3vaoc})9x?T20mg4E*q4~GC!Cv`2A%p=Q zwO0Q?T(NE93?OU!bnpQa{Z=0{5VBQj!1+^lzwNMvx%@0(%eg?6eN|+~d38BsRhJj) zM8h)i1D!OtRkPV){&Anq&T-G^S`SEWq=yA+h-J~N+yP9DI9^Tqvye%MMpsC2_t;vb zFCRx;!5wGETX-7#876-l@(%b>z*Vk}uu;e~M(l9bRdtw~8w$X7;beF86g)DhaWP+YIz=ES9|9?qt&$oLo$sodfm6#dIpdi~HI3@3t>X zc9;k5({*#*<$clLV#-3wffDw1R1RQyY(Ml8M18-Ulv0gNdb5jnAD058_Pz}d{$O~8 
zu&3MHts8X%tI2P>JD5=RO#g`>t_C=9JIIN$SN50G%(1;2%*?nx2zzoYUDj!A_JJYz zcBj9xDuQ9sqa4ZxU}44jhRM34=W~%Z{B{n(70iPveb_aBIG=uTMfqpbAo}X-!DsGNPyWrtA?qg z>c{J0N><>JDPYm?q^K~JIWKSL5roBpC0g!4tNE=OFSJ?CW^__cFSt)+X}zPuaorTK zdSqovfibLMo)1H_9clK`t{JF4AN(Vj;F$29bf=BGKHQ&Ej%jaLToDE(o0}|yov^|{ z>tpU03FXB%f^SoCQGZ?yb15h75kC-75b;xwXM6VLgmQ5>nTN{m084X8RpGLAY>OND zJ*-IESZjuHCs1}>0Ca&6@uVp1A`nMg5ee_X<6gm`L+lH_l%S~j*9PCCP ztnjv3wx!9Bigo%V)nPW8dO%eMUN231)3i%`QA%zw1N(*-+6%=@9zDo%BsNDC3|9qC z&- zi^J0-@l%b+T}HUVwb!!8oBJVvFI%4nN(f2n6P9ya54uiXkxdti+PsrCz~;hfY{xYi zQQ&T!?Cv{x(i5qv=o?)*@b-DsnL*P*La`}9IA!1hPTNUGHMsG`Bi^xUHWY~}b{;=F z@6H^dBp?}dB1g@z{nQb~K7%&#LWl0mFg-Z}If^ct6EO9gcLpXdp$&%QJXp#ZFRKCQ zbY*wOD51`!z)>~5FGW23x(?FU62#+_)3;I+{NGwwe6X@DS> z2^*^`s}o^NeHRr2GDX!)*LK*5(kCja);2DQHwT8uoxS`$DMA?I+yyaJhCIpadZp5|Jdo--~x z$&Mr=DyoVf30^w%4gsGKwwB6M`@}UhuyCU&D&oBi0Y{Uj{O7KID69Cpx7*Isb_JpS zCsP7j1h<`{I54I#8;%(PR7C&$5l;^Uq-V>80~7_`V&PKutt{BzSE zrDJ#}$N;N3k-|MQ*F~6hDdU|;dV4ZtVAPplHb)@n9pHtw053!;RdVrUMn?olvs=lk znhcBr;myk$YXiMMU*5nk_V0@2xJ=g5ul9qXGmT)rQ0VMDxw*8(vn`^kZ&f zi`K}r2#MAOH!Q|BqXfg-9-EHQK#Gj>UWFuwe3OqW{(UlmCmi_l|06``ZU83MfRt zLN7v4^wMi0M39ab1-tYjLO{CI(2*8}fQVwD3Mit|ODLgZK&pUJLkW>4H3X0nV0Q4n z@Av-R->fxjX3flBlylBL`|SED&+}|Hd9Zjti2|J#?{4n@&pZF`{5Q)^(tG(wyF=bCzWb-_5Gy+pLNy;4(=eoV7t6tn29G(+ zknXL%n!kY0{R1O=99MYvL#B0mw?>@I{V<&S_B-#H3zOt_{uNtGuFXDcIYG#oSl&^D zyt?Yj;`&_q*74^pQD!jGHw@LpsGrf#gEOw!R9$H6T{+J01NSc4#rJ!|7+hA6>M}m4 zoIO%*GJFRy4HmAS#21HKN{{IrT3-Pzr(rP^ve-EN5_T3I%wgYx%qn^?iWo*kD#Edj zeTwChucCeSj9;DY?krssUzsU$xv)CTx_im8GEcR})lvXpds_UIU9vo%Kc3PwE2=RT zFTX%oWEfyN7;>YZlB6tgga3S*XoHGzxOG!9*`Nuv^v+6IE_{CNGYq&<2NUar+T^~5m*?EBhKRi)O1% zWJ8{TB*yG1qszMqT*%0-Ur zsmu!A8TxU~vTQ%%$O!@qG|a2DS>jg0CB1j+ACwJ8L*EOlo?;Z5S#U2%^f1ImlAYo0 z4S^HU^9uP_lV@5K*O-+p(~@HtD5uv-P?>v<=2a-|U~|cZ#))?#%8<#C$Rl<-fe|GH z6zZL;_52eYDy}L!hOKXfVE7;^U`h1hOscQFH?6~7D>~jF?tD%!q&)b%=2>UG=jp<* zC@rx%0{os)xe=D^nEMZ*ij2KN{gcCCc@J47B4(uq zQNln(Vkq!1Er>JYHIyqI$_p$%HbBvHA2C4tgctb$uJ!$m8)@Kv)Ktup!NhDxu+v%kQkU_}Ei}`a?!hr6> 
z^>-Qpx@Uhqw#~3V*nT)NVb+V039fgt_VzulU->qJ!mH}wfs0(FwH|=S zpNYtbC%fIbnPZqog6-JYtZ05rwO478cj)BT zv9a>tU4N{5rg4V)MbcHU4+8I5PQC=$-PPaoR~dKtTJj1a$mhPv&s?-r>8vM$bQQM= zB3{4hTZn~0SiLSZ?u_VViv%a!kw%FgvCGfIF8_j6g6iOoUj@HE%MMQbfBBN7Q-^x! zjjy@~rCa7srHkf?CDLA;YxUfz!wz?b_AAdv+O`T_zdCEcDg8?m{V+^ex>4rj-_LF_ z)JPm3qfW!3^USbS>YvU=oD=^s4rki2m3u0d3m-0CuHC6RVa6iocjc8VJ)RobwtD*V zjk*V&#Ax9q!+RnT@ab2!)*Y}rNvxNjZC_r>KBb91`I-BLoz}B#OIE9U9^xN4G=GM9 zLsv~DMW2bS8!jCZy!B0pm0Q}YY4sJ{}T{@^&@mq=^LWRMjQZPsQs$`wmTY2dv1d#FBH8CdFcA`bjcYmK4>!O;WO@ z0Ov9Y==n6MbaLtm^mtv^p)b9uFtvCqS=4F`%Y8>t*=@}HLv1%H;)LO)i+g{GUcSLz zx-gyW<|B^0j`nuv*4k{)@5PMD6BM_NwR!C!aTlWl2o)Z$cZ%Dk`IEY)*(c7zTMXNQ zC*^Sy>e}7M&$+Nh>21JlBCva%N#jYUX38NB@C5EIqM`4Ua=c=YrD_8vS?1|>pG#OB zvYEKN{<`z!dNhAEs_J#rQmM-kHIt0GMgEavb6tF{5fs9sLZ2Yb9s@2J%h)Q(81Su( zl_yH{>)|=;8E>cc3TQDnd_60Lje}B3_H!LYU}Ioid11DM@79B!{4x-OzJ|_+rl0rq zr;OTJwO+i}s9r%xg&*Ftu@!&%Lae*N)5D3hl-arPBk ze0yX3oiif%aqJ$IONnIMTPnoTge@gT(PKScb7dell9yM2t*G{6?}b<{GpOdQ+e(iT z=4{;=2yB1x=)UF`!SdY!iIH)H(e6T>VNWW)#LuR`n^k<}CG9D-q(4vnkBo?u2Qrn}UVw(o(iF4ZcxcZv5G_>@URD&&13a z;10d--=poFJ0GkDX>rPjLSR32wa`XZf`zluCx0sZH-OxcIWL`B{p0kXpz zw>cR@%r?PB>~>fDCE$*SHHGy}NBvIsyluSShm87ixFdR(xPH4pqf*&<@Y-Fs9s$er z@zsg=#&1IdQHrOvm-5EShWu}ZPQ;$&RLx%QUu%uR1+f>Z_Er+*ML99p9RHiYNR z2FAU9JWy)EZ)=LjEx;wLJVtS+Y5tR?LXtISy1Gj4>m32s~@ zUhK<<=!FE^z-K#9Hs|X=#zLZEi0aB^}z*0YeR}K|^f%tT>%a z5hDbr(0+S(W7IyScIBtez62t~riOF>_owJaw`CQ@h#m2rIRhrHVTomP;j^8at~_t{ zQf0lyr~sa^K4j=L`?*4&A6mPUlg>_j`#Z95vc3>#BY6K;g+F@q(%;E9fBj-AGv+VY zV`qg5O6;a(z~b!~1DfJnF@=WoM!iFE1d=)Q%zZ&#(F9I$OQI=Cq^epTWxis?6J<}} zXlVs-kR>dMr&5|NX!RFd%~EdCs@hHm;6eav=4oLN#hlmUG9O)KVlnSiF25M<5=7C$ z4pC2dRL1#&@BS0cI_rCzi=)x@x+%^<6C`iPJCZv#qo|%C@~Q!`p5k`&AkUxym>MY; zX2w>gmer1%2%4Ol4&LusnNRP4*sMQ)!fAzaAKX}@Hu8{>X{h)4T{ZVse}470?dMxU zK)XDXxmr&0EGR-MfSc&#VcqIttdJ4UP57N9yOs;r{isXbO@AZ2w}HQ1MkY_qT2{JR z4AyKk^`LEIo0BJ)Ntm$>LD#fd_JVX>gR65il?zqv9CTd%&X8#?Hu#JatStn(8Q2sL_; z$+^F}Ficp32gsl@H}P=gaKX{~r6g z?F6SK_Uh%Ga2s8L1!I--fg82a^OJR+gw2&WEanqq@Jy 
znqx4#+U7*z+MyW5!5Yd`p;Jp-;jY!R4m8-@zS%clRRl-vlZk#ALXB+wVl!eOvXs9f zw=H31eNSzHy`aGVYIV!k(+$C6LPKSwsTGiC(Sr&>{Op_e;kZ&qd%rDlt0#eS&H)=^ zZJYJ*%lPPbqoiz=jek$0d!@LUZ|#l30vo@H@2c5zd6ufrMz+5UzCb0W*0IE^le28d zHj*~y^2Tdrvu|hZ0F5}LN8Hv*kIhiAoe7~kaQl&@^E80#_|yV?W!0MSE5sSTiNE~? zLiYCq24uDNzQ~wAdsnmc#;HVhp<{3Dm90FC^t9ut>Rv>Q6pD=P{&HAlsd1oXl*?z* ziDVeqS4n&)uFl;wU-zBq75l;3(YNFwub;h(kz47VuEW@hEj3o22wCVB6_0SKL%A9U z%5?k0C;l2tJlNvw6i!R6a6Rr>8MNhZtY)v3r0xlK92?)-x!E`%yfscN_g-cnA7800 zTtG%gRrjZ#>{q{Dm-UV_WV|8zKF-#BirU(wHj7W*U%XpudV%CxuXJqofFyG}{6J%mQfxx}f_^v+Uod2X6m*+4(3Y(K(bkzJ zLZf~G#wFx*`QKLQTd9TPBKtBOn}p7$NfTwYN;Sg9;LnwMqkZ|<9XB^m2=b`Uq-=Tr z@^Y=BghBOY`4{iKotpWO`@%6zE||^T-}@Op3M=YH2suiLE_G9VM}ImPfPb7&E`&lk z%1#U^zF^Dy=tlIw-J17zyPI27)QnW-va3AOKyoTaA#NQ@K0+cXeTa=N-JIm_u8vH5 zY;Xhh)3D9{lIeuh@hoDGcU~Ra&tIRWBb^Fr@sJWg1?@C3ms#jHbN#Bu#q5~!sSb4; zKvwO3OMZ%>to-^xe1p0hy*@uMK&@2>(U7XMZ|j#H6t=Wle-kV}xfU)Lc^xp##s$4gQ4{0eInskiL16G%Th`T2St6@vr}djly;&1yrT&cZ0{mlNi)|Cz zg@n8IJdQl$iWetI@9D$_x|4aXfPiHB2PrF6o2_r~KawJsaH|d1ppuZ5%3U4;>u>7i0t~!uYQ4{) zUMBks)m|4&lLW%o3&#WbPXz}ynVr-AZ4A)0{oDf#V!YBUW5)U4JVgsvJ`pG-x)h? zT?4#1c5hQ{;9FmB;#n>!(g)47-{v#z=HHiGd}QIOvxqjT0ap<0oSzC%K9@Eb+6$mV zlq*!HuK+IcdN=(8mJS50g*&V|Y&(z?b3ux^l`-%${|Z|n=c;MGp!v28N1{}B)?swM zKa38%QMKEtWJ3*OrMLq>I4Cm;uX@Mi*BO2wl*@C+ zK2`X&xyMFXu=z#P(d!rg1_hnzEgmXCRx=5lX7l*#I{$+}vF`ewP-EyaZ=j5TW1Xdk zeOruH54$5G1noM{{B9-}q2yEFu2P9;kTb^iB zcz(yR=2eo%V*YqiOS{BwL`DLqQ9(Nu7YLl}Y_+?vSicg!JdvU~>u;aQuF-s>bg|_f zxpD_>lk~#~402j4afv~tX>ozWFJdcbO>s&)b(>p(_Mc}}*&VL}qDC11yOB;PDNeI| z!Y9z~%Qd=<>>%cE)9wuV;HzC>%qpS{-JrR{%-gHWdrQkvK+Trs(M(IZp;%+LQrhH! 
zA0y-S3xwF`4X=qr@H|d5R5=nw?fWmI|JDC6`gQihYDN1)0Nhq-9|W(ze~IJ|cI4xa zUcinAH%*5AGW>)GLFQ4vBzw~1ht~x8|LpfCcbT`3)(;K`hX>5{z8fmK4ei>bCB{i~ zo68{P-E@H3m5d#P!=pLVX^G16J%nChT8@XY-YCn-@CU;GDawsyV$?P1?BAO+(30}Zp4beuq4 z<*`m_x*ASf7zZpEUHLrB`uhx3^SYL4VeYP?Pgq?mU7Yc)ox6Wb-~Z7V{}&$67rO3G zkkelgZqp{W1_}8fVJ|H(k2E>?!7RF`bF{5rs(*e|;H6d{e=+k}speIIK>K zh^#fzIB$>7KQSuU8-Ek^ot2+;d!M{#3t};{6LtB2+`#=s_5GRs1w%vH$b!|9F|w08 zw`mTe|J#dDB2b-qD4xrqnN9E+aiQ|;s@re^CtH0ju62D;z5Lp zHonK#hFgy$98TDQ8tLGw{Xu*#Gq!$7DyAcLL3l2p^YQ)2kVPjeej0L37TJ{o7;2M+ z!MoclpR9KO2B5Wb7Xd!BlZJ1jNvr^h@l(FvP-xIwlBh|8bj1V%RvyIZB0d!Qum36p zzFBXEI#&aCdv$)iZe#K|b$Vr?q`T_yDR7Ur8lbsl%8egt8io)}es9ngRe06w29s3{E-2SooEv}&Ox6)yd{3$-;dM`>ShQPQ*&UA^jS z*N=s4Psz0ikSwpPsjK=fE%1!3gXdx)i2S&6?kioNR~*&>N^ctmU~g3coY&b9Bm*BE z%|XGm;Z;pz3Q3AM=AGQc4x=Iv8+}$0FMSu-s(rWS&<9AJMUgi)?#A{{_1;R+6s?ns z&K*l5pJ7Mrb=x$)9*ElQ#;FLb(NK2^Gdcj8o+<|u*nBL}s53As=t6acNznj49GOAd zW^AT=4nYmsmlFwWIz=n7GWEx(^`5n-e8<(h0kVKX;n96_U3BfT#pv|^gIYs2~(>2 z)?1d`G*G**|C1=YoL^SpKJ!L)=RI?i2C|vPOh3-UHptuMk#PPPZ=QPJ?&=W-&g?` z&4ZAZ`L?8htZo>xnagoUSb#=Yu$gMGnd!TWr~aktf6yx`P2K|*%w~mWCV$CM zf4zEHc(d85O4Wl;y8>)(Kn8NLioQ=i?r^YTVJsubh|!TW#j6w1VBCbk;W7L#E3fPR zJPf1KgbPds_yV4D*LThrvALH}*RU|I*e0)B_58k%BGE-(^nMMD?`ZREf4^$e%18DT zgr+StF9-fa;?cs#8=pKD8UE8H^o2(_f9u^O97;VjAn+B<$fK6R=rrprz@Yw92dm@{ z$5yZD>f(AXTvFRmafI*Ex2HUsFJV)T|Nc!u2uMo`$(C=1ten^*=O%g;U%{ zdQi15JmkwZGX^q$I-=fQbvVpM{nhF_ORA9{&h@6Qfaq$6o{ms%%t>f6U^I=in-znTlP(Ml}HbwE1XM~>~yj0Gn@z7utNmQwl zQx%f03S5_^=S$!9bF2!z0Ri{0c6}N9d`|@{>aXt{gxdn*np$hW&w+`_88mhVTZAvDdxPhrT12ODf%U ziGxn2S?YJJog3!v?g4b=eqXpcYc(ubjE}7D)QK*DpPSB+DL*2jEfa=8-ybgZUw;6* z(ev0_;yGu_G2y8%Im~{66JMN8@sX|eMq-Geb3LITINEZLlO z#S-~7o-^`!1ZwT(gUOuDyQLfWyDF;2a*ZGkZjP}E4sL! 
zes~z8ZlG2i=l++*wy516+s3%O_T4qkKaLCe!gL7IJVW?ZYmlu!0`{ZA36Qf)e(TLDf{%;cq?_O!_aTB6nRV=;J{Dk@ z7j;0__9#~*)f-dnbTF}S66r3}Q9` zX(dXs1Q^I)Ewm#zb{jt4{`4R!kVaKMK%)LK7fR-wIYy=NSGXY}+WX|Hfg0&#tly#O zPABju0HVenrkg8c^!vyZU4ea8SF2BL@GCK%3vC`(NSxE>BUW;|UKH_q?DD_n`0}obrGlb* z27~P0V&HcAFvm<{-~Wy#IgD<6k?l;njMiYHh{rpYnc+|Wi{6_63iJ8%&ToGzlm#rS$5{)k?`!eN`-h9*??JYtxtV$(cg0V5xE z7-^m5=W(ow)L3Jd{azkF;fi-Z(QLHCGqZ^mo1Y1r&vw+=Z&2Hq_C3n|03Y2mMRDFX z(SleLaKaah21>UYAXV#9K9=mj0H|CNn6d=&J&P&v%oF8v9@(6AG<9P#O})ny5$YKQ zPTfGLW?rQupqrKjIX17RtJV0VjR{;yfqNKJ*HTtgStbC1xIjXJWC~5gV{OVi;FBrd zqHwG?etn@c?yk?YL9g=3^;;vky?$Br2Xz)&YCJ-9&*n7RwOOfwzqIssoIWc%aYawd z#RSMJRseQm)YcF}d%s={K#Q?OQ?u_mbNF>5qxhIunGQ|qLmpq%!YUcU-{X@`vqiO4 zT@2TUu?f4nUQ?d7u~DhAalRSCAIJaVd%8_USYeb3M*SHl60u#x&dZ*?s2x&q-6vVb z?IVvK8}s4`-XN&RK_6udH+avaai**Kx=u$mycgb^r(0U9V_!Iyyab3iK-eV5=8E|C ziH+9)bBvMF!?r!Q?9(K53Z93YCn!VWP`W95${l=;6*t2j!Z@SV*#x?ZBw8+n36EBA z>F-|th4kH|voi#438)J;-IT6C(oxkU#F}!IgeGPLiDtQpWv_DqwDB#~iX|}VH&Wcj zoxY$IVgchIQ{5&VMG4>lXR#DT@e3bra%d<7X~y`p*Gx?9d}ZI^L;=(U;gKxQU-O0# z(cVT(s$3lIb*RX1wNsadDxAttBYtaIf}Ag5hvD*h$Yz(o;P3IW&ztZ~-q&4&_vwQ_ zGdGd_L|t-97s{o0qFn(N)#&~jCgNa5gg;;&NDTnigJVSva&}I_+eL+!JlUj1nf6sL zcI-Pl5K@}$ZR9%|n9oW&WXq_&1kY;-PspOrx}M2dQa>IXPv@#;U@yR@^8D}&F70=M zO8%GSNm-=^lD1ZSf|x?^@Hhrg+x~5%!d(&iE1F*sC#& zQJ3^+j?nJeDvsM9DR*^opX*sDtWI>hFZR;m5E!IncapE(^C2IK$7368n!yQDqh*w{ zp{N)Dq-MlzjTU6!$ji`hLD1%mYpHeellQ!= zXY%B(Ie&}^Rju3M0foHq9z^>#(jAh%RtDBZi3E6p%ez{$|zRJB7U@Ogw5 zw?B>1c&8F#2Ozh%WTOFr&Vn(jXuSI-zBsBZ+{a3IYbeAwJb8SNtMQTC@q~Yh$oG&u zPE*qf*s#OuI-T2F=G}iX9f|ToeMQG0XkTiM)}SqeQAL2nn8){U=i`q%4eOigqy62g zIl}u`0$zj=#K_}LAL1g3;khPH`^|WoC1SF;`(#_GZw`d|MRZh2sKG$*gvn1mUjOuWya4I5ee#!|0C$P;J z+|&&?A^Lqzn}SGZ2uvflWX{=5`hINP{6(~68S z;cIc8bqY_HqWj2x<;LFr8^_Jbll)_?F5P{_D`*gud)}b3&!oD4A1{nCRo>Oy; zV)Oe`;q==G`RzD8Q#TtPuLVN^{F$Ic-omK1@~oK+>Vn~rXMkgGalNqaw-IkM^<-{{ z_%Gz{5h_Y6blb!?Yg=AuG5UV5h$j$xR^Pq#=Pu{jZLv3rkym`zVDzC2i# z;Kf+<b3@`=pD2MAz-=k3~IhpbHY>4Qhdc)r(Cfm(!?;zLAsNbtEp0Uyqggbc|0| 
zfXHk;_zIRmG;F^|F#xfXj*+DCd%G0EhAu;JYoBHKdS=~WBEl3p);h?@>mgk3wC!fT z8(3@I@Tem`g9;n-%c|Bra^3!5vW(WtHaIja@)`={T`yUjt)vX|M#bRUt84~8zrD|k zE6xz;r#n86EQsI4*;sE$^{aQgv^P8)b~1*#&TH@hKfsXZ>@S<0LGF*%uD>a$2leYh z-f|yq%@oA?RfiQl>UpHvjV9@5*^l&iHW@d3XmFuu!7SLuREXbf& zcs@IWG-A1HK?+Mw1HQ~es6c29tdr1?sF=IfI{@D-g|A}`iFz-FU-TT~m|SV7 zkRf<+zc_T`No-yN*r!>pvmMky2kH2I2?U~EZf;J#D&ng!+}JX1>tDLtnh9xlZvFJ1 z=?YeS9+%yODB;~khn}m@L*WJ_8%oZICAZ^NFW2_eV)N@bJmT=c6l|jpQ6oF8Q%9BgeC{&vm$ zL|*b*2*Je|)>9F5xb*PKG`6*+{Pm3C$VuHZqmI*G4yS>gn$J__Sg0^bG!8<^e_Skb zwW_)^%LJO{>r&M>56o+8(ufyY2C5cy@F6F8`;X|hF7fpN#SZLa6LqI+Hl-8M+%K~| zZelOCcss64=!M8g#pQD9nbf!^*Y?ERrE5#6bwBL5P&V6L^)9%R&m&pA{YwQrhkxp_ z)o|nL>0^_{9q1P7tEj1>fsM8Z#2mh-iWe(c7(7Ph+8R`*5c=8i392{vxSbqa%zFWc zBYBLG*a}s09aAE@ewkHrYNq6;`lgNj9F(WWW9R&`PD7Ut0XVontJd|)m z5)!~C-YJ1ep0sG;Q!Yqpj+&{gsn{^>?|k@v&L!o00r+5iR^7$k)czB5H|i9n^I!MB z_E;q-cO!gFE~SRS>U7k~JNYWZl&;7su7vlHCwn;nx*t!v8rI49t->tCvTqwjL?GdX zcH-a@pZdDgs~CSKmknGd^~Jk9M>eArwDyayyF_nyIUUZfnMn&Ee0htm*MAqqoROA0 zvYqf5gr&mBtLJ->bAWX9zd635o1l%-eN^nJnW z#wO_cBd30@yO)x0I0skw@Nv&w59Wt?Cu%56(vmp~B2d`QMES3#btTmdwv)Edb z_+WM4S(mC*fWlP6Po9$8`w^A9rSyg!Rg8b zu8EXjoEky-*!=xI1q8;R&K?85WS-m~lNe>PuRC{bOfk+ywcfp7s@pwh(cBAwPM-eQ z0ms+4&tN$+>vHogvS5>bA#%M1re$pwVoZY7vstMKtb_W4@o7JhL3>vA0)~y`*kzw~ zyYHM>@#b;0KAXy5rT=2q#xDT}%379}T*{|{S{43i0KxQ`DdWi8&mDdVTpKD6{i@&S zyyc8E7l8IZVluaKIFUJ6`UM>hq)lW?4o(PSjpoS2PVmCjHDA(mcQ%#@){&0kJY`a6 z5kG1jjF@S}XfvLVG`7Xbpa^MGoAqBmXo>Ag>nng}F_YUhx8YkK$!*C4V#(Lc6ao8_ z5jL<1lMiCGh-Hcu>GnIpgE}beLVoo*V$;4FoqK>s1jiH;Wn5uQTRGeJ)&SBwl;;?v zBJ~5Xo7jYW1XrFy<^&yf&?=X{C-S+gn%K2Q@<^^-!G&my=zXW3JhjRO*LcNN$q}xV zwN5wt+Hd*NIAE>mL?(d$3-CS^VG2*^KQ#1oEUY~K1EV&DDdluNxO)nj?X3acm$3-_ z6{?YQo6+#8Zj?FtVHR?7mvvYz^BB*4hx)CJGlkX-48Fw-5knRF5?azA-6!z7pCJ=D zS;H7~B7sx*ma@z6Nnjv8Qdwzt^2>S&Gof$f5WgCY!#M#SQ>VVJId})K{%o$imIuCk z9Nf}WM7!v+TNiO4mJ>-(%$t=2mLCsbTe2w^9@$y03)!e(^Gs_&Y&u|Ehn_D1b_yH_ zV1GpiIO1@T9AAC{+6ScX{Z-F_Pz1;R$4~hWDJN3jR_#5Y7xI#2GEFU{b5sllaMZld ztbhT+IE=`Q;?CoLc-2&-jeUCaDs1#;K7A66E9&asWAaU%Q>rzS 
z8@UO7nE@F_qNdNkxXVg^76HRYllnECdifOsRowqbjeeKoI+8^=7Q*8ON;}kJ`b|AU zM)Vg)f|2*r3LDLnsVK^HG^=6rWa$ZdTbL-{A&sB5_J%+D6l^c=Hmnxcv*-wu^x93 z$YzY9EbgQQZ_j-m(-T5vkI-HW3{7SJ_^jPitsi#Ew-D~6E{IzZuRuO;t+E4PPsPN7MKmu|CJ{0CTB=BmD0J--Ea3&25@4P;S}bIGR{cT%$#gO9%oM7#g;CCnTlk+;M}^=O48ZTfl3-4YcDgzdf%Vc zu=$p%Y)3b4s+1p&^AR^>Os%Sc50;SxT=QkPHWeVmH}`FD8i!pbzsf?!5%IKOrO$)|UqDE<6eK5K^u!02**gFh3(D-meX5Nj; zi@(_v)y}q5rp_xLn}IjEm%_-gdazxW3Ln4@lvbOnSxNQCXFFPBLt@T8WxwGtRhb2TqVO=J-1zdNn4d$AJP z>~XWJ!cWqkWTxe|yJ)MBGIF8Y=qFv1U9-eLy|(e3)@v{AW8nnFTAUWZi+TKnUGMWd z6b#N7$)C0QrUxuNJ8j$57iw4#{D|hCXPl8&Q!^qNPq0rI_O0Ah`K(~>#1WUwi08fk zPd$^{Y4v0k=M_Ie=wIpe8-c&N5^GD;v!|Z@czACvpFg;Mn_l`P=)7@YkN@es&TNBISsVW2b1wm6jcJLty8Q43W17FN$@bR|o%&SJ z5HvEIt}64tt?=qtuX~;M`2B5m8}+9oGzC?T?CqAllu6SV2_SikecU|@RpVO;c|qNu zyQ879!`I}~o2%xyR+l0`{m^H;H&enwI+8dV(4%u*BRSp+_F%dONGs=`pzy%O+fKF2!+^B%H&Z{$Aos_euY!vE=w}oFa0l4xavL9fO zVHj_yr;Q{h;^6Kh>jJ^JIEx%!;laQW`qef3y#B3rg+X0};QbBsmp7?)^G`I8suv+S z{6wCJHF1H{?xbyjbBf25W!_`M1@xXvVB$TKlj`FW`L@1wuEG5W-t+4vG1?(fysjhP zk`fo2{3=|3A&Y|DOBPY39us=7ILDoA4-<8K(}htMy&-452ov@9(dh$sbPjvd(Wp+T zvkGz0SPYJOA;h_220u7SZLBD*X9uM#DhqdYmr7ci-=iQcLM=!*kaQdM=_CaIf39-3D;Hm}i3!BzFTjGU>lRK1k;XTGwPb0P)dt-TF+_mRi-J}QNb z(cu`q?HnQk>qyUhadJ>L#nYWJsdFQIKpX`QSV~H^%TRB(A@z2bV_-$ILXM(-jyGoA zxP`he-Y_HT!{!q`I}v_Diq&b$J8D zlVfqvJPX^yD1TBJ=lno{UR920xJ&bphtD%(k`7P&prKKdoGecl{r$_ki`m-A3}#Fz zP@_$cVw2UgQ8P1@eTwI-EO~3Fi*#;^TMLc%+OOagH&P}D(ug$Gf;?k20TZt2ISYk# zq+oA_&f)ccIsg`TUbQx?hHEq}-8+E7a5{y=>oGA+E!}b_%)I!5KZEYZ^{wbq)YiMJ zqb!6&xn(d->)+eQ(>znJ?k-jyUc5)55T;2MC#J%yuaDW%Z_z3r!!4s*WtY&PJV}>| z6gN`IxMVuuwKC{lbpbwWG|^p};+zt=LaMylgfLSI&P|^w53W#d>v-6}KJ`HHwM#(H zqD6~D^%nz1=e;#7t5MoWwa_mezphQkkfL~JlR;^C+^uxc&?stTE(cUTpF|eXQX2t!r2^sipeBvValX z!$-I>y?eJ5Ocw&@^_(xt?bmn*l@b7H)?|-XH6Vrkbg8xVlX4y~tT2klD`e$YkDk>& zif0|gF_sfEcQTCJ^_FQH$o2u1&J5jO?PmOZtYlW7pVt`h@440O_-wINpzi$ov z9MFw&>_*ereToy{EUETTvnZO9O-mpx#NI!_qD+7& zeROMo{O~EV{z&6NVby`NTEdOns?56nVuBGN8f1CSDSI(SLk>OKQ&o(6$zu7{s@56F 
zKYp)-I9u9hrYJhqADL`>3h)(3xn(6DvhCU}vx6g)ZeEu*Rhl^Dcu=mVbt-0ooA-qe z?TE2XW03Y;$-SA=Z(L{{=2S`#(pcFqL_lhOBYa)y#&7%bq(5jIpbd}`K!4qNWp9*K zx+3Ek_2kULP>BWOF^(v>qsq@I3oQT$a(Wg2t;N0`leSl|zg}0xePW7X`I*pHH2W`A z;Jjw`-KuM5(%t3sPkXP|=nxWMei8%|K`sySBLv!n{bsgz+^l5VC_!)&{zD@4oEq3} zS$Zr!y4_>Ex-0Cb3Q$C`!s?bXQRjINt_Xfqw`W-S>qp#TE&p7BsENoLWa{C8AW|x$ z6+`}>5WK!1p;Fzx*X1q8fL`&V8X<wYTd)OLx*dSRmi)YNx#qysZchTO>I8+XkGY zKm?~teeJ(8+|tULP#!doyXkN6v$QoY;VLJ=W$ z@28}k8qv1aGzjLi@5pDIFTv03M}Pl;F&_O3Fc-j;zdqb=KA;$8{C{^t=-BbsuUBrJ z&=pYkNq?_pNvn{U_jRORBgi~BNg5Z{D^1YZC`0|oA zmBZKUTR*ji8axBg*hHE_AR&@tie6^5K>!Hl-5=&T0`$_1Xa}Qh{wN4O6LF5$AW8BF z00jddp|c8LJqLb^+JF-rm$JDvpP%SJb?mlU7_)%ZOWM0Bmg3!SvUG@BlZ^=oFwkQz z^dAD4)YFI)tN=9SEndcPuW+!!0}9znFg^{cXl2I~NRv^z&!x#GZOkt!+^8vX`NsbF z9Tp2FbCv|=est@Yu`IYzW)A8lqOAp-dTlM~%QpaoZu@vLM<(w_D<88vCMw_#=E}s| zEFy?Pd+m*mJL~|r_Yt$L?k=J&LCey}P8!`!;sF0V@B0j}zo3Oaw+{Y%_to#kN0

2$d}Ww3Vyz zb&Zh&MP>;GkjcIzxF&;q_Fc>6ucqp6wily_Ukr+vm<53PniT(&&SV%86^lnhQoa&M2-#R4cSv@)?oKjWC=MfbSW zfB-k4-W>aqh z<2%(3l-QiAwdau#j@VBS=KgN!dU%V>B^y|mduV)GLcu!Y71CDE2hSYc-uWSa6ihPj zxv(pL{Nh(5UP;AC$C{N~CTfjx1pgu-XebX;P6a^kb1NwLDcgpH-; z0zr|WtX=>i+FSK}7{+=0Eb)1sFi?2<7|G1pEWmrZ^C%(k7CqJlN$1OFA6A!FT@$s) zy3vIQf7P8ij=9myu$11Wkc*Q4w(AA#^ahE;$1! zf#kcUhWzYIDKlyG%q+Hn0*-5oN;s7f#Rb-8%H&E4(S1Z2>y1P$>P^FEj@59YkvrwB z8Pc3!E{OTw<2thR+~L}dEJq9@byoM4#ahv(Y11U{A?Znw0Ix<=@Y+*KUEgb=^1`r8 z5m8UPi|2U$)WZ1$k3>>>wYDYqo-=Zq9@g}fxo=Qq_@GVP#adgcFIdAzaYDoBJ0f>Iu(Cw*rMYGCs+41Yz zHq1PoGmOU{iE5QDY)y2fgHfhmRyp(ZdKQT*D&lk>+~1VsC3dv%$tI=Z!@-#tCi)Un zdX+=Lfy!4Meh%WxcLyy$h_4m~yN}3c58SVHC$$FlDT`JxVMHO)gNCQ^I2hSc9eT#t%)!sSA<-;eCW0|SYHwdk4?@xu++%XB*ZFq zt^)IKA_t<_r8w+}v!B2)VBoID-*mFVgwgG(0lb%jX#8;frDn+!KFD+1CwT7iX%W1< z8(YbVJhJ_%g}a%VyZ7&j@deq|fTOH6{tO4Old>6W{bc}Ht2Iwm<-lePQjfQij)T4u z@x2GuEd^^xuH+d|u;x-TjkQ|Ocf;gwP`5~cZf?`rUbfIIWsUvhdD;k$H+IJIL2`K}oY))ki7yPN>$z8~HXYEOQF%_5QEw{i>nSA;= zZ(e3sUh>0hi#PualqY*kyY`qfMfmUdBK&ikD~zyl$rf1|!>pHY>V@~CQ)@q*0a_|& zB;+Tj=z@6utV7`apNpd8vemn`J{mb)%2N$eI#2MkPpdu2>>9iVfmvC-X~aG?!QN$3 zdpU!NjP<*4N%#*{1z@%5AtG^e`P>r%dyYH=zs~dH_jKz{)?(OPe>SWodHsgKd^d2? 
z4eLBcx;TMbJ9qvxU?x0+T>~WJjtweIHt$_RV{F4hKnAMnWCY4i6jUu^c>KRUnKf2E z#4%R)nYs`IOg+E6JN>3@Tl{zWy3=~>=G~O0>=btaq!Z(wAbLz$wE#+w{-U zp%c;+Usx1Vjz5Kejvv`;Wrj#}(|6re=Xn?$*10EkzN;qv#})Oz#aF@;Hu4R6kt><6 zGT3SSS1=y_XSV^eai~&u@pG`+vsRwC2?!7LPWpD|K?`z&9t13B#{Q>mH)zy{pN3Ze z%bDr3$cuM?S7X_lN+Ue{KOZmuzr9#M3>tEAM;AAO0#-1m_qZk~7)^R$( z26O4ne@!s^PqeH{3h)$Yfm>p>^dTRQjmDi*r4$on>Wi}K0bv~)cF$E1$m9LP(H>4k zW_k1)O&xMfgIkq4^c4CX!Y8{$#MnXHdbiP?$5kN5C_PP(rsEC^ z{#&W$&wiI|2keT@vt;3YvM{^Z744ZViuyj~^KOyC@9i9`40>j#{?8usqAD#b%8|)A`I7bdjfTM2p0jn7bv>SMrYIl~L8A7Hcta!^E&`Ta)PTjq~I}vnfL}mdn z6-;<*R%4c)ZFB;S+2YzEpoUQb<$<{p3jIJ42L=r&;^_v)EikM48hUB9A6u(55BRqJ z5KJ?`C=!ZemL9e6mWNmuk~vif0$Q@Oood{AZ~mEg$@GpztZQ6HdjtS^+;eDa)xbvY zR0Y=UH%Ts?cA`!>-EV$s=8HV6gHXY^)f1A4Ak-4^blMlFYeYRhA%1LL9AIgmJvr}t z52SBh2McWm0TI2$%*ze6qGvO^;M5d`v^aGPPHJ&E9nIZc&L)KzQiZYf;xXLnQXBb<^8nVsU*JL-=F*APW z)OB5->+`+8_v3!tf86&!J!*O{=lfib^E{5@`4W_ThM>vPt!YvjzM-cQ*p&InUIVC!65bo0XlJ|7}_FBeJ&{dSNh{pQd zR`r*m&5OVESh>09c|+TZ3vFtr`b&>E4k;))ZcQ6miOO}K9Zea@@4VjB$YNy&Z&Igh zG1FI{4eP7}SIJ9p7IeGR_n3atJ??bLF)2800VIQ=Xf_uCyL_m0S!bjgJs_z8het-M zXq=9$*Y~5g0=TcN#hKBzez8Khvvcp-f__(>VZ3DHj{l{R7i;feg(Z?{Tq8>)4LS9# zX@;3JlTyaV+XnYc!dF}yw(9tIMtw+{{BF%Dj2cg(i`gOCc$ahBYU6dfLs8~#%PuJ? 
zbywZq3a{*ljpP0!h`X!=evv!Hta!NK)QRzPJPeS!9}--p(#ac$>ipJLXO&=h5pLl% zMN_b^^BCLMxV~0w4N@xd7W8(2zS%oiLr=s3fOjkU5NVNAuN$mEKn-~)Ulx4c;L5WKjJfaKRg z^A?QzN~585+O%&u4in=!$gS2=VqHgEVqI)r+*{mfpm9-3{mw?oSrOwDCnyv!yDNR{ z**xeemi1RDHSdxc=7&^Cxq@&y(Mw958FxUsVwu0X(1Kp6Jj&wY8vtX-5E#ZaL)jNf$4X*gZH z00y@K;n31L_7P@-0x=4f$9lLOl}PcKD0Q=3C-Z}Ji$4@cN2-cD$rUN5S=~B4cVhMr zJ86d-&VwLafhW(?-k2<AP|)0Mt97v{ zCcSiXzo6V_*tU~FOPGGeaEL%siTeDXk$fY$DVID~8U(r}4ze_V7o9#ls`Y3Ji`tui zlg5Krb19yCz1Hf*!3NWKVfz|q)%n3PX!vbRVqv<-^mG?(^`y3u%z#RRF=3=6A%d&hSJh=lL6(1 zviD^$qp2P?2)e%uNywc!8ZQ8)-&GoIe-1y0UX#GVd>lqw%o7FS$B7$wTkG0C?AgGo z3G%(s0f$cMt5--7J@M$;DvwxSRdssJosoKv)4cG_!BZfF9%tG0DfE&0$_+;AZcLO0 z%`S0hEDD~q2?sHH%5b_$?F+TohmQTlxsy>-AV)-?bOV3CWBw`L`Bc*d;T5vxpHjFm>0IMK=#ic4H0sk~G5 zuCK7l)a4bLeg6G^ODSL&!v1NfU`74&L(0RHdDWx$Jarhd=7c1eJaG&;_LHU2Y*}3< zJH(0UQCn|mMaE0>smX;T z%PlEuW`OJP4p607EqwI2kxX6V+scYT0E19LxChN`pC$ZXKoGBT1J3LdA3Vg)%8y5A zD#QOZ%4>}`8G$4T_aWGA+e#1l^9sIp?JshAPHh_sue0*=i*y3LX5MR$gf_*6a?O+A za`br*MH0b`*_Cr-zR>apd}*ok^TyQ$lX#JCk_vNUmj0F@V}n@BE}IAWvJND>$!0k| z_YNapzw5C*WrItt&Bv*w!}5`EsjK0OaMjUnHxGva8*;==G&?IdIbZ)cE<)rPdC`tL@1=;rh}T ztkU3$X}P!CNU28F*3l_#)nm0 zrRU6L2i9m7ZCh{Db6Gfz?Pv>wCmeES8&jS3=TO|nA3UA8SrcXjYjt@c>OA{xWW6Gg z|7-O&@D8-ng`1P%&0*NdPT}tpU8RCtl$vnT=2Xs{a8U?LlZLp_G*KYz@uByxs(!R? zEOL~HsrU3;Dv_*P%KfEiq-^D7U4_MJk9NqMQd_uqpg1t-`J|* zXHI-`+A6gIiK)$d_0=~Ug}dx(`8DirsjTqx@x>gIcrIdds8f=;7+T<&yt$H|=eZS$ z^=aWhJTDa!@{@kyQLQAP8ZM5&RX?pot0t95p8_cqn6JVWH=4L2TEk#be;Jb5?oqEU zR?n;(>Qb!$YC~D{)8^mi^>e7%VMGlX7hY2i(GUdrs@87=v2E`xc=_cl41{gRVjOBX zqAbK>nGYWq&*W~4;5KQX?H@P18_D@S^yMF_&9OFy1Il)tt(ARA@Z`@7KisJaD4mGp?TWZ5xCh3)i#u=-@<157|36OR$xCWE}>M$(gV zq%S8)!3>q}mh%Svqo`+sFefL7LnxBl_K#_zZfZ>fZ8pMRw^}dMo zs(S~=T>B#GTp`98A~6N!<$>3f;d*pZcDemR`LVW0AjY26hB}fS3`js+{ymSD0?UC? 
zqrt2sE)8!BUkb8@zVui)NJ=tiJW}nEWs{_F(&osnGb?2@Jhi&<0D*X$Q_X{J3Tf-f zyc^>;-b!CQqYemw#vZo|kSg%pSWP!wAA42Q>51qUodUPXH&4E)tKy?zGEq2BQQ@gI&bJg#KP8hJ zasSz9mG8Q=67t)^_da8&E{$Rkqna+KD0F`r6u6&`b1~I-s5S6zmly{O4+ldyYxXX{ z0u&rLCIcqr%&C3vVXTVV)7TZBI|_&tb$ypoW`k4J06kR4gCy$AqPe8Jiw6z~)?qA| z4$HOw{lZMasnx8Tz~KAepWBT^6^C8VKmSVhAPl57fn`L`R=evs(=^6|8rhhnKeaCudk};#f!pqoZge#Z2st;{C zeSj&#d&tN50=O?rBP7bI{4$wGa(kox+#7BFUye}Bv=L@4FWH*&O9rq)z4O!_RkW3SXsmGS4AIUFR?=Z9d&kMpt5lX{aA-}*WG#x#NQ%8_g8@FuC5-G)d)|OH1 zeJvQX;r-e(<7>-SN}I zqUa}!M>TREs4GZFAQl5PMBj*65^wiUq>U(CQJLc15os@mmuEO@nM^jhy2FrTn$sl* zvIFo3GKi{GEi)Ix;_58>G9`hKE#8Er#HU629!6PvFtjLU9qM=E+GpR6ZcJ)wtAMx;v&DJ|#b5-z_AKNBarN5GYGkn_exVjahPyB4Hiw`4T zPCeZ+FqaykQtfL+ zD(0%C2GU7|kcXYPhqYPw)hjbntTxcfUOKj$pam&-Jt+DN;N^>5@(T`pt&3QB_@A``|qLNoS51)QW zsQ;`Bnr+vM-_%^yY*(4V%;K02syEHs6fRkv!3;byVpU7+V;tAa4oJFUHR*;qgCzC55nJkXg5#jC{lj zR%SiyS-OJP@{iRH@O3ks{{qf-qIV)iHbE?)+R*$qG<=aLokpxj|F zIt)kFC}59z1j>%*GUmlZ39HgsH1K{CA z$kXM3|C|9E09cKz6X5BTF=v~=Nl{NfPd&G(Ctox7y+?pJ|M@iEINr=8B^$Q_nX?aZ(tTiD28`fyo`F1v}$a1D(#Q*RT2xlu#>nM(Z)F-$q(##;Q2_Mx|=zg$lM zWwiASA>5WT4KtDEbJFxzTC^Rk)bJ@%vo#l1U?o}>xIpUYKS)EEt*$S*l|$W{fI+nV z+Y|+*FC+juuw<5C5vIjosd*@^XfxH)X>80>Y))r>uocjTJpc8o&^g&+cK1@11wb{Z zA0O2Dq10G>5Y$UcqFO%`o#cb*?UFt($=?tr9QDWoFp_>(P$9ui%d1tsxT+8uq6F}!$OSS~NPUl=GT*6k{#8By-}x|}Js4h< zWLk$W7z)4dyw77J3w(0tqt(u6Z!MWuKd8~Ev0JX(_MvWnUMjsUwzS=+M2EJmU~9@} z3U0~BFA!HYwjWnmh+Eg3V+ks0u*k=(56({}7EV$c{gu;veMERqwIO{IsJl);#cK=% z>bZ-0f&+?r@|TFJhc!k=bW>*7rJH?ELEG|CuHnpGjQxK()$HWGIC((Y6b07(`ba`u zN8P)!Xh9tyrBl>9Q{AEhWJ73E!O_IWUT!&V`x$x#3%`xB6J+iPPnxp=MWZbYMt-&2 zPwUi$o*nJJkj`x2?p<^TJSoX=JucW!ct^JR3V&Ax2n7L$#c6hMdsC{f5+cE}I~jwE z@EqjpG4BBi8dK>!3PfD?+&}{xUZ$~9o#?<)DEg<6a0S;S%QV3c$+opw0r`HMMS9^W z${(QFt79lU!c7jO7)=1y|KPO`_Qrfc6bRqTE>W*1v~(}8o`Z?($Y1sqc=>UzHN;zd zWv>ZG|LJ9S>U)oWIcD~MNssfRd~|Riu1Wrg`O9$qx6AZh&ZDlA9*RImrmEIFC^tIxq zZ`=9x;uyo-XL|1=U#q&EgkK^_aEF3rKUH4`s;zP}Sm25Jff9uyzh={4^C*}JZ&^&A ziY7g~C*H0@XYrZV5(j#O z;y_Q?V$bh8f&^;*zK1>uqwuO$n!C6^+-|t=sBO2#J5RY6%8fG?+cYPTixK)IT)2!E 
z(5QaDEh4sTp+YU;apQ@;8uNKgY0oE-XVrWpR$N(pe%kBzR7$!);{G zhP?D$OX8)yQl)`KUy&ij8GQOkFR5l`de;b+F*dp=qbMB0kMYK+&<%$87LqY$uOAN< zL_56SB0FvHIO%PSaTE9@Nh>}hX_D#^g-Gm)rkbWQ5n}1@25w&fA$y9H=SGrOB*mtS z7vaMY>534vi zW{i`Fl4iF2JVwMwYfP`4V~I_M`t&f5ruEO6X9${`{q9l6_vxQsy}iMT8+@#(Ar6^x zpDu`=wQX2D=(;efZ4Dx(EJtgfI_5X?ZX4*IFPIEf0UBt)8EH-x99F=j&3M?f1oF~{0FKRQ8m|nV8vqPPI zZ8ZN%`b_sJV=iPZR(W=+{xj@tJ@5K6nQdj?70HF&TrI_C#5@OiNBmucwI+2c#Q9XF zOrvLN*ll$*ypXRPs@ZML#NP<>sZc*T0l*DUOuW|-2^e*x1iRO=>w|yRnn!KPU-7Hh6q#E+wUHBm_uWz&vid;qqCDOGS?E2tsGV{FE78*>>MkfknXj*3TD`Xfk$QYrY+v-{n%Hal}%D6v0< z9wPyq2lOVb%{K~`mkK4r62$GV)NB8O=Q+X_tW7v?=yc2qITueSqlRbGVjyReDC!;Q zBhu&FTPR8<+pc+W<~1P`UC5)a`6M5Huxv>7ip+n<;`d-7Al>RUFS1y;B6rq|;<8Qal5f6s0_!vRnUM=Q8MRp5L` z^?Gj&YcaTmu4eyvnVbta0+(sEK5`J}GUpaqY0E=7J%>)1NnDGIOS`E8{n$HvglBQj z*$xFhSYZzG_3Wg~S5^=o>zXG<2V z-V}udw(3Ji70G!3BL*9dxxS5185VM>e(93U)6`w3x6y4n#_9XGKa4aq6gKv8LsKUt zahvHmVBq}sl=X6#W8D&Au*LnsD!;hw+i z8gk9vRMFz)F3D%TGLy6VrZO64^>u@5Laj)(eIYRFev$zoCGo=?_biYWV--F=1rAMH ztQvhuo4Y0=10!(SEdTVqyp^v`W_5Mr^ClX>y4UR8Txsgi@=sco@oHzwNg^`utyF5!E_HaDO*j9#E}mZ2H_#q4OI zJsY*(Y3x3{j+}GH`0ZzVQgYvn8zr$rqo)yVv4ZzCqkIJk?TDf70R8A_n-2t6%FL3< zVtuqV(#8rogBs&7=h*C9e(Spd^&faBEs-dJJ8zskTOB6E7fIWu?>bff*CHM*5u31^Ok$;$H`LT2XtJy$wYGR?g=-ic*G8beo( z3wPpPat@RKOvcqOu9OzSjj<~n+1l8b;>ad`ZpXnNMgWI%nQR`R|6G!?Td6a2?g@lq zR^}cejA1<3_>t-n!jT7tRges1iKC5iePbN9z%9?1{Ft+)lOB|_RLt@=_6OCEjbTM$ zBlQU8QB?I<9X+@BjD@h55~@`a2cZ^V_dkVDm7T3BCc}#NMB%Av(9xLcR%NV&I})oA zOMO+8{?8DNcn$kVlD*a06|La2<_L@a0iR7H^raVB1@z3+6?pHVeBTn_z|h$aFK9-Z zSK>}p8Z0fqKcBmnP^IU|K+wx=<#!ilk;Pd0y8{sTKG$%;FD8Bn{m2+skb1X+QOkJt7N^FeeanCC6If;fEk39rX~%=mJ|1vJX}- z5BJa!(yxWz^SetC6s-7j&QD7HvF^_LuTtvtisDW4v$GCUE&nXwpU+~MzNlLC7X%%X zz*DUsyXPi?sE5-043?5CKV`qUXes)SSpjqMK97F+kj3V%zMc+`^S;LYVMD1nAc)N} z1Ec|t^%1HTW+~0wf4!Svv4CI+B@P}dl5*}Oj(th5nb(tyf-DtpqTHpKWBT<$q`hwn zePvL!Z~=G1wq5TgCE0Gc0?e;3BXQ z=P};yKsaa}e?r=rHd6B0ur^XBC})z;1ghnSWKIAS zc2a~o*^&`#;k)#urut$Mw)C!!Ie<&>ExIO7AL4@0m6E_`x9_c6+u+n?)#ZE1;X3t! 
z-91Zdy3nss31_bPTEvZw8{xp`ASobVf z?LesT&8A}z;QaxJWyJ;dJI z+~zx}IO2g__n2`!37udmogs5r9Y{4Qw+U3hM^kV}or>aMvH*X+n@tw(=wBVIUDRT; z(daDYids1DCa~2f$Pa(LnvXoHi-wJE>@7VJ0UZX<(F!dd1r8R+`Gj*W_b76#PiVbA z>%{s5r8o(r;<)+?4xg<-RiVw-3Y%xflRl6Oc~8O2MfTW zoX0)+^=%wXbb|W^5ODB7x)DU?el_C{BRe1I^1iS!n7-E>2+6E3zs$N;%hrD`RXjv* z&2iOkxYKSc#$HFvS*!44;Z&FvBd0GUdB8Ot;w+E$m!U6C2|1xT+UN$`K>xuSqh93f zB|aHd4E@zJPv&b!KdTARPPF-=BySDJkG(QvsnrJwTvAj_uS)ZLos>xiZK!~IVG<&$-Wo-gv zYAC|}7nPDR>{bF3h;ujPUsA$*PP0(8mdGxYsuzxxrQ%iOpuVU5w8H6({2n$~_z8a> zm%~AJ*J$A!?iwwx&!o6(crA~ch|aVXTUi5w`N)Tu>w8&J(q@Cj^Y-b9vB5smV0hNI z-fc6C+SPyCXVXeLasA;N{3s|c8);>_XyCrjba8ayQarAy^aI|d-F!oeJBQitn>O}V zkbZ;9$pwd5fsTrCWSOVn7i*5?0H>~~dB1y=7h8u{=jl3cUex$S(KKA~4MS^VqZ7|iI^8t$ig3-zC7k9G zV@r|1tLN{b3ve#hs`~8>$DM+zdO2^rgy`T~HTI-hlnNH%<-+yGs!CJe-WwD$j$|Lk z{n5?tImX*tcxlkCGaB0HXhp#^C)>|;dN=7JpHGG_=TxwbQM$P|Q~g5K4<)tY5t}}K z+>U0Ri9`pddq?;~tbBXXa=i-N*-=j2@&jS8&UycdFK)x=rojBdtRTBxk!8B9!E*F> zEmX@444W)I6m=mkZl}7%&Z^AU~9vuhv7D ztOV=+MGi>R>J>)#-k+Dg#V(E^o*{S5c0NQYi6leBH(WRZ^#tf6oc;IG=WGbkbp;rI zOd$Y>PolQ~K;jS^k@IJbo^azATHlJJe4SOfps>Cb%2v60J;$D^>cYmoHm9E!3s?oZ?C~u078uck!tNd%>`iBC2Qea5CxVrCEag~^ z9@}5p(jr2AfKTmNg5DUc+w(07BJ*G@1_2?eW6?3TV-r=yy~TI28fFv9=MVKbUfqpG?s}o9ha=h)Ow9}Te5DG% z7g50+VNJO)uXhx~xB%R>_uThLoQf{2zIHNsvb_<}A8Y-ASGmT&yiOO4!4|8jH?|pE z2YfhCxsc%ZHL6~aeHxo<)yulcCqHKumt0JOR^YC}e>F>%qxUk@Gl?{ZHm^Ztn@)T9 z`dg8fXTPvJS}izqUJHl5$C^_<^tu;Fh8fQa`C^903j%r!TgK6yef;@E=xTPr;}J3B zLlU#a)BPTKWkuQ?U(hjcj(@I>m5L7-ley*W8Lj&Cuel^5QFGeHGaur-Ff&B|S^Om= zYK1gJKdGK$;nRb-7dH5-+}d82B#Ity)#O*7eV2vyz!Z!eyEL`L$^)kMDK?3BdfAKTwcgAOZe`b9KxIpIS~20Cq#N8*$6gM z4=VY?kb>@r8{2jirFz*1*?pyK3dj9?N?uFxf9WXV5NAB6g;dd0?=`BnBOBxqBzE1# zwaotdc3gkRNX8E+)^T1Wgih_S2egUa;d?uhvJP5}J>fpR8({Gt*MJp^=UCm^S{)ny zpEgtK#;HKRuH-=`5qM`h!IKtaEfF?WuG@n8bI!o!YUxF;Y3^ZNGUuhR=vO|6b%wXa%s*Uz@VjilUdsE%m#*)q?XlTEr6D{ILj7lDXILqn z3;4#p*dvdW;2g@a`7M>nq|X?-{i!WQBl&@xBQUnKIj9{YB$>m z660ySIzaQ1jwH^Z4X54#OZxqR9P`^BRfxKYwfif}&;4G*a|?SZhn@}IhmCJ9@^23h 
z>fm01HM(mR-dgTCw*&uuMYX?jN7STA2FS-)wON`W|!FPNqDrIQEi3cF-R%_VRb z`r93AMe57=#+DNpZ_}qioxY~t`PV$BE7CN1^SfefVeP`m4*AUHd>>$$!~LE2!_FAbri(i0=W6rz(xJWd5h4+EgfDy2bXkih%I z<@~>fvDTHrNkRRCxo1|Y7gr9BbVoVQ37Kh3?CQ63rq;^DeS~)?oL_9L_ni)P@U|C` zm)DPdHQkBJF-k|wy@-9~c7?inK}=p^=05CGKauR+=aA|*R<%)CXFD(q5=Rmg_-8_0 zS^AeVed;3oy)-Yz*OjE?fv~)C$K>4E?xCi(x`}gJ-_13{qK7>>219PR#-aUYOj9ZT zF|K)6a;Ageu)^I%Q$dg}8|Y}(c>C^JabV_o+T-7Kodxu3u1U{Ve;Zq%*WT)Aksdkr zm8Sf@Pw%EIv)?Jrn&Y@6R8QUZ{Of}{35xtarxGZSNBdC%r|i{~9tp<=^yE)K_SEW) zO|A0kQYa4Fi19DzFGI45m$&wz2)I5&cj-u~Yama@((tRV5L2%SM|YwWa&SYSnqw@? z6QbEJz6c+Y1Yxi>@|Y#=^`wt-PJ!2Q0)pZF?pF9Zj+LvhKl=|+JhLXFxke-1rBP!h z#y@r2q>;}|mZp?DJiYGM1gQ_#wYx~` zmz~fYv@bcO8|st<%M+a9JR_eJT6Fgs0eQJ;4tWI9=e5x;c8@CksdioZ*O6{rP9b_WAkuA%R2!lYi8-BUs?Uw=-++ zMi-B`?K5CopS%$El88g3@XF9T_3q^1-y71MSF&G)`n{i?(A>6}X>={T-h!%~^HQ8x zhlZQu_Z1#!gM3+S2p@WeHHYumpmE@o1Q9iZk4 zO(MkeT}s*O?_m~vhVTVttS!VD<3kh*YnoG-t-G+BAElA=F_$Tg44F0N7)(<}r*waU z!+H<;h;ujLh?#IIqIk8amtL-ZA;tZx!_~>S?xq(-cXu}TyqJ7d6woRykI7*A_* zDcr(!3{23+;s2VUcT4%^++Eku8s;3>x{uh+|JO_n8zpUTku1n@Ibc6)$NOKC=;q>y zk}p2-r?sSKH-5dOKa2xe2_{>%|GMmw>#r@sDXqQ+pBgX4{CIcZ195ZVnKCD$Dq(f$ zXyn3C;-v8ml9G!jii?ss4^mR%Yv0WxQwUgcseOmc@QTuzwEi`!FN*ibuelS@ zuN^Ek8}zQLu4Q6VTH^G$m!WafXxFiX9$i??AR3eIyxfuI)afu&saMQkEZOMJ45zL) zCSi70p3TiN7DfwAbt#_W%CTR*puV*qzd2kgCR7{y2QPJ0$pHQB8pPx^L71O8ZLn#x zj84mp;Mi7bQ#w7}#dY9;mi*_`$TNFy zB2?i1wi()9xqd^qGnKeIrI$z2M6B-EpI)p6Y1&#^^~ym>E)jte@3)?$4}W45r-{4Q~~zs zhL2TsU2gs@!#cfsWJ=+6s=yaEKf_dNI3e_xaa4P_9Sw12@|ePzJcfA?XH>NTc7vCU zP$!%Usw_SXmf?w1BmLUW7QH-Xe`V^@Zm2_lr2?wdYv*)baL?Bo)DC>#ic@E;t9sFN z@yIgN_QbWBFScHpk|S%wp+>=TF;OO0GNBq&^OUwLyR#MqD% zHuFewi~2MD$F~g|$#(ZDha4PgwT%af0*pPh%q#!dXZ(=t7`n|leJqS*!Lz-BZ1sX1 zb5utXS3HuVBboh&cDxLACc<=t($=A7JeoA6=4xJ^rR4`2%`xK>a|QZ}kR=4NS*05; zPf|jPwuRgUN8p!d_BxW-fT=4@__AUT>BAgbEA^kyXFQGL8V=p-18NS z3k^oT#}VW#)MKX^gZgaJ*PujxD_D1#YW1c;7h=pmS>+8q$v^hFym;Emq?(qF>bRB& z3CK?*hrtXMh>I(#u?VXjOdJg*=U3~3?w>R(x(Rr57nAQy*WFxCeb4Sg*AN(L+$ zC@-dCn>x|cYmRdm4YN3!CEqmD70#HNuv%(tcPuAX>{!%1)CTENZmDS!!u)CVv+c17 
z{OaJL>ouLw2W6j`#0p%@=qpIh{4{;J?&QqtLN)p_-PvqL288%XE;<$n%pF0ybUuSK zW)Z>oI9IDL#qOFRE{;E)@*%<~n~oL;RAbGg1?5e2IE>z4AD=Ok-{nlCnZhS0zT|DD z$Sq!_>^aOYFA13sewWnpoqgJ?)m^u9;#C_ZEYM~qQMf)s=Hj8z-U^w;qE-82+cr() zMz_o6TVWur`)(?Xv-g*&RUQhXdwJrsqftLfO6+!08z^DD3S~FhjvJJoxC}#iStB{o zLSZiGnrIY;m|rXFamBpEbX9+8bOL#OdT4hY(v^YmbGT?yt7*RC@I5k?*djxB#8xHt ztZid`7Iq+w?hMY2Ra%NK6XMvDRXCM*pIps5BYMfd_QpG{3C$}XVQ&{j4n!y&KKv@b z-7c`;QuwaDSBgdUI|@vkK2#DUa8!9W)PDC-fn5iJ_a+?l%Q|0pZ##qMYLTzk_JoP#o6TikxE90G z?aj@^JARhacW4X)w}?n3ajvT*QcA9T&SCpq69 zhtLvonXD&jSA0X7vsG z?&P_hr|!%|9W^6ZXZU)t^`1Qcxo-`z)lF-f>e;?xa5whgTwueeVFP;%_Rd61VR?mK zp&89$#)OiY6+1s;XXFq*1Wit}`f#VD+h)ktFr=+V?)>MUhs5nw`69u|AhKk80?1rd zvr3-*ll)`h8^6ggzKFj*61sK}MU!d#uBQ(ePY7w2U3q<9&bG<77%ubp%B`mryJWn+ z9oTg%HkNDuryBkKl>4VF4vOr0YMAbLph353VWtrIQsc&_lZtyE%e8bDd6V~;e4-T( zxXvgJ8FG`CSMjUmuQEs)W?m`GM#>*4%LR-pP1JEWd-2q}l7=Fyq}wJHq)e1WT9;74 z(3+8XMPtjl^DA;1QT!%FZeV4)Tjyq(pGlsdAhN8PN`Y>vr(CYDV)@SG*7Fj8bc2ea z^pPVEYzuOWfGhB38Y;tMIE&c^!ktI7*w1u4!P&hzwtZ8f6U7#)Vn>D?YsX&;OIZTx zR;UP)+5*u<1;M&~s$i)o2OF>fxKoDL*)n+ne3ws#C#>Y)?I{oIIn+bURMZ8|`cLjX zruDx0V$L5VJo=p{&inx~L$_7{NGr%n>Bx~I?&gp{?k(XkytmQ-dFgXpx!kF{pN}S} zA3kzq^vCyJ+9#v(PiF?NC#n|sYO0II2?`44s*zRh^=@Cdmwm6~EHbYJT46aMzI-Rc zFj#1>mX?-dHfEo$M`h8V_e@>yr(Ab{cpWj02vga&PIe$jxpzS zI6dmwoyd=8K0Uiy!;&7D1DY%q0jZ@1+48aHsLhI0{4L4e*U(SaQmAX`c8la@K7&!Uc17zuOw4p5I#%_tXkHKcunI z0lV`wmjR#S0S zPwjgSIzy!V?+0xWaoHTSeTWm27m)UMlwxx4@Vjs z`LPW)p7NW`6yMVTDyKfu7a`_6URtt{hJAl6EXnpIm@kYDRMu~5WHZ5N`Jf$sFnI2gScVDRj8#lq zhT7oXSlgEP1KwZ%hzkm|mMt*9os@srV);4@RL*cI*!r)G3X_V(@;0px)V4d+dyf1n zgR!LFg~2)Wb=$Yvp!M+XRuxXOx%Fq|E6H~C-hW*JFB5q#NC=@-*zJWYt+Zle>qUC=W#Do#Q))LA9^uByK*b2*R<6&15Ia1hYE6)RB1UJ-?qZ^HJhX zPs(IdoV>i``690>Aa)B8DX19; z4ixCXqvix9`&;Kik^tb!7jg#byI95537jILBraT-v9Q~K8bBq`G>(-*k*o>VVX7^; zt;{i>hmLhSr(Aj8tvt=n`s0Zyk24S@rEW_UyM0MdD#w|m|51Q+gz3aNmYApj; zmu;n)bTJ1QpS{v?a(6p6PFa+IFesFUPAFDchh}E+r0TueOl~}jc&vs*+RSzKDLD%GAY65~*t9AKivOv%h7TVK! 
z-C^~>@5PK!^p*f7)xT~A)3uJ380L$txnN|MihF^dYziYPEDXQW)AYleY;WdQp7DJ~ zRKR^VYZ^C#AJnl@jx=Lb;MUF`IfCl2kWZPhPj_2g&Avr&xX>#FDz(>)VJwCNk6PV= zlGnq+iZ5f!BeK&IySsYIu9Ro9TZSGUy+Np;{mkEHZoyjddxVa?1g;f#6Y04>Qz9RY zlcv+~9U%Q@`K;N{-sz@GS})z_p0mEph=fk@i&|Z(719@btlj`rX-o9-?25rVKiznDtL+rDSOgB8M+x#!&y(LO zfWB2IPfp>rM;8h%^ruJNeq}Xb^wE1I?LxlB5x#7udSu-ru`-$;WX>luswHw4shMju z@fMX|c8F_*dF;t-m;NE6mNz+rG^;bEN$w^_77Hg=OF4tYpM=T(SO}V0xaQ$=<15FK zdz{qh@J!o~9jjf3irOpcmk8w|lJ*|&{o?s+4;(V30mh?I+UD?G{fTKC|A}=fJ6!nV z-dd{L2a7a3Zz(?8+p(ljXKs0JZ3h_8OS ztjXlm%p-m_TF8`+bYpl4{nKp5{CM0*hdh-Xb%!mnO-d=!s6{gVm9$?DTJl|ILT%JS z{MBCdKa!0$&IM(qkk*2dn4kD8&|L|8X0pVE&i>E(=yg1tA$pt4`!-e5m>O_GUgj*? zK+$iY;xq||acWsbws4>hWVX-N@S%5c%0b0r!ibkuZFeVdfyT_v5x z0L-tCg`2q=obIr;Dk5!x@#`{O>i`TH!KXnF%&c)U&4NcP!j7=lxN^0~z+S z_<{5HHPdpmP>Xx0WH{E_wLq~OvO4dm9I|EI=rDbU%7<&iIJORHwM`vwnUs%ans%q) zIVy;s$_CTg94Lm$w$SopT4W0X0$$?#tXf^siDZ5`fH)U8u!^mghDt(F06H6SKMw_d=sdp|CI1AMmyUiT841qC2d_)*zv{Hds>Zew9 z64OZZqRImwa)~t9Skz}@h7em4U{akX#I>*Hcb3w;jXw4$AD$Ip&4bw(x&I7VEv^0! zBy$9q_hx?1n5CNc_~&Q}0JE};ma8I~;XM0u9htuXOrRL1x5$5wAYe+(|EwSVry_K; zYV~L{*8gKB5fm)TkPQF-tx&W1w7at6Xr@dVr`cA^^@8&z8g@UYY98iizFIR)StA$v zc?wMDd^McSgcWq@<7co2T+r<$4=iD_Db+%3Y zS~w3&hD}ZARCy((pR8;yQf?fJYx-2x8kf3F;jJwg=+>|ZS>R7Lzhj;smBE?uC;y$$ zf*ObV7+9NCeTaUi1Bur#GE%LesqwQCDLbODK9ih%oKR3zcz4?rOIy=ko!BbvgWCB` zeC=e=%#(F!rJs&L?o4;D(R&-#^H!Qt3yiFLG5KSMKeesLq*`M08HfLASwTXeVE zWH-&8Xx}$x@R#)+v!y=?7EIN#`ajAL)99Dkt8#iLquOMsXXjL>dsbJctWUNSb#<@z zEZX7YSHr)bO-511n{fH1%f5xYn`<)#9Srxo*pmYb_>+DZxQ1~&7|)65ypCm}mj=?{ zd+6q&<2oH_hLnsIyF_);YA!k@73ixV3;hnJZZiH<&8jeLN-6x)H+8~I+izdN*Y4F# z&YKSDzZp+wpKgYERmAEqg*nBNZW2+UMw(I`hPm zaEhSskT%gagY1?-4?<1bESw&9!ESE7OmmwnF#E9_UXoh$2>&XMV4F_Iwos_4Y{UpC2^brUn&mKuGQhXFMM>uXVm~<~grp?wGv88mD-1-UmC%MOK zyU9H&<4^rpHub^!ZCg#|l%@GQrGz2!kM;653y=)8lfpiPwP2BdmY=})mJO~S1rq1-Wh9g9~o9#4Yc%6EuZg77E+G&$q~>7*>ezNocYZ5CuB@}t_?ZY%DRBO zkQJ-rhjbgSxBVVPo+K{J^)~iMDs>?g!CKY4i&-eBSVd<-iewZ>{WuO7! 
z){g2n8vMlmZeWFocJYJ6uUb`z#}-||_G=?ssJ*$b`H$R$-yP7f=%SU}*^Ec}ylNb3 zh34ZmUW_%;zHnYKHQIK7-z}Y>y3G_U=%X>j-Wj{MEffd0(fdEv_5N8)_@6yWCLykr zp_IzZH|D71$7R_QJ-N{zW>nu4WnC#V%n*~6-L}|VA(6Vt#SaMOh1si`8JPC71wGkc z=giSmXT+r2)a0CDInzxRKe5h0QD!NVcQoJ&i~py%_YP<(``U%EAdZZHW5GgGRFrB3 z6i9FeK}EnukrojF1B3`tB#;22f+Iylx)78mH6SI_pvZ`V5FzvsB4U6LLX;!~0!i); zI=`9seZRlH`+fJGzYaN@efHU9?X{ovthMO&Ixzg-z1hPa@2EnL$H%%a752@W+1I%@ zbZAlVl;Pq)yYv|T;gfNkVIDcVC@>ue?8H%^>)53DJV0_0P5Po`_S=7Y60Vjgmj!Y7S612rqYQqt2Q({yr-8oR`P0 zVGHFCzWYi6RKzUjmMEC>5Krxs3)1>!=ZB}G;~q9P@nJQ$&tKzJ3wHk(X)xzyQOS7b zgq-t=ZvRH!87r-7feb`E>fN5hwJ_(wRflY714lwAnz-I0FoJW3%>cRRyBg~aSB$id)=glgT~}2-is4GRGZ(8|E!D^#fm+ zsU>7aFCNJ(5#Fm#w39!8wW9G8Yo}jaXH0f*?CB5-uDS(dfupX~%poVFC?X=A?&Tq( z+TY_VJ&mYluRNWaDgBe@ItGZ=SKAx*(fgs8?27rxB?^c+hkx|v@HGC-BzFm>4l6!o zflKnil^S7;;7cEQiB#%nBRavFYnRr2*rQ`^+!4<7etV`zs}qTfiPL~Dod&u(tr|nb zf8ZZu;a(~j*s#%>%)xJMk7-15d=od$VDdxTQJPNuCokpJsw}^ zHtLRkT3}0OlWX^|=Q9YhoiCN=&|9nfSZZ*A;S=4(Mr$C<+7p69ZV+@{Fo_+H<`%|F zDKr8R*r*nBtA|)?N>hkCa_3IdBVT;EP<>Gy5(@@`17MPJe%!&4X%XoWqGo9S#}Tcq z3rHUm`|@k21`kI^=X9@2y!p817c&qqwYIeMZ5gHDQ{{<a^FZP zp9f~yN4u6%^^|=d@0C*yGYuMX>r31gnf=8Ix^4PVuUMmXth$-d7vSa+Ij}W(6RPSG zzV|gt>LGx??q%KsXS_@?Gt(=k!B334ZI5+@Q8=I4WV~U@cRojq9gYAH8q=~#oMCx5 z5hh1=OK~dc!f%y%-l$R5)|#`mfhGnMA?vd1I{_!JV4r#Nz03#VQOJ7&R^Rt}JxGM})>U$oZ)1_c=gax$cla3@&Xm7xVYnCcB~E5A zdgb$E$N#!7yDdM@JmrC;|7DAHaYpllyA>nFO}2(@`9s3lig=PyGS_A0@2Ik;|Fn}7 zcj(*H$)SxDt7Jjhx4iqi>n)ri?^b|IN$xqL8ch4af~P?p0lTPB`}?4lKF^QhnEV|& zE~Fcg#BkR#h;-rW5f&;fcYqei<)G(FYbG?bXYN6Xq zIWt+y!AAH4<1;ZSsRWFc$~s^2soIO{#_N-WfW{ehU-f!Z%<6BBVi*tk9R~ieyT@?`0Avct)??sPO1OF+O$lp-~U5JFEnEhM*r#<+-K#7h}q~Qu!qLvtpbx zIy!P1tR}>0(RRCvThwm77J6W4`ia}_sLv5nEe}+`4BiH5tkre&9dkm;R`ANf8T5UN-og-aKvI?y)TnzJHrAyZqT> ztMp?BwVWIEXCzD%Gy%2;E?>ir==&aoM|Qdv)^E8kV%QvX-}h3}!pJ zzvL<$q?dw&+~+P)d#(ooOjZ2N71`HK|B4e+d56K;-04em53{|48A}ZtX_Rj)yuiBP zcy%G8rSa&d3Ns)0kMAFJ+j>+z@XD`?HNQ)QAMSY7V!8HCYP8@9;IYl=#^P<+Lg>TKlu$Nn-imbzHlZX15@0aou30-B^C>>R5dXbqoK4 
zp%_>LK2bggdi6`b+l{{(J!6o2Vf;9#I%c=CqkiSL<(4SvU6DdvvUegdL8ZBOrYB8w zW~6M>c1$e;{PHS4T07@+q-;J_IFARDZ*Yw=W?{@94h4$+=fjPtv zcY@fz^c7_P9?J&xACj1HS`f7Oy&eVApl5lJ-63JHb6GqiXKDe+T?N*_F_1_^`58{V zcR}1+MmJ84Ro#oal5!Warb>M;rN>KHeR8H}ZW#K6{R#czJ! zn#B6@s-2)t#&}PjIjI;X@RWYfZPG!iua%Gp*D`^PTmr$PJh12mLx8UDk-3aDfPM~N zqp32`IQH^)@XqZZ5r{KpY#2DCZjz}Nudce=lU>!}j?}I$ML$r7Zr!W$^ zUwFPfd>;;^&X~l*rf#bZ0eXh*s#_mBKUwf$519KoX=dn4BPk6V4 zWLsv%F{q2tc+AlUU#%UArb$PdV>c9qI-IX|>foB*qbw5fCkCLGiW*UwP>bw(G#T$t zwm_uJEf{zWKPXa@FOhu;!}Y~A5?^rlG<1u4_@CzQUkRzsR9CY%LvCNhe5xO=v5Bai z?m-c>5t03!Y#Yfh7#QF66xoA5tx?>8zerD}X7FrpRAmiJb}m^UY!bC}*?DiYQAM#|+Ebr|pFt@mJoSqaMZe#82 zUr^y$C&c>~p4>9?KJ*EqW3J|F;@(R z@mFM+jiVORgwxqS$9ua#~dw#tluwoKNrXn#k(Rw5zJhGDJ6TXh#1 z#Ub~4UxjR{@!!FX#G1t4SUuaprRPur%@ZNF4i)!%*}y6xk7oQuOEt7p=`qu6w-15+ z36SuZ=|B6wGudccYqR2-#kr??u(pBS%+8H4v!kU7aGD+oX=Xt77(N1rV-#U{XxJRH z^2cY`(wDmNLn-5I#`bc5s)&^OQ$PI<`6t|9ly zm!g{yF>@w-#*yvD^wp5}E0$X8qUtM5no*o*$CeTD zktA4k7WM|-epJ)==CMc$U31SKQ@Gjr!4aD<1N$$;8qTmJW`!nW`(A zuqZ>sBX9RDPIMA80_US>hkAn=$gP7x=Aw7zoGm7LI9MqmQDO zLf-5#Ge)2}_iA6_=21(jcHzcwXXMcuoWVM8B=Z*Q_V2mWDt^@`FtZ;|on<|QD| zF37KM-4ybYV8o9(+O>Tyq=&By7qmXXNE_LE?2Svp4tcr@nY2ywd4AA~k6J#;#fbvsxonb z+S^uyw%@l$>r72%N)}ZIF0FF0zA($yIAndR*k3Xih3Ht$AzMcj%6<*-812bxzKrY$ zJ+UE9>svoeIvH45HeFj()v`JY!lBfZqBghmXzKHHrCa-~Flj6gFnn*iGYdesRzFu* zeh~o-2|0Jo8`dzbuX(L{DSN1R_P|mAI(vqsgV}=bmMFX5LGsIy z`i7Z~Z*Pk7sxQU2nKA4_h7X4&W+xFcJ8ST1U=l>;7)1SRbmmP(l5T;vCuJ=&C`t1{ zuN)-vg|mKr+NY$ zZ#TR{mn)>GG;tS}dePn;lTJSI$g9 zfpX$k@s82{MrY@CFk3e@l>To`S77=<4m+A8{swxe-gxCqgCYek{S%M4@!; zm%*}s{X8&ub1o)&5T3)3eetj5d?z+`|HmWkkD>?Lr%-!x8l(R$3y$liAL=-uv*)@- z4&!0W+@AFRT3?GMC?8!KQ>F$gxcRR>b#~49r4p3iQv(dZ?+@twAAg}t8Svn}xwZ`} zTAqAsnAtlmK4QGwNc$|mF-C9;xO2i&oC3c`9trGbitc#cd@A2C4$z98fB`4@EZJI` zbC`fD=9y@SK-#f3QcgT19fV1OZbp*|tYmpsNuYwE@*{o+k2*u292tN0G2!Wn zbgy=?Vq3OUXoW6HaHr7~dc3OHNw`qAv>I?!GbLEw4A}Y~iSo|54jk89-tYp+h!pWj zt_%3W_`lt#mkYdZ)hK|*674#gfh5!!mU$4fGM(A!1HYb_Js#mq2o1iIb!};4;HhHJ zNP^e;I0v>@yHd!^#dR#y_`#FVJzKq5AYN7MZPQhF$;m`DE1z5q1UUP<>~MII1Y6O* 
zR@PF;82Ci;_HaP3NGUiHi4ez{^JJ(=5BDNh8)TYMY`1<^ULle73pXKqw~uP0Gro zcx|{ogXU2N7VenluJ$%S=FWy|sNfC8_>Hx9Z(|Ec*j+(x|B)B(d{6+-IJVB&=&q85 zl3d6bfJ^&o#3@@+3Hv6PCni!|A3WRDy%_nHmVQ@oFTJUqjlnrJ=o*G? z@n#(T4C(Ar3sZAkh04Nhws*S!A30l*G3|rxD6 zJ#(T)sMB{r11S$blB7+bL#1uyMYyvm)OuY?&4iFcZ5E(WvqJ^reyiE@Q%Ao9`T*on zX6&EFL=C<5Ix~B-?4ZzrMARRTIb@|)UH?;S1Y5AHUNy??nD5k|jDdosS^K2frlqo; zrG850tP@_l>rwUL^g)!UAXQGsiEEsGILMdZ5!R@Vo_OIvYJi5o(@`@oszT_tq*-jO z3KR1&7(V^%HYqnAy-!j27cw6!8o-f`TH&+Kn<3^V&HRO*Z=tcL(SK&Lr;Bmg&@db! z3&5wT1(v3h)p+HB$BtnblS!-Q04C`BmmASLQcIn8SnRB)p^K4L&B6iMTs8UgXX87m z#|87Z8}ilu6mjh#0heZOi^?7NeOm0dM|{1#;{Tk+!8eb@-nW7_9nM4zwR37*b}KFDnFBu zRlmrjf5;0dz>ZWO)7;#akLmG-@IJnqV{Oh*>$3%pZXCS4wmDWfJZBNsj4@9aRlCs_ zDw|X0A>SGB>BEz{cy- z_yY3RJYwt3&C#*}a=6`fn6T6Tv-?gm`Q+q$uAbvnNEU|0w}{U{+HoT}#fj8RC7soC zhk(1e@$z@+AS^k`2azDY;0FuV2b^qYzT3wP|n{f*6hE|NsmaZbzc zY|w0CCQ{EQLW9qaHRpY33sbd#%zinxIEa(SXjuqb@5AQ~!z>zw-r#B>f-pCrb72%9yLsK#7ZgJ zw)A$-S+(Qs$7PK;pAO-!obbfaxZJ6(Mxuf1yO?Ae9Ia8DA@k+<-T|JuzmVfF01;~) zpHd=*rjxpJpF5N}ga+D&jjWL=$Y!PXqiCoEHjU$76}{xBtv0`j~tK~4)e&k9}n z?Mc-VRnxKo`wa>Hc6PjTaagc`E1<+T+#J!UAg z5h-Gi)C49?^m|i=Pd)toNyw)fw_v0895=vl4!K$eQ$oOS3awse%2;hv%XEAhdPU;s ztf}nN$T|DRfRiA{pLnn>pBu*Q_owdOOd6IIp5_1Li&PZ8m$~b| z2^1pkM4_jiN3{q`iXorkW@VO)`)3bL(tB{EuDO?^p_%T(ihLXH`>D9v)k1OPjE~ff zQr~y}RtQ!Ea@{Gs2z=!aCH{wCbD7FxAQzqML*^m$wy4H^g0DHq9owRzHfz6FU0M6Z zoiB8z>xnS_{O?T+(yIo{69{4X4JU!td|tVpCQpwi@EZL8I;Cs{g|&NhNOk-DCiqCA zKXG6wLLO?5Fz@|EHsld z9yw8GN|4u%=#E&u*)*kwVCpfpK?lhu1*sXTi3s4bNHLdv&hVPYO6he_6q^56Bk=+E zx~SJ=zV7;OXvVM*$z^1fP01%k{Fk{zO==f6&jM<@wqpC)DKU37SE#A+pQTn%NS?hS;CD#v- z%2Kliv|L%ZH5}vAoMq(f6K1wK+B}SB|E4y~+i9^XG&g9%I&Snv`TSDbcR_|U#UF^a zgcW!`^0fHW_;X~puVpQzjFVVU;0t{HLlvjh!uOlv;z;ael8( z13OO?;1qktacEZ*szsZwN8+93QT)v9)aIQnZqV3ZuO(ne6!?G@+Q7odm$Vl(u$hoH z5$1XtHQTIx9`1BFm@t{Gm1@FB=qaQZO6gcw8D;L|n*P|+u7NdMvf4&6PgqIba4jBq zYr3qeE%cXsR}&{3o_xR6*a77qM5ZRqzp1%Ww;>r_uFbWl6O2ly6N0azE}vp{K_f>k z#B=zp*sgHMZUsIFyj*N+y7|E=UjaXaWQAMo;BY#TvXQgu*hXLbbNKTXFlQ9TuB>s! 
zfod7T;ue53@g5HdAjvEVrs#KuKjQ&OoCxC|rSH^xZp6olPBPhXLAhyR^B(k1k+Nma z&I8MA;}NEyAY;1wf2U8 z+!CxU0G!>YORuBne~_o)+Q7K;tv%1f-M!rSkj!n-0DLoPM)fJ@O2g_NOTZN3$bW= zTD(sL3IEZ8eLTfm`=J9$0WH>a0ZY`zAv37Xn9Haw{vp^pU*1!nk>iS@Ok93x*;zWj zVOF*y3FhEGmLL9Vy*7=^dzL(U*cHbqKw>WnGcPt`1osqo1#^PlRng(}2?HZF1Oauy)z{^R+CfUVbp5Y$9KCnXxNr{ zSCNyOxmNr0<3I9mofHnbcIXxf&f$+d4V&L&CR2<{!u)P95=$?PwUXcb_2zGgpvqAv z{o5pJ2&9TW+aGpbV?S{+UrjIUL1o&-jOE2mnNWR4s>em_p1_W&M$8+b;~#zk@vnmZ zYG8xO*5$Zes{y(seESyY{6q5|Mv#GCrYF3xH3nI+8HZ9YOmcSA` zitGWwj2_2r% z&$#DT!rxtCXRT&h^KdaO+=}j>dy5)sK&0I62Bm zi{1uZMw8E~zBW)1UzK-8D}Oozopx2;b;+_!wlyG1#DMJ@LN=IfCcF5A)`#xp&|ywl zS(IDnQNr2lzoTt|STqlM_2(~~>7e{c+hzl!5T04jJD9dM8&d5%HJ)SrQ$qgK`Ukag z*r-$^!h^4TvHtqs_31o_!mxPyK!hrp(tHuft}cMTaT5XbhRFKa&jV=Uds}h z+hcgPJ@NgSS(w^c7^yv^>pZk^@~%ITFIwNx(YfY(2SLE}XO!ZTu@wZjp;w%yedaww zuoDJ?w|5gCjBFCMUxg{z^v^l?oNe%yDX~Zr*h(os>8F6y8q3?Fo1eeeLrl*)C`1>d zgO(;!JhhDWe**5W9j7Sv3yOz9{!L-v}9Y>=YQmldp9-oQ=SsSc#}P)K_Vyn z`am1|`ARzd9d;(Ij9PAHj~Xz0bWO0UA;LRCLSl2pS(8&&{+R19l*5Nt_3R-+v2#{$ z+Yhrqvd`Vv4B=FXY}8_IvnnKD;Gp6!tzmX+znp8H9cJuw^U7p2^P2qUZ3#VRNUC&@ zGc04YZO%{~dx6l7lrm!rFs70Se?rA>^I|Hp4?@@Mk5!f6zbe7B&mBc;x~ zrX$RI!L|htNxGDcDabTo(lLRPH0iIDk(1s6o6%t6X*k^|s=xsQ(L%FEje26eWXz$9 zL+L=DL8%TtoxfQRp=z=HhQEi4D#KLE1@EJvzfB}K%-!UF*IMmch;+fvRe!8u#~L(2 zw1jwh$XklKT#70`=FXU&cKv~mddVE|8T6WWCWdgnIkR;94`|<3Ei=w&D7%x!4FK%a zu{sixa{Q+-dxcy%)yHrKCzO+3Ts$0^p#ya!1X_IK**Sp>JGkf6(?>bJ@9Hhp#8 zl&Btr=(rL3`(1oKbQsfSDglpJNFMZ2(WWkXEF~|N2u~6HD_=qWXY$4K@K?c=peE7u`$weo<8Pr3l!M9( zbToX-7MeI(KHfze?|rp7Vj*IQ1;f}lcJV(&wHS`#!k7$t`5z*6=6pZ?_;7+j>3V{Q zQWEm3BOL}p>>}oa!>Bf!$P2ULzmI}E13{gS-K~>nimHW^^wC?Xh9@q*um-s`vmn z_H~6x^>_6f|6BD(H>fi$@y+925#ByZF=h-KcyLa*Go!f6kh0oihgM3Bp%tFLj9U9e zmBhC(odhxCTjUAX5C+97hA)LnsbW`Tna7kK>ho@5kqJPwPS%4A%kSzc-pUyKK;GJK zG0i``cul%XPFVX&UssXi^(W-gt2(r@wip!MD!V10fdj$J_cn})-S9R?4pn+YC4zcT zBff3NyEdX{Pl%xp$IB1AB5FDGK#$roY*JZ9WT-_Q;E|Ld;ZJ2xE@20)d+i!u&7$$EZPiNheU}diYX=mIoiQdi8kZKv<5Pkp#E9xG 
zm`a>kKqI4|-GA9^@-LMIkh80!EJRkGP$lkw`4V+amip;P4}2dn_3NnC0G7hiwrWuaH%?chT?D`*4^5d%Ta6el{@e?Wyqt z1TWMuGq_V@gHG>Wfa*yU1-k)$k^k#eQuiV?#OA*w{tNw1)eXv;p%%Nr^QsS+`^0Ve zfJ*aV#gX$xASHG1mt`_s2 zjHS9&88rENv2Qk!VNOv3T$NiFOo83-d-1z4^e-6`YtrSnV45GBeF7VB{l~2P9UD4= zUUM7X%RBjB)VC9pbp~yrb^3r{)WFu_YE6ZR$r8rOpI8x?+6%DvJ=?R7+cC-j!?fe= zDn(!clClz*E&`GgskiVl`jeKjwqem@>MB6-*?z&<{v|OP3tPD90!bBDe67gW0jd{x z;rYAU;9EfnmC>3-ot0a_O-dM-7APKT65B_?6)r|O%4CvbV59a!^igP4*}6n5#dp{qIxvVi(GV)>a5s(_7{_95Eh2)B z12ZiC*y{g6cC>W?KBYd-iB@uYfpEP$_xh{IDJMO)#H=*zb$De-`R0h)_MzW6+biEI zTOcPZCJZGi%wFWlPB7zKDh+0SU)-{A05_^|NV9XCZHll)Ras_Hhi4d%`upY&SOnN( zgP&>}B~x>%8i}E>YWvY&L;lFKhLUwB5j9-F{9@xpgA!P1W|yK*ATf8PS#Gr-?mp@#hLwPM?-i?d4+)-fS$o|bJf!@)nWIXOyS`Y ztKsKE!aw;$WHUa~cqC2_VXk4%9C*2`P^w(r>`7U+NOVU7GbOkb?-_^cZwsZZx^ktA z7JGA>BaIn7{phAmz9g>Osn6yZS*F{w?xtFN8~^BrElFi_F-2%~&1~-iQAup+JVGgV z=S5{b>8E67ViBS|FjiNNi0{a0B#gKR|Cj@ja?!GFQdS$TaKfCU#@IGv9WT|*5P#m) zqVedRt9{=Nf9+&X2G2p@)rKC&kgFF2Hy9bgA;t5BYC5XzES_yK+Ure4;c{}nkBZNN zUr9-HBlUD&MMBPUci?&hpSf2?OwQqz@9+=*E*BlNQ@Mn@S~~oNN=@eL2CChybZRH! 
zG<0>dGs|J6RJaRhPl58AbV?~v5#*=)^Z|PNDvxS{`~9Ck0&Ie>@Ve!M~}NpoXt8`YLJ?KkFOx zvtRd5!AuXkzw7w4L-_LB`xe%gGWBJP=U8tsRbs-EaLe}jgsjH*(>uvFn zCUrQ^zw!rM;(TD4I_?z0tfm8Zg)_X-5S=|=Lli2S=CV7Q=7zomo z3TJD5Hh^aLu0uaOx47fm&BwsFZ8DP#cGC#SFXlfvA2bk8c%L`;r$Mg(TSxZe7ApJ) zx1)X4p`GG){x^%eVjxI)g}I)#|9t9WvF3CL7d#9v*m!l4rV|!y5Ary=*7e(_C)rld zK95p5dE@HrMKi^|_YCVHPkj5I{a$(fyS>S^Je#g>U5oTum<-Nq_n3H zn*Goa6NRSPMzA^lv}H%@pJX=9O(x57W-Nfu zJeLG0h{U{L8X7;#3Un|M9-Zl?ARQ)_KH)u%3GPOhR^Kl9herkCuvD?zu?`#7X;}oh zZCyX?rEF{?`GT^(6}@Q9Z|HKCtiFgLQ4^B487@_YD$?=|m`Adwi5o;y7J&r#zegh(udSKZE9QpuTogKcB09p% z4gEl3D8yvx5s{W(p&vS{NO-B9sFqYl9qJ-kR3F!A^CDlzvAKByQ-1OcZYrps&HRv( z(Tz+FK{UtQP&82}G6(}}`{#oBV%N92%l*_B75yj1q2^V_RhbeMMHUy0M~dRL*u@Dt|mt^L!X!1Y4-1vZjd+XjOLnKoL)NRJ(o{?@^Hb2gIja|3?BQ_k)q zo^^FOQn z$r=!;)XnU#LeAi`^^gUUVGyKs9rGrmX7f|~jNuQ(p`EKQktd1-pRFNuQi&Fm@bGbQ zK=J(LvN==oZ%q}hZB~7|w4?Qp1V9kmfTaJyW84!brM12@c5ayBdihjpUiGf%41&TA z$((IBr^BdSFTEXRN#5vcQh{w668n+y>IiU)RxLd*-CBI5LCeb87>Srag=@D>Ck}j^ z+WwwrMi?paOeqOyhz7ROA1lrbQh6fuK8V6`f zzVP%?;((w(2<;z~-C6bX=E<7BNUb=I_yn^%^&I4c;XjCkW2r41z!KL}P+8e8Xdt}4 zrW_WxLL$suuyi>I4uCDEegh9HNaDCq+wSR*Zh-SecATc>dEnUMWM6UpwEi{m9_Uk; z?lA+L%C_Bum8|gQGlMP0WH*q^!KQc2qw86fXJ64RIS^OJ#CzblSO3$X*Rc6lDMq+p z)3KsXwpe4oHDmMA?5`O`FHojc>mvKlCg~O32|W7ey7sK1&qf3dH;h;flixOpt \ No newline at end of file diff --git a/docs/index.md b/docs/index.md deleted file mode 100644 index 000ea3455..000000000 --- a/docs/index.md +++ /dev/null @@ -1,17 +0,0 @@ -# Welcome to MkDocs - -For full documentation visit [mkdocs.org](https://www.mkdocs.org). - -## Commands - -* `mkdocs new [dir-name]` - Create a new project. -* `mkdocs serve` - Start the live-reloading docs server. -* `mkdocs build` - Build the documentation site. -* `mkdocs -h` - Print help message and exit. - -## Project layout - - mkdocs.yml # The configuration file. - docs/ - index.md # The documentation homepage. - ... # Other markdown pages, images and other files. 
diff --git a/docs/latex-example.md b/docs/latex-example.md new file mode 100644 index 000000000..2591de954 --- /dev/null +++ b/docs/latex-example.md @@ -0,0 +1,106 @@ +# Effects and Mathematical Formulations + +This page demonstrates how to use LaTeX in flixOpt documentation and explains the Effects system. + +## Effects in flixOpt + +Effects in flixOpt represent impacts or metrics related to your energy system, such as costs, emissions, resource consumption, etc. One effect is designated as the optimization objective (typically costs), while others can have constraints. + +## Mathematical Formulations + +### Storage Model + +The state of charge of a storage evolves according to: + +$$SOC(t+1) = SOC(t) \cdot (1 - \lambda \cdot \Delta t) + \eta_{charge} \cdot P_{in}(t) \cdot \Delta t - \frac{P_{out}(t)}{\eta_{discharge}} \cdot \Delta t$$ + +Where: + +- $SOC(t)$ is the state of charge at time $t$ +- $\lambda$ is the self-discharge rate +- $\eta_{charge}$ is the charging efficiency +- $\eta_{discharge}$ is the discharging efficiency +- $P_{in}(t)$ is the charging power +- $P_{out}(t)$ is the discharging power +- $\Delta t$ is the time step + +### Linear Converter Efficiency + +For a linear converter, the relationship between input and output is: + +$$P_{out}(t) = \eta \cdot P_{in}(t)$$ + +Where: +- $P_{out}(t)$ is the output power +- $P_{in}(t)$ is the input power +- $\eta$ is the efficiency + +### Heat Pump COP + +For a heat pump, the relationship is: + +$$Q_{th}(t) = COP \cdot P_{el}(t)$$ + +Where: +- $Q_{th}(t)$ is the heat output +- $P_{el}(t)$ is the electrical input +- $COP$ is the coefficient of performance + +### Objective Function + +The objective function for cost minimization is: + +$$\min \left( \sum_{t=1}^{T} \sum_{c \in C} c_{op}(t) \cdot P_c(t) \cdot \Delta t + \sum_{c \in C} c_{inv} \cdot CAP_c \right)$$ + +Where: +- $c_{op}(t)$ is the operating cost at time $t$ +- $P_c(t)$ is the power of component $c$ at time $t$ +- $c_{inv}$ is the investment cost 
+- $CAP_c$ is the capacity of component $c$ + +## Effects API Documentation + +Effects are created using the `Effect` class: + +```python +import flixOpt as fo + +# Create a cost effect (optimization objective) +cost_effect = fo.Effect( + label="costs", + unit="€", + description="Total costs", + is_objective=True # This effect will be minimized +) + +# Create a CO2 emission effect with constraints +co2_effect = fo.Effect( + label="co2_emissions", + unit="kg_CO2", + description="CO2 emissions", + maximum_total=1000 # Maximum total emissions allowed +) + +# Add effects to the system +system.add_effects(cost_effect, co2_effect) +``` + +## Inline Formulas + +You can also use inline formulas like $E = mc^2$ or reference variables like $\eta_{boiler}$ within your text. + +## Multiple Equations Example + +The efficiency of a CHP unit must satisfy: + +$$\eta_{el} + \eta_{th} \leq \eta_{max}$$ + +The total flow through a bus must be balanced: + +$$\sum_{i \in I} F_{i,in}(t) = \sum_{j \in J} F_{j,out}(t)$$ + +For components with on/off decisions, the flow must satisfy: + +$$F_{min} \cdot \delta(t) \leq F(t) \leq F_{max} \cdot \delta(t)$$ + +Where $\delta(t)$ is a binary variable indicating if the component is on. diff --git a/docs/readme.md b/docs/readme.md new file mode 100644 index 000000000..639e48ae9 --- /dev/null +++ b/docs/readme.md @@ -0,0 +1,107 @@ +# flixOpt: Energy and Material Flow Optimization Framework + +**flixOpt** is a Python-based optimization framework designed to tackle energy and material flow problems using mixed-integer linear programming (MILP). Combining flexibility and efficiency, it provides a powerful platform for both dispatch and investment optimization challenges. + +## 🚀 Introduction + +flixOpt was developed by [TU Dresden](https://github.com/gewv-tu-dresden) as part of the SMARTBIOGRID project, funded by the German Federal Ministry for Economic Affairs and Energy. 
Building on the Matlab-based flixOptMat framework, flixOpt also incorporates concepts from [oemof/solph](https://github.com/oemof/oemof-solph). + +Although flixOpt is in its early stages, it is fully functional and ready for experimentation. Feedback and collaboration are highly encouraged to help shape its future. + +## 🌟 Key Features + +- **High-level Interface** with low-level control + - User-friendly interface for defining energy systems + - Fine-grained control for advanced configurations + - Pre-defined components like CHP, Heat Pump, Cooling Tower, etc. + +- **Investment Optimization** + - Combined dispatch and investment optimization + - Size and discrete investment decisions + - Integration with On/Off variables and constraints + +- **Multiple Effects** + - Couple effects (e.g., specific CO2 costs) + - Set constraints (e.g., max CO2 emissions) + - Easily switch optimization targets (e.g., costs vs CO2) + +- **Calculation Modes** + - **Full Mode** - Exact solutions with high computational requirements + - **Segmented Mode** - Speed up complex systems with variable time overlap + - **Aggregated Mode** - Typical periods for large-scale simulations + +## 📦 Installation + +Install flixOpt directly using pip: + +```bash +pip install git+https://github.com/flixOpt/flixOpt.git +``` + +For full functionality including visualization and time series aggregation: + +```bash +pip install "flixOpt[full] @ git+https://github.com/flixOpt/flixOpt.git" +``` + +## 🖥️ Quick Example + +```python +import flixOpt as fo +import numpy as np + +# Create timesteps +time_series = fo.create_datetime_array('2023-01-01', steps=24, freq='1h') +system = fo.FlowSystem(time_series) + +# Create buses +heat_bus = fo.Bus("Heat") +electricity_bus = fo.Bus("Electricity") + +# Create flows +heat_demand = fo.Flow( + label="heat_demand", + bus=heat_bus, + fixed_relative_profile=100*np.sin(np.linspace(0, 2*np.pi, 24))**2 + 50 +) + +# Create a heat pump component +heat_pump = 
fo.linear_converters.HeatPump( + label="HeatPump", + COP=3.0, + P_el=fo.Flow("power", electricity_bus), + Q_th=fo.Flow("heat", heat_bus) +) + +# Add everything to the system +system.add_elements(heat_bus, electricity_bus) +system.add_components(heat_pump) +``` + +## ⚙️ How It Works + +flixOpt transforms your energy system model into a mathematical optimization problem, solves it using state-of-the-art solvers, and returns the optimal operation strategy and investment decisions. + +## 📚 Documentation + +- [Getting Started](getting-started.md) - Installation and first steps +- [Concepts](concepts/overview.md) - Core concepts and architecture +- [Examples](examples/basic.md) - Usage examples +- [API Reference](api/flow-system.md) - Full API documentation + +## 🛠️ Compatible Solvers + +flixOpt works with various solvers: + +- HiGHS (installed by default) +- CBC +- GLPK +- Gurobi +- CPLEX + +## 📝 Citation + +If you use flixOpt in your research or project, please cite: + +- **Main Citation:** [DOI:10.18086/eurosun.2022.04.07](https://doi.org/10.18086/eurosun.2022.04.07) +- **Short Overview:** [DOI:10.13140/RG.2.2.14948.24969](https://doi.org/10.13140/RG.2.2.14948.24969) diff --git a/mkdocs.yml b/mkdocs.yml index 8669aa925..b16e4fb41 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -5,18 +5,16 @@ repo_url: https://github.com/flixOpt/flixopt repo_name: flixOpt/flixopt nav: - - Home: index.md + - Home: readme.md - Getting Started: getting-started.md - - User Guide: - - Installation: guide/installation.md - - Concepts: guide/concepts.md - - Components: guide/components.md + - Concepts: concepts/overview.md - Examples: - Basic Example: examples/basic.md - Advanced Example: examples/advanced.md - API Reference: - - FlowSystem: api/flow_system.md + - FlowSystem: api/flow-system.md - Components: api/components.md + - LinearConverters: api/linear-converters.md - Effects: api/effects.md - Calculations: api/calculation.md @@ -24,9 +22,9 @@ theme: name: material palette: primary: indigo 
- accent: indigo - logo: images/logo.png - favicon: images/favicon.png + accent: deep purple + logo: images/flixopt-icon.svg + favicon: images/flixopt-icon.svg icon: repo: fontawesome/brands/github features: @@ -65,4 +63,8 @@ plugins: handlers: python: options: - show_source: true \ No newline at end of file + show_source: true + docstring_style: google + show_root_heading: true + show_category_heading: true + show_if_no_docstring: true \ No newline at end of file diff --git a/pics/flixopt-icon.svg b/pics/flixopt-icon.svg new file mode 100644 index 000000000..04a6a6851 --- /dev/null +++ b/pics/flixopt-icon.svg @@ -0,0 +1 @@ +flixOpt \ No newline at end of file From 56d05a62771e2808143655e482afc866763c2bfb Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 14 Mar 2025 08:34:53 +0100 Subject: [PATCH 03/87] Improve docs colors --- mkdocs.yml | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/mkdocs.yml b/mkdocs.yml index b16e4fb41..b33a38949 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -21,8 +21,18 @@ nav: theme: name: material palette: - primary: indigo - accent: deep purple + - scheme: default # Light mode + primary: indigo + accent: deep purple + toggle: + icon: material/brightness-7 + name: Switch to dark mode + - scheme: slate # Dark mode + primary: indigo + accent: deep purple + toggle: + icon: material/brightness-4 + name: Switch to light mode logo: images/flixopt-icon.svg favicon: images/flixopt-icon.svg icon: From 2fb494bb1a3c7839e4406a6eab10fec1dc7a1ba1 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 14 Mar 2025 08:40:06 +0100 Subject: [PATCH 04/87] Improve flow-system.md --- docs/api/flow-system.md | 91 +++++++++++++++++------------------------ 1 file changed, 38 insertions(+), 53 deletions(-) diff --git a/docs/api/flow-system.md b/docs/api/flow-system.md index a89ac21c5..11f8da7ae 100644 --- a/docs/api/flow-system.md +++ 
b/docs/api/flow-system.md @@ -1,71 +1,56 @@ -# FlowSystem API Reference +# FlowSystem The FlowSystem is the central organizing component in flixOpt, responsible for managing the time series, components, buses, and effects that make up your energy system model. -## FlowSystem Class +## API Reference ::: flixOpt.flow_system.FlowSystem options: - members: true - show_root_heading: true - show_source: true + show_root_heading: false + show_root_toc_entry: false + show_object_full_path: false + show_category_heading: false + show_source: false + members_order: source + heading_level: 3 + docstring_section_style: table -## Examples - -### Creating a FlowSystem +## Usage Examples ```python import flixOpt as fx import pandas as pd -# Create the timesteps with hourly steps for one day -timesteps = pd.date_range('2020-01-01', periods=24, freq='h') -# Initialize the FlowSystem with the timesteps -flow_system = fx.FlowSystem(timesteps=timesteps) +# Create timesteps with hourly steps for one day +timesteps = pd.date_range('2023-01-01', steps=24, freq='1h') -# Add components, buses, and effects -heat_bus = fx.Bus("Heat") -flow_system.add_elements(heat_bus) +# Initialize the FlowSystem +flow_system = fx.FlowSystem(timesteps) -# Visualize the network +# Add buses, components and effects +heat_bus = fx.Bus("Heat") +electricity_bus = fx.Bus("Electricity") +costs = fx.Effect("costs", "€", "Costs", is_standard=True, is_objective=True) +flow_system.add_elements(heat_bus, electricity_bus, costs) + +# You can add components with their connected flows +heat_pump = fx.linear_converters.HeatPump( + label="HeatPump", + COP=3.0, + P_el=fx.Flow("power", electricity_bus.label, effects_per_flow_hour=0.2), + Q_th=fx.Flow("heat", heat_bus.label) +) +flow_system.add_elements(heat_pump) + +# Access components and flow_system structure +print(flow_system.components) # Dictionary of all components +print(flow_system.buses) # Dictionary of all buses +print(flow_system.flows) # Dictionary of all flows + 
+# Visualize the flow_system network flow_system.plot_network(show=True) -``` - -### Accessing FlowSystem Components - -```python -# Get a list of all components -components = flow_system.components - -# Get a specific component by label -if "Boiler" in flow_system.components: - boiler = flow_system.components["Boiler"] - -# Get all flows in the flow_system -flows = flow_system.flows - -# Get all buses in the flow_system -buses = flow_system.buses -``` - -### Time Series and Indices - -```python -# Get the full time series -full_time = flow_system.time_series - -# Get a subset of the time series -indices = range(12) # First 12 hours -time_subset, time_with_end, dt_hours, total_hours = flow_system.get_time_data_from_indices(indices) -``` - -### Saving System Information - -```python -# Save flow_system information to a JSON file -flow_system.to_json("system_info.json") -# Save flow_system visualization -flow_system.visualize_network(path="system_network.html", show=False) +# Save the flow_system definition +flow_system.to_json("flow_system_definition.json") ``` From 24545f51e687bf86fd862178b6c47ad70c83d08d Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 14 Mar 2025 09:39:18 +0100 Subject: [PATCH 05/87] Improve docs creation --- docs/{readme.md => index.md} | 0 mkdocs.yml | 69 ++++++++++++++++++------------------ scripts/gen_ref_pages.py | 35 ++++++++++++++++++ 3 files changed, 70 insertions(+), 34 deletions(-) rename docs/{readme.md => index.md} (100%) create mode 100644 scripts/gen_ref_pages.py diff --git a/docs/readme.md b/docs/index.md similarity index 100% rename from docs/readme.md rename to docs/index.md diff --git a/mkdocs.yml b/mkdocs.yml index b33a38949..717f26ed2 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -4,19 +4,6 @@ site_url: https://flixopt.github.io/flixopt/ repo_url: https://github.com/flixOpt/flixopt repo_name: flixOpt/flixopt -nav: - - Home: readme.md - - Getting Started: getting-started.md - - 
Concepts: concepts/overview.md - - Examples: - - Basic Example: examples/basic.md - - Advanced Example: examples/advanced.md - - API Reference: - - FlowSystem: api/flow-system.md - - Components: api/components.md - - LinearConverters: api/linear-converters.md - - Effects: api/effects.md - - Calculations: api/calculation.md theme: name: material @@ -38,43 +25,57 @@ theme: icon: repo: fontawesome/brands/github features: + - navigation.instant + - navigation.instant.progress + - navigation.tracking - navigation.tabs - navigation.sections - - toc.integrate - - search.suggest - - search.highlight - - content.tabs.link - - content.code.annotation + - navigation.top + - navigation.footer + - toc.follow + - navigation.indexes + - content.action.edit + - content.action.view - content.code.copy + - content.code.annotate + - content.tooltips markdown_extensions: + - admonition + - codehilite - pymdownx.highlight: anchor_linenums: true + line_spans: __span + pygments_lang_class: true - pymdownx.inlinehilite - pymdownx.snippets - pymdownx.superfences - - pymdownx.arithmatex: - generic: true - - admonition - - footnotes - attr_list + - abbr - md_in_html - - def_list - - tables - -extra_javascript: - - javascripts/mathjax.js - - https://polyfill.io/v3/polyfill.min.js?features=es6 - - https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js + - footnotes + - pymdownx.tabbed: + alternate_style: true + - pymdownx.emoji: + emoji_index: !!python/name:material.extensions.emoji.twemoji + emoji_generator: !!python/name:material.extensions.emoji.to_svg plugins: - search + - table-reader - mkdocstrings: handlers: python: options: - show_source: true - docstring_style: google - show_root_heading: true - show_category_heading: true - show_if_no_docstring: true \ No newline at end of file + merge_init_into_class: true + docstring_options: + ignore_init_summary: true + trim_doctest_flags: true + - literate-nav: + nav_file: SUMMARY.md + - gen-files: + scripts: + - scripts/gen_ref_pages.py + 
+watch: + - flixOpt \ No newline at end of file diff --git a/scripts/gen_ref_pages.py b/scripts/gen_ref_pages.py new file mode 100644 index 000000000..ab477d8d5 --- /dev/null +++ b/scripts/gen_ref_pages.py @@ -0,0 +1,35 @@ +"""Generate the code reference pages and navigation.""" + +from pathlib import Path + +import mkdocs_gen_files + +nav = mkdocs_gen_files.Nav() + +root = Path(__file__).parent.parent +src = root / "src" / "pyoframe" + +for path in sorted(src.rglob("*.py")): + module_path = path.relative_to(src).with_suffix("") + doc_path = path.relative_to(src).with_suffix(".md") + full_doc_path = Path("reference", doc_path) + + parts = ("pyoframe",) + tuple(module_path.parts) + + if parts[-1] == "__init__": + parts = parts[:-1] + doc_path = doc_path.with_name("index.md") + full_doc_path = full_doc_path.with_name("index.md") + elif parts[-1] == "__main__" or parts[-1].startswith("_"): + continue + + nav[parts] = doc_path.as_posix() + + with mkdocs_gen_files.open(full_doc_path, "w") as fd: + ident = ".".join(parts) + fd.write(f"::: {ident}") + + mkdocs_gen_files.set_edit_path(full_doc_path, Path("../") / path.relative_to(root)) + +with mkdocs_gen_files.open("reference/SUMMARY.md", "w") as nav_file: + nav_file.writelines(nav.build_literate_nav()) \ No newline at end of file From 3b5250afac96a2abe78f1feb5d8d12c4fba78044 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 14 Mar 2025 09:50:54 +0100 Subject: [PATCH 06/87] Temp --- docs/api/{components.md => component.md} | 0 ...{linear-converters.md => linear-converter.md} | 0 mkdocs.yml | 16 ++++++++++++++-- 3 files changed, 14 insertions(+), 2 deletions(-) rename docs/api/{components.md => component.md} (100%) rename docs/api/{linear-converters.md => linear-converter.md} (100%) diff --git a/docs/api/components.md b/docs/api/component.md similarity index 100% rename from docs/api/components.md rename to docs/api/component.md diff --git a/docs/api/linear-converters.md 
b/docs/api/linear-converter.md similarity index 100% rename from docs/api/linear-converters.md rename to docs/api/linear-converter.md diff --git a/mkdocs.yml b/mkdocs.yml index 717f26ed2..6fc31d7a2 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -5,6 +5,20 @@ repo_url: https://github.com/flixOpt/flixopt repo_name: flixOpt/flixopt +nav: + - Home: index.md + - Getting Started: getting-started.md + - Concepts: concepts/overview.md + - Examples: + - Basic Example: examples/basic.md + - Advanced Example: examples/advanced.md + - API Reference: + - FlowSystem: api/flow-system.md + - Component: api/component.md + - LinearConverter: api/linear-converter.md + - Effect: api/effect.md + - Calculation: api/calculation.md + theme: name: material palette: @@ -71,8 +85,6 @@ plugins: docstring_options: ignore_init_summary: true trim_doctest_flags: true - - literate-nav: - nav_file: SUMMARY.md - gen-files: scripts: - scripts/gen_ref_pages.py From 60e985156a8eba0f48be316d363163e5dd432e20 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 14 Mar 2025 11:49:26 +0100 Subject: [PATCH 07/87] Improve --- docs/concepts/overview.md | 2 +- mkdocs.yml | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/docs/concepts/overview.md b/docs/concepts/overview.md index 2515a7407..3ec29f5db 100644 --- a/docs/concepts/overview.md +++ b/docs/concepts/overview.md @@ -32,7 +32,7 @@ Every flixOpt model starts with creating a FlowSystem. - Have a size (fixed or optimized as an investment decision) - Can have fixed profiles (for demands or renewable generation) - Can have constraints (min/max, total flow hours, etc.) 
-- Can have associated [Effects](#Effects) +- Can have associated [Effects](#effects) ### Components diff --git a/mkdocs.yml b/mkdocs.yml index 6fc31d7a2..735ae6538 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -89,5 +89,10 @@ plugins: scripts: - scripts/gen_ref_pages.py +extra_javascript: + - javascripts/mathjax.js + - https://polyfill.io/v3/polyfill.min.js?features=es6 + - https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js + watch: - flixOpt \ No newline at end of file From fbb81a1299c9b6c9d5899678557326393a80233d Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 14 Mar 2025 14:34:59 +0100 Subject: [PATCH 08/87] Update --- docs/api/bus.md | 1 + docs/api/{ => components}/component.md | 76 +------------------ docs/api/components/interfaces.md | 3 + .../linear-converters.md} | 0 docs/api/components/linear_converter.md | 15 ++++ docs/api/components/sink.md | 1 + docs/api/components/source.md | 1 + docs/api/components/storage.md | 17 +++++ docs/api/effect.md | 1 + docs/api/flow-system.md | 15 +--- docs/api/flow.md | 1 + mkdocs.yml | 58 ++++++++++---- pyproject.toml | 8 ++ 13 files changed, 96 insertions(+), 101 deletions(-) create mode 100644 docs/api/bus.md rename docs/api/{ => components}/component.md (51%) create mode 100644 docs/api/components/interfaces.md rename docs/api/{linear-converter.md => components/linear-converters.md} (100%) create mode 100644 docs/api/components/linear_converter.md create mode 100644 docs/api/components/sink.md create mode 100644 docs/api/components/source.md create mode 100644 docs/api/components/storage.md create mode 100644 docs/api/effect.md create mode 100644 docs/api/flow.md diff --git a/docs/api/bus.md b/docs/api/bus.md new file mode 100644 index 000000000..d758470a9 --- /dev/null +++ b/docs/api/bus.md @@ -0,0 +1 @@ +::: flixOpt.elements.Bus \ No newline at end of file diff --git a/docs/api/component.md b/docs/api/components/component.md similarity index 51% rename from 
docs/api/component.md rename to docs/api/components/component.md index 8bd4b554e..93ccc5b8b 100644 --- a/docs/api/component.md +++ b/docs/api/components/component.md @@ -1,92 +1,20 @@ # Components API Reference -Components in flixOpt represent physical entities that consume, produce, or transform energy and material flows. This page documents the core component classes available in flixOpt. - -## Base Component - -The `Component` class is the base class for all components in flixOpt: - ::: flixOpt.elements.Component - options: - members: true - show_root_heading: true - show_source: true - -## Storage - -The `Storage` class represents energy or material storage components: ::: flixOpt.components.Storage - options: - members: true - show_root_heading: true - show_source: true - -### Storage Model - -::: flixOpt.components.StorageModel - options: - members: true - show_root_heading: true - show_source: true - -## LinearConverter - -The `LinearConverter` class handles linear conversion between flows: ::: flixOpt.components.LinearConverter - options: - members: true - show_root_heading: true - show_source: true - -### LinearConverter Model - -::: flixOpt.components.LinearConverterModel - options: - members: true - show_root_heading: true - show_source: true - -## Transmission - -The `Transmission` class models the flows between two sides with potential losses: ::: flixOpt.components.Transmission - options: - members: true - show_root_heading: true - show_source: true - -### Transmission Model - -::: flixOpt.components.TransmissionModel - options: - members: true - show_root_heading: true - show_source: true - -## Source, Sink, and SourceAndSink - -Classes for sources and sinks in the system: ::: flixOpt.components.Source - options: - members: true - show_root_heading: true - show_source: true + ::: flixOpt.components.Sink - options: - members: true - show_root_heading: true - show_source: true + ::: flixOpt.components.SourceAndSink - options: - members: true - 
show_root_heading: true - show_source: true ## Examples diff --git a/docs/api/components/interfaces.md b/docs/api/components/interfaces.md new file mode 100644 index 000000000..689176463 --- /dev/null +++ b/docs/api/components/interfaces.md @@ -0,0 +1,3 @@ +:: flixOpt.interfaces.OnOffParameters + +:: flixOpt.interfaces.InvestParameters \ No newline at end of file diff --git a/docs/api/linear-converter.md b/docs/api/components/linear-converters.md similarity index 100% rename from docs/api/linear-converter.md rename to docs/api/components/linear-converters.md diff --git a/docs/api/components/linear_converter.md b/docs/api/components/linear_converter.md new file mode 100644 index 000000000..a399d13ca --- /dev/null +++ b/docs/api/components/linear_converter.md @@ -0,0 +1,15 @@ +::: flixOpt.components.LinearConverter + +### Example Usage + +```python +import flixOpt as fx + +# Create a heat pump with COP = 3 +heat_pump = fx.LinearConverter( + label="HeatPump", + inputs=[fx.Flow(label="power_in", bus='Heat')], + outputs=[fx.Flow(label="heat_out", bus='Heat')], + conversion_factors=[{"power_in": 3, "heat_out": 1}] +) +``` diff --git a/docs/api/components/sink.md b/docs/api/components/sink.md new file mode 100644 index 000000000..4b8506275 --- /dev/null +++ b/docs/api/components/sink.md @@ -0,0 +1 @@ +::: flixOpt.components.Sink \ No newline at end of file diff --git a/docs/api/components/source.md b/docs/api/components/source.md new file mode 100644 index 000000000..60f787b56 --- /dev/null +++ b/docs/api/components/source.md @@ -0,0 +1 @@ +::: flixOpt.components.Source \ No newline at end of file diff --git a/docs/api/components/storage.md b/docs/api/components/storage.md new file mode 100644 index 000000000..09a362a63 --- /dev/null +++ b/docs/api/components/storage.md @@ -0,0 +1,17 @@ +::: flixOpt.components.Storage + +### Creating a Storage + +```python +import flixOpt as fx + +thermal_storage = fx.Storage( + label="ThermalStorage", + charging=fx.Flow("charging", 
"Wärme", size=100), + discharging=fx.Flow("discharging", "Wärme", size=100), + capacity_in_flow_hours=1000, # 1000 kWh capacity + relative_loss_per_hour=0.01, # 1% loss per hour + eta_charge=0.95, # 95% charging efficiency + eta_discharge=0.95 # 95% discharging efficiency +) +``` diff --git a/docs/api/effect.md b/docs/api/effect.md new file mode 100644 index 000000000..f8b36b75a --- /dev/null +++ b/docs/api/effect.md @@ -0,0 +1 @@ +::: flixOpt.effects.Effect \ No newline at end of file diff --git a/docs/api/flow-system.md b/docs/api/flow-system.md index 11f8da7ae..d3aa8bf30 100644 --- a/docs/api/flow-system.md +++ b/docs/api/flow-system.md @@ -1,19 +1,6 @@ -# FlowSystem - -The FlowSystem is the central organizing component in flixOpt, responsible for managing the time series, components, buses, and effects that make up your energy system model. - -## API Reference +## Overview ::: flixOpt.flow_system.FlowSystem - options: - show_root_heading: false - show_root_toc_entry: false - show_object_full_path: false - show_category_heading: false - show_source: false - members_order: source - heading_level: 3 - docstring_section_style: table ## Usage Examples diff --git a/docs/api/flow.md b/docs/api/flow.md new file mode 100644 index 000000000..e11bddc5f --- /dev/null +++ b/docs/api/flow.md @@ -0,0 +1 @@ +::: flixOpt.elements.Flow \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index 735ae6538..68484130d 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -14,10 +14,28 @@ nav: - Advanced Example: examples/advanced.md - API Reference: - FlowSystem: api/flow-system.md - - Component: api/component.md - - LinearConverter: api/linear-converter.md - - Effect: api/effect.md + - Elements: + - Effect: api/effect.md + - Flow: api/flow.md + - Bus: api/bus.md + - Components: + - Storage: api/components/storage.md + - LinearConverter: api/components/linear-converter.md + - Transmission: api/components/transmission.md + - Source: api/components/source.md + - Sink: 
api/components/sink.md + - SourceAndSink: api/components/source-and-sink.md + - LinearConverter: api/components/linear-converter.md + - LinearConverter Subclasses: api/components/linear-converters.md + - Interfaces: + - OnOffParameters: api/interfaces/on-off-parameters.md + - InvestParameters: api/interfaces/invest-parameters.md + + - Calculation: api/calculation.md + - LaTeX: latex-example.md + - Contribute: contribute.md + theme: name: material @@ -48,11 +66,14 @@ theme: - navigation.footer - toc.follow - navigation.indexes + - search.suggest + - search.highlight - content.action.edit - content.action.view - content.code.copy - content.code.annotate - content.tooltips + - content.code.copy markdown_extensions: - admonition @@ -68,6 +89,7 @@ markdown_extensions: - abbr - md_in_html - footnotes + - tables - pymdownx.tabbed: alternate_style: true - pymdownx.emoji: @@ -78,21 +100,31 @@ plugins: - search - table-reader - mkdocstrings: + default_handler: python handlers: python: + paths: [flixOpt] # Package directory relative to mkdocs.yml options: - merge_init_into_class: true - docstring_options: - ignore_init_summary: true - trim_doctest_flags: true - - gen-files: - scripts: - - scripts/gen_ref_pages.py + group_by_category: true + show_source: false + show_root_heading: true + show_object_full_path: false + show_category_heading: true + show_if_no_docstring: true + show_signature: true + members_order: source + heading_level: 2 + show_submodules: true + docstring_style: google + show_signature_annotations: true + separate_signature: true + filters: ["!^_"] # Exclude private methods + docstring_section_style: table extra_javascript: - - javascripts/mathjax.js - - https://polyfill.io/v3/polyfill.min.js?features=es6 - - https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js + - javascripts/mathjax.js # Custom MathJax 3 CDN Configuration + - https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js #MathJax 3 CDN + - 
https://polyfill.io/v3/polyfill.min.js?features=es6 #Support for older browsers watch: - flixOpt \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 2d8b8fd03..27a2461d6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -59,6 +59,14 @@ full = [ "netcdf4 >= 1.6.1", # Used for saving and loading the FlowSystem with compression ] +docs = [ + "mkdocs-material==9.*", + "mkdocstrings-python", + "mkdocs-gen-files", + "mkdocs-section-index", + "mkdocs-table-reader-plugin" +] + [project.urls] homepage = "https://tu-dresden.de/ing/maschinenwesen/iet/gewv/forschung/forschungsprojekte/flixopt" repository = "https://github.com/flixOpt/flixOpt" From 3f3042c680c75bccbe1ec7a48d7f56915e936754 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 14 Mar 2025 15:40:33 +0100 Subject: [PATCH 09/87] Improve --- docs/api/calculation.md | 118 ---------------------------- docs/api/components/interfaces.md | 4 +- docs/api/components/transmission.md | 1 + docs/api/datatypes.md | 3 + mkdocs.yml | 9 ++- 5 files changed, 14 insertions(+), 121 deletions(-) create mode 100644 docs/api/components/transmission.md create mode 100644 docs/api/datatypes.md diff --git a/docs/api/calculation.md b/docs/api/calculation.md index f72de0a67..d21a03a0c 100644 --- a/docs/api/calculation.md +++ b/docs/api/calculation.md @@ -1,123 +1,5 @@ -# Calculation API Reference - -The calculation module contains classes for solving optimization problems in different ways. flixOpt offers three main calculation modes, each with different performance characteristics and use cases. 
- -## Calculation Base Class - -::: flixOpt.calculation.Calculation - options: - members: true - show_root_heading: true - show_source: true - -## Full Calculation - -The `FullCalculation` class solves the entire optimization problem at once: - ::: flixOpt.calculation.FullCalculation - options: - members: true - show_root_heading: true - show_source: true - -## Segmented Calculation - -The `SegmentedCalculation` class splits the problem into segments to improve performance: ::: flixOpt.calculation.SegmentedCalculation - options: - members: true - show_root_heading: true - show_source: true - -## Aggregated Calculation - -The `AggregatedCalculation` class uses typical periods to reduce computational requirements: ::: flixOpt.calculation.AggregatedCalculation - options: - members: true - show_root_heading: true - show_source: true - -## Aggregation Parameters - -::: flixOpt.aggregation.AggregationParameters - options: - members: true - show_root_heading: true - show_source: true - -## Examples - -### Full Calculation Example - -```python -import flixOpt as fo - -# Create system and add components -system = fo.FlowSystem(time_series) -# ... add components, buses, etc. - -# Create a full calculation -calculation = fo.FullCalculation("Example", system) - -# Choose a solver -solver = fo.HighsSolver() - -# Run the calculation -calculation.do_modeling() -calculation.solve(solver, save_results=True) - -# Access results -results = calculation.results() -``` - -### Segmented Calculation Example - -```python -import flixOpt as fo - -# Create system and add components -system = fo.FlowSystem(time_series) -# ... add components, buses, etc. 
- -# Create a segmented calculation -segment_length = 24 # 24 time steps per segment -overlap_length = 6 # 6 time steps overlap between segments -calculation = fo.SegmentedCalculation( - "Segmented_Example", - system, - segment_length=segment_length, - overlap_length=overlap_length -) - -# Choose a solver -solver = fo.HighsSolver() - -# Run the calculation -calculation.do_modeling_and_solve(solver, save_results=True) - -# Access results - combining arrays from all segments -results = calculation.results(combined_arrays=True) -``` - -### Aggregated Calculation Example - -```python -import flixOpt as fo - -# Create system and add components -system = fo.FlowSystem(time_series) -# ... add components, buses, etc. - -# Define aggregation parameters -aggregation_params = fo.AggregationParameters( - hours_per_period=24, # 24 hours per typical period - nr_of_periods=10, # 10 typical periods - fix_storage_flows=False, # Don't fix storage flows - aggregate_data_and_fix_non_binary_vars=True # Aggregate all time series data -) - -# Create an aggregated calculation -calculation = fo.A \ No newline at end of file diff --git a/docs/api/components/interfaces.md b/docs/api/components/interfaces.md index 689176463..40ac1d6d5 100644 --- a/docs/api/components/interfaces.md +++ b/docs/api/components/interfaces.md @@ -1,3 +1,5 @@ :: flixOpt.interfaces.OnOffParameters -:: flixOpt.interfaces.InvestParameters \ No newline at end of file +:: flixOpt.interfaces.InvestParameters + +::: flixOpt.aggregation.AggregationParameters diff --git a/docs/api/components/transmission.md b/docs/api/components/transmission.md new file mode 100644 index 000000000..f86c8c891 --- /dev/null +++ b/docs/api/components/transmission.md @@ -0,0 +1 @@ +::: flixOpt.components.Transmission \ No newline at end of file diff --git a/docs/api/datatypes.md b/docs/api/datatypes.md new file mode 100644 index 000000000..8d79f906f --- /dev/null +++ b/docs/api/datatypes.md @@ -0,0 +1,3 @@ +::: flixOpt.core.Scalar +::: 
flixOpt.core.NumericData +::: flixOpt.core.NumericDataTS diff --git a/mkdocs.yml b/mkdocs.yml index 68484130d..dbc400c90 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -33,6 +33,7 @@ nav: - Calculation: api/calculation.md + - Datatypes: api/datatypes.md - LaTeX: latex-example.md - Contribute: contribute.md @@ -115,11 +116,15 @@ plugins: members_order: source heading_level: 2 show_submodules: true - docstring_style: google + docstring_style: numpy show_signature_annotations: true separate_signature: true - filters: ["!^_"] # Exclude private methods + filters: ["!^_[^_]", "^__init__"] # Exclude private methods but include __init__ docstring_section_style: table + show_bases: true + show_docstring_attributes: false # Exclude attributes + merge_init_into_class: true # Merge __init__ docs into class docs + infer_type_annotations: true extra_javascript: - javascripts/mathjax.js # Custom MathJax 3 CDN Configuration From 9b2bfa283605bbd06aa51f28ee6799a36b7ad1d1 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 14 Mar 2025 16:05:18 +0100 Subject: [PATCH 10/87] Improve --- docs/api/components/component.md | 87 --------------- docs/api/components/source_and_sink.md | 1 + docs/api/interfaces/aggregation_parameters.md | 1 + docs/api/interfaces/invest_parameters.md | 1 + docs/api/interfaces/on_off_parameters.md | 1 + mkdocs.yml | 102 ++++++++++++------ 6 files changed, 75 insertions(+), 118 deletions(-) delete mode 100644 docs/api/components/component.md create mode 100644 docs/api/components/source_and_sink.md create mode 100644 docs/api/interfaces/aggregation_parameters.md create mode 100644 docs/api/interfaces/invest_parameters.md create mode 100644 docs/api/interfaces/on_off_parameters.md diff --git a/docs/api/components/component.md b/docs/api/components/component.md deleted file mode 100644 index 93ccc5b8b..000000000 --- a/docs/api/components/component.md +++ /dev/null @@ -1,87 +0,0 @@ -# Components API Reference - -::: 
flixOpt.elements.Component - -::: flixOpt.components.Storage - -::: flixOpt.components.LinearConverter - -::: flixOpt.components.Transmission - -::: flixOpt.components.Source - - -::: flixOpt.components.Sink - - -::: flixOpt.components.SourceAndSink - -## Examples - -### Creating a LinearConverter - -```python -import flixOpt as fo - -# Create buses -electricity_bus = fo.Bus("Electricity") -heat_bus = fo.Bus("Heat") - -# Create flows -power_input = fo.Flow("power_in", electricity_bus) -heat_output = fo.Flow("heat_out", heat_bus) - -# Create a heat pump with COP = 3 -heat_pump = fo.components.LinearConverter( - label="HeatPump", - inputs=[power_input], - outputs=[heat_output], - conversion_factors=[{power_input: 3, heat_output: 1}] -) -``` - -### Creating a Storage - -```python -import flixOpt as fo - -# Create a bus -heat_bus = fo.Bus("Heat") - -# Create charging and discharging flows -charging = fo.Flow("charging", heat_bus) -discharging = fo.Flow("discharging", heat_bus) - -# Create a thermal storage -thermal_storage = fo.components.Storage( - label="ThermalStorage", - charging=charging, - discharging=discharging, - capacity_in_flow_hours=1000, # 1000 kWh capacity - relative_loss_per_hour=0.01, # 1% loss per hour - eta_charge=0.95, # 95% charging efficiency - eta_discharge=0.95 # 95% discharging efficiency -) -``` - -### Creating a Transmission Component - -```python -import flixOpt as fo - -# Create buses -bus_a = fo.Bus("Location_A") -bus_b = fo.Bus("Location_B") - -# Create flows -flow_a_to_b = fo.Flow("flow_a_to_b", bus_a) -flow_b_to_a = fo.Flow("flow_b_to_a", bus_b) - -# Create a transmission component with 5% losses -transmission = fo.components.Transmission( - label="Transmission_Line", - in1=flow_a_to_b, - out1=flow_b_to_a, - relative_losses=0.05 -) -``` diff --git a/docs/api/components/source_and_sink.md b/docs/api/components/source_and_sink.md new file mode 100644 index 000000000..4f9d5c7a2 --- /dev/null +++ b/docs/api/components/source_and_sink.md @@ 
-0,0 +1 @@ +::: flixOpt.components.SourceAndSink \ No newline at end of file diff --git a/docs/api/interfaces/aggregation_parameters.md b/docs/api/interfaces/aggregation_parameters.md new file mode 100644 index 000000000..2c07d9183 --- /dev/null +++ b/docs/api/interfaces/aggregation_parameters.md @@ -0,0 +1 @@ +::: flixOpt.aggregation.AggregationParameters \ No newline at end of file diff --git a/docs/api/interfaces/invest_parameters.md b/docs/api/interfaces/invest_parameters.md new file mode 100644 index 000000000..a195d98e9 --- /dev/null +++ b/docs/api/interfaces/invest_parameters.md @@ -0,0 +1 @@ +::: flixOpt.interface.InvestParameters \ No newline at end of file diff --git a/docs/api/interfaces/on_off_parameters.md b/docs/api/interfaces/on_off_parameters.md new file mode 100644 index 000000000..8012a19dc --- /dev/null +++ b/docs/api/interfaces/on_off_parameters.md @@ -0,0 +1 @@ +::: flixOpt.interface.OnOffParameters \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index dbc400c90..fdc6ea4fa 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -20,16 +20,17 @@ nav: - Bus: api/bus.md - Components: - Storage: api/components/storage.md - - LinearConverter: api/components/linear-converter.md + - LinearConverter: api/components/linear_converter.md - Transmission: api/components/transmission.md - Source: api/components/source.md - Sink: api/components/sink.md - - SourceAndSink: api/components/source-and-sink.md + - SourceAndSink: api/components/source_and_sink.md - LinearConverter: api/components/linear-converter.md - LinearConverter Subclasses: api/components/linear-converters.md - Interfaces: - - OnOffParameters: api/interfaces/on-off-parameters.md - - InvestParameters: api/interfaces/invest-parameters.md + - OnOffParameters: api/interfaces/on_off_parameters.md + - InvestParameters: api/interfaces/invest_parameters.md + - AggregationParameters: api/interfaces/aggregation_parameters.md - Calculation: api/calculation.md @@ -97,34 +98,73 @@ markdown_extensions: 
emoji_index: !!python/name:material.extensions.emoji.twemoji emoji_generator: !!python/name:material.extensions.emoji.to_svg + plugins: - - search - - table-reader - - mkdocstrings: - default_handler: python - handlers: - python: - paths: [flixOpt] # Package directory relative to mkdocs.yml - options: - group_by_category: true - show_source: false - show_root_heading: true - show_object_full_path: false - show_category_heading: true - show_if_no_docstring: true - show_signature: true - members_order: source - heading_level: 2 - show_submodules: true - docstring_style: numpy - show_signature_annotations: true - separate_signature: true - filters: ["!^_[^_]", "^__init__"] # Exclude private methods but include __init__ - docstring_section_style: table - show_bases: true - show_docstring_attributes: false # Exclude attributes - merge_init_into_class: true # Merge __init__ docs into class docs - infer_type_annotations: true + - search # Enables the search functionality in the documentation + - table-reader # Allows including tables from external files + - mkdocstrings: # Handles automatic API documentation generation + default_handler: python # Sets Python as the default language + handlers: + python: # Configuration for Python code documentation + options: + # Controls which members to include or exclude from documentation + # "!^_[^_]" excludes private members (single underscore) + # "^__init__" explicitly includes constructor methods + filters: ["!^_[^_]", "^__init__"] + + # Sets NumPy as the docstring style, recognizing sections like "Parameters", "Returns" + docstring_style: numpy + + # Renders parameter sections as tables instead of lists for better readability + docstring_section_style: table + + # Hides the source code implementation from documentation + show_source: false + + # Displays simple class names instead of full import paths + show_object_full_path: false + + # Shows class attributes in the documentation + show_docstring_attributes: true + + # 
Documents objects even if they don't have docstrings + show_if_no_docstring: true + + # Displays category headings (Methods, Attributes, etc.) for organization + show_category_heading: true + + # Shows method signatures with parameters + show_signature: true + + # Includes type annotations in the signatures when available + show_signature_annotations: true + + # Displays signatures separate from descriptions for cleaner layout + separate_signature: true + + # Promotes constructor parameters to class-level documentation + merge_init_into_class: true + + # Uses Python type hints to supplement docstring information + infer_type_annotations: true + + # Sets the base heading level for documented objects (h2) + heading_level: 2 + + # Orders members as they appear in the source code + members_order: source + + # Include members inherited from parent classes + inherited_members: true + selection: + members_order: source # Keep order within each kind + order_by_kind: + - function + - method + - property + - attribute + - module + - class extra_javascript: - javascripts/mathjax.js # Custom MathJax 3 CDN Configuration From 1dea8befe194ad5ba3a02ad8db7a4b121fc451e4 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 14 Mar 2025 16:31:05 +0100 Subject: [PATCH 11/87] Improve colors --- mkdocs.yml | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/mkdocs.yml b/mkdocs.yml index fdc6ea4fa..1eaffc048 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -42,15 +42,19 @@ nav: theme: name: material palette: - - scheme: default # Light mode - primary: indigo - accent: deep purple + # Light mode + - media: "(prefers-color-scheme: light)" + scheme: default + primary: indigo # Try different colors like: deep purple, blue, teal + accent: purple toggle: icon: material/brightness-7 name: Switch to dark mode - - scheme: slate # Dark mode - primary: indigo - accent: deep purple + # Dark mode + - media: "(prefers-color-scheme: 
dark)" + scheme: slate + primary: indigo # Can be different from light mode + accent: purple toggle: icon: material/brightness-4 name: Switch to light mode From 799e1272be8ec40372707d4c337f8783791d806c Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 14 Mar 2025 17:14:21 +0100 Subject: [PATCH 12/87] Improve docs --- ...inear_converter.md => linear-converter.md} | 0 ...{source_and_sink.md => source-and-sink.md} | 0 mkdocs.yml | 20 ++++------- pyproject.toml | 3 +- scripts/gen_ref_pages.py | 35 ------------------- 5 files changed, 7 insertions(+), 51 deletions(-) rename docs/api/components/{linear_converter.md => linear-converter.md} (100%) rename docs/api/components/{source_and_sink.md => source-and-sink.md} (100%) delete mode 100644 scripts/gen_ref_pages.py diff --git a/docs/api/components/linear_converter.md b/docs/api/components/linear-converter.md similarity index 100% rename from docs/api/components/linear_converter.md rename to docs/api/components/linear-converter.md diff --git a/docs/api/components/source_and_sink.md b/docs/api/components/source-and-sink.md similarity index 100% rename from docs/api/components/source_and_sink.md rename to docs/api/components/source-and-sink.md diff --git a/mkdocs.yml b/mkdocs.yml index 1eaffc048..9482e962c 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -20,11 +20,11 @@ nav: - Bus: api/bus.md - Components: - Storage: api/components/storage.md - - LinearConverter: api/components/linear_converter.md + - LinearConverter: api/components/linear-converter.md - Transmission: api/components/transmission.md - Source: api/components/source.md - Sink: api/components/sink.md - - SourceAndSink: api/components/source_and_sink.md + - SourceAndSink: api/components/source-and-sink.md - LinearConverter: api/components/linear-converter.md - LinearConverter Subclasses: api/components/linear-converters.md - Interfaces: @@ -149,9 +149,6 @@ plugins: # Promotes constructor parameters to class-level 
documentation merge_init_into_class: true - # Uses Python type hints to supplement docstring information - infer_type_annotations: true - # Sets the base heading level for documented objects (h2) heading_level: 2 @@ -160,15 +157,10 @@ plugins: # Include members inherited from parent classes inherited_members: true - selection: - members_order: source # Keep order within each kind - order_by_kind: - - function - - method - - property - - attribute - - module - - class + + extra: + # Uses Python type hints to supplement docstring information + infer_type_annotations: true extra_javascript: - javascripts/mathjax.js # Custom MathJax 3 CDN Configuration diff --git a/pyproject.toml b/pyproject.toml index 27a2461d6..21b6291e2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -62,9 +62,8 @@ full = [ docs = [ "mkdocs-material==9.*", "mkdocstrings-python", - "mkdocs-gen-files", "mkdocs-section-index", - "mkdocs-table-reader-plugin" + "mkdocs-table-reader-plugin", ] [project.urls] diff --git a/scripts/gen_ref_pages.py b/scripts/gen_ref_pages.py deleted file mode 100644 index ab477d8d5..000000000 --- a/scripts/gen_ref_pages.py +++ /dev/null @@ -1,35 +0,0 @@ -"""Generate the code reference pages and navigation.""" - -from pathlib import Path - -import mkdocs_gen_files - -nav = mkdocs_gen_files.Nav() - -root = Path(__file__).parent.parent -src = root / "src" / "pyoframe" - -for path in sorted(src.rglob("*.py")): - module_path = path.relative_to(src).with_suffix("") - doc_path = path.relative_to(src).with_suffix(".md") - full_doc_path = Path("reference", doc_path) - - parts = ("pyoframe",) + tuple(module_path.parts) - - if parts[-1] == "__init__": - parts = parts[:-1] - doc_path = doc_path.with_name("index.md") - full_doc_path = full_doc_path.with_name("index.md") - elif parts[-1] == "__main__" or parts[-1].startswith("_"): - continue - - nav[parts] = doc_path.as_posix() - - with mkdocs_gen_files.open(full_doc_path, "w") as fd: - ident = ".".join(parts) - fd.write(f"::: 
{ident}") - - mkdocs_gen_files.set_edit_path(full_doc_path, Path("../") / path.relative_to(root)) - -with mkdocs_gen_files.open("reference/SUMMARY.md", "w") as nav_file: - nav_file.writelines(nav.build_literate_nav()) \ No newline at end of file From b8a278fe922ad6157bb45c0a0bbe2132886c6be6 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 14 Mar 2025 17:56:04 +0100 Subject: [PATCH 13/87] Add datatypes to docs --- docs/api/datatypes.md | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/docs/api/datatypes.md b/docs/api/datatypes.md index 8d79f906f..8880d0408 100644 --- a/docs/api/datatypes.md +++ b/docs/api/datatypes.md @@ -1,3 +1,36 @@ + ::: flixOpt.core.Scalar + options: + show_source: true + show_if_no_docstring: true + show_signature: true + show_bases: true + show_root_heading: true + show_root_full_path: true + ::: flixOpt.core.NumericData + options: + show_source: true + show_if_no_docstring: true + show_signature: true + show_bases: true + show_root_heading: true + show_root_full_path: true + ::: flixOpt.core.NumericDataTS + options: + show_source: true + show_if_no_docstring: true + show_signature: true + show_bases: true + show_root_heading: true + show_root_full_path: true + +::: flixOpt.effects.EffectValuesUser + options: + show_source: true + show_if_no_docstring: true + show_signature: true + show_bases: true + show_root_heading: true + show_root_full_path: true \ No newline at end of file From f226b6401dd867af39bddd67b8cfa90e753651d4 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 14 Mar 2025 18:18:31 +0100 Subject: [PATCH 14/87] Update all docstrings to google docs and remove type hints (as we use python type hi ts) --- examples/linopy_native_experiments.py | 14 +-- flixOpt/aggregation.py | 47 ++++--- flixOpt/calculation.py | 65 ++++------ flixOpt/components.py | 171 +++++++++----------------- flixOpt/config.py | 9 +- 
flixOpt/core.py | 52 ++++---- flixOpt/effects.py | 58 +++------ flixOpt/elements.py | 115 ++++++++--------- flixOpt/features.py | 22 ++-- flixOpt/flow_system.py | 24 ++-- flixOpt/interface.py | 97 ++++++--------- flixOpt/linear_converters.py | 116 +++++++---------- flixOpt/plotting.py | 42 +++---- flixOpt/results.py | 44 ++++--- flixOpt/structure.py | 57 ++++----- flixOpt/utils.py | 4 +- mkdocs.yml | 7 +- 17 files changed, 400 insertions(+), 544 deletions(-) diff --git a/examples/linopy_native_experiments.py b/examples/linopy_native_experiments.py index 6407d42f1..2d373d30a 100644 --- a/examples/linopy_native_experiments.py +++ b/examples/linopy_native_experiments.py @@ -16,15 +16,11 @@ def __init__( periods: Optional[List[int]] = None, ): """ - Parameters - ---------- - timesteps : pd.DatetimeIndex - The timesteps of the model. - hours_of_last_step : Optional[float], optional - The duration of the last time step. Uses the last time interval if not specified - periods : Optional[List[int]], optional - The periods of the model. Every period has the same timesteps. - Usually years are used as periods. + Args: + timesteps: The timesteps of the model. + hours_of_last_step: The duration of the last time step. Uses the last time interval if not specified + periods: The periods of the model. Every period has the same timesteps. + Usually years are used as periods. """ super().__init__(force_dim_names=True) self.timesteps = timesteps diff --git a/flixOpt/aggregation.py b/flixOpt/aggregation.py index 3d010e264..7f6648c86 100644 --- a/flixOpt/aggregation.py +++ b/flixOpt/aggregation.py @@ -52,8 +52,16 @@ def __init__( time_series_for_high_peaks: List[str] = None, time_series_for_low_peaks: List[str] = None, ): + """ - Write a docstring please + Args: + original_data: The original data to aggregate + hours_per_time_step: The duration of each timestep in hours. + hours_per_period: The duration of each period in hours. 
+ nr_of_periods: The number of typical periods to use in the aggregation. + weights: The weights for aggregation. If None, all time series are equally weighted. + time_series_for_high_peaks: List of time series to use for explicitly selecting periods with high values. + time_series_for_low_peaks: List of time series to use for explicitly selecting periods with low values. """ if not TSAM_AVAILABLE: raise ImportError("The 'tsam' package is required for clustering functionality. " @@ -225,29 +233,20 @@ def __init__( """ Initializes aggregation parameters for time series data - Parameters - ---------- - hours_per_period : float - Duration of each period in hours. - nr_of_periods : int - Number of typical periods to use in the aggregation. - fix_storage_flows : bool - Whether to aggregate storage flows (load/unload); if other flows - are fixed, fixing storage flows is usually not required. - aggregate_data_and_fix_non_binary_vars : bool - Whether to aggregate all time series data, which allows to fix all time series variables (like flow_rate), - or only fix binary variables. If False non time_series data is changed!! If True, the mathematical Problem - is simplified even further. - percentage_of_period_freedom : float, optional - Specifies the maximum percentage (0–100) of binary values within each period - that can deviate as "free variables", chosen by the solver (default is 0). - This allows binary variables to be 'partly equated' between aggregated periods. - penalty_of_period_freedom : float, optional - The penalty associated with each "free variable"; defaults to 0. Added to Penalty - time_series_for_high_peaks : list of TimeSeriesData - List of time series to use for explicitly selecting periods with high values. - time_series_for_low_peaks : list of TimeSeriesData - List of time series to use for explicitly selecting periods with low values. + Args: + hours_per_period: Duration of each period in hours. 
+ nr_of_periods: Number of typical periods to use in the aggregation. + fix_storage_flows: Whether to aggregate storage flows (load/unload); if other flows + are fixed, fixing storage flows is usually not required. + aggregate_data_and_fix_non_binary_vars: Whether to aggregate all time series data, which allows to fix all time series variables (like flow_rate), + or only fix binary variables. If False non time_series data is changed!! If True, the mathematical Problem + is simplified even further. + percentage_of_period_freedom: Specifies the maximum percentage (0–100) of binary values within each period + that can deviate as "free variables", chosen by the solver (default is 0). + This allows binary variables to be 'partly equated' between aggregated periods. + penalty_of_period_freedom: The penalty associated with each "free variable"; defaults to 0. Added to Penalty + time_series_for_high_peaks: List of TimeSeriesData to use for explicitly selecting periods with high values. + time_series_for_low_peaks: List of TimeSeriesData to use for explicitly selecting periods with low values. """ self.hours_per_period = hours_per_period self.nr_of_periods = nr_of_periods diff --git a/flixOpt/calculation.py b/flixOpt/calculation.py index 6f03dc378..4642ed2c9 100644 --- a/flixOpt/calculation.py +++ b/flixOpt/calculation.py @@ -47,16 +47,11 @@ def __init__( folder: Optional[pathlib.Path] = None, ): """ - Parameters - ---------- - name : str - name of calculation - flow_system : FlowSystem - flow_system which should be calculated - active_timesteps : List[int] or None - list with indices, which should be used for calculation. If None, then all timesteps are used. - folder : pathlib.Path or None - folder where results should be saved. If None, then the current working directory is used. + Args: + name: name of calculation + flow_system: flow_system which should be calculated + active_timesteps: list with indices, which should be used for calculation. 
If None, then all timesteps are used. + folder: folder where results should be saved. If None, then the current working directory is used. """ self.name = name self.flow_system = flow_system @@ -175,9 +170,9 @@ def save_results(self, save_flow_system: bool = False, compression: int = 0): Parameters ---------- - save_flow_system : bool, optional + save_flow_system: bool, optional Whether to save the flow_system, by default False - compression : int, optional + compression: int, optional Compression level for the netCDF file, by default 0 wich leads to no compression. Currently, only the Flow System file can be compressed. """ @@ -212,22 +207,16 @@ def __init__( Class for Optimizing the FLowSystem including: 1. Aggregating TimeSeriesData via typical periods using tsam. 2. Equalizing variables of typical periods. - Parameters - ---------- - name : str - name of calculation - flow_system : FlowSystem - flow_system which should be calculated - aggregation_parameters : AggregationParameters - Parameters for aggregation. See documentation of AggregationParameters class. - components_to_clusterize: List[Component] or None - List of Components to perform aggregation on. If None, then all components are aggregated. - This means, teh variables in the components are equalized to each other, according to the typical periods - computed in the DataAggregation - active_timesteps : pd.DatetimeIndex or None - list with indices, which should be used for calculation. If None, then all timesteps are used. - folder : pathlib.Path or None - folder where results should be saved. If None, then the current working directory is used. + Args: + name: name of calculation + flow_system: flow_system which should be calculated + aggregation_parameters: Parameters for aggregation. See documentation of AggregationParameters class. + components_to_clusterize: List of Components to perform aggregation on. If None, then all components are aggregated. 
+ This means, teh variables in the components are equalized to each other, according to the typical periods + computed in the DataAggregation + active_timesteps: pd.DatetimeIndex or None + list with indices, which should be used for calculation. If None, then all timesteps are used. + folder: folder where results should be saved. If None, then the current working directory is used. """ super().__init__(name, flow_system, active_timesteps, folder=folder) self.aggregation_parameters = aggregation_parameters @@ -312,19 +301,13 @@ def __init__( don't really work in this Calculation. Lower bounds to such SUMS can lead to weird results. This is NOT yet explicitly checked for... - Parameters - ---------- - name : str - name of calculation - flow_system : FlowSystem - flow_system which should be calculated - timesteps_per_segment : int - The number of time_steps per individual segment (without the overlap) - overlap_timesteps : int - The number of time_steps that are added to each individual model. Used for better - results of storages) - folder : pathlib.Path or None - folder where results should be saved. If None, then the current working directory is used. + Args: + name: name of calculation + flow_system: flow_system which should be calculated + timesteps_per_segment: The number of time_steps per individual segment (without the overlap) + overlap_timesteps: The number of time_steps that are added to each individual model. Used for better + results of storages) + folder: folder where results should be saved. If None, then the current working directory is used. 
""" super().__init__(name, flow_system, folder=folder) self.timesteps_per_segment = timesteps_per_segment diff --git a/flixOpt/components.py b/flixOpt/components.py index 39258f953..9ddb98c0a 100644 --- a/flixOpt/components.py +++ b/flixOpt/components.py @@ -24,7 +24,7 @@ @register_class_for_io class LinearConverter(Component): """ - Converts one FLow into another via linear conversion factors + Converts input-Flows into output-Flows via linear conversion factors """ def __init__( @@ -38,25 +38,20 @@ def __init__( meta_data: Optional[Dict] = None, ): """ - Parameters - ---------- - label : str - name. - meta_data : Optional[Dict] - used to store more information about the element. Is not used internally, but saved in the results - inputs : input flows. - outputs : output flows. - on_off_parameters: Information about on and off states. See class OnOffParameters. - conversion_factors : linear relation between flows. - Either 'conversion_factors' or 'segmented_conversion_factors' can be used! - example heat pump: - segmented_conversion_factors : Segmented linear relation between flows. - Each Flow gets a List of Segments assigned. - If FLows need to be 0 (or Off), include a "Zero-Segment" "(0, 0)", or use on_off_parameters - Either 'segmented_conversion_factors' or 'conversion_factors' can be used! - --> "gaps" can be expressed by a segment not starting at the end of the prior segment : [(1,3), (4,5)] - --> "points" can expressed as segment with same begin and end : [(3,3), (4,4)] - + Args: + label: The name + meta_data: used to store more information about the element. Is not used internally, but saved in the results + inputs: The input Flows + outputs: The output Flows + on_off_parameters: Information about on and off states. See class OnOffParameters. + conversion_factors: linear relation between flows. + Either 'conversion_factors' or 'segmented_conversion_factors' can be used! + segmented_conversion_factors: Segmented linear relation between flows. 
+ Each Flow gets a List of Segments assigned. + If FLows need to be 0 (or Off), include a "Zero-Segment" "(0, 0)", or use on_off_parameters + Either 'segmented_conversion_factors' or 'conversion_factors' can be used! + --> "gaps" can be expressed by a segment not starting at the end of the prior segment: [(1,3), (4,5)] + --> "points" can expressed as segment with same begin and end: [(3,3), (4,4)] """ super().__init__(label, inputs, outputs, on_off_parameters, meta_data=meta_data) self.conversion_factors = conversion_factors or [] @@ -159,41 +154,22 @@ def __init__( meta_data: Optional[Dict] = None, ): """ - constructor of storage - - Parameters - ---------- - label : str - description. - meta_data : Optional[Dict] - used to store more information about the element. Is not used internally, but saved in the results - charging : Flow - ingoing flow. - discharging : Flow - outgoing flow. - capacity_in_flow_hours : Scalar or InvestParameter - nominal capacity of the storage - relative_minimum_charge_state : float or TS, optional - minimum relative charge state. The default is 0. - relative_maximum_charge_state : float or TS, optional - maximum relative charge state. The default is 1. - initial_charge_state : None, float (0...1), 'lastValueOfSim', optional - storage charge_state at the beginning. The default is 0. - float: defined charge_state at start of first timestep - None: free to choose by optimizer - 'lastValueOfSim': chargeState0 is equal to chargestate of last timestep ("closed simulation") - minimal_final_charge_state : float or None, optional - minimal value of chargeState at the end of timeseries. - maximal_final_charge_state : float or None, optional - maximal value of chargeState at the end of timeseries. - eta_charge : float, optional - efficiency factor of charging/loading. The default is 1. - eta_discharge : TYPE, optional - efficiency factor of uncharging/unloading. The default is 1. - relative_loss_per_hour : float or TS. 
optional - loss per chargeState-Unit per hour. The default is 0. - prevent_simultaneous_charge_and_discharge : boolean, optional - should simultaneously Loading and Unloading be avoided? (Attention, Performance maybe becomes worse with avoidInAndOutAtOnce=True). The default is True. + Args: + label: The name + meta_data: used to store more information about the element. Is not used internally, but saved in the results + charging: ingoing flow. + discharging: outgoing flow. + capacity_in_flow_hours: nominal capacity/size of the storage + relative_minimum_charge_state: minimum relative charge state. The default is 0. + relative_maximum_charge_state: maximum relative charge state. The default is 1. + initial_charge_state: storage charge_state at the beginning. The default is 0. + minimal_final_charge_state: minimal value of chargeState at the end of timeseries. + maximal_final_charge_state: maximal value of chargeState at the end of timeseries. + eta_charge: efficiency factor of charging/loading. The default is 1. + eta_discharge: efficiency factor of uncharging/unloading. The default is 1. + relative_loss_per_hour: loss per chargeState-Unit per hour. The default is 0. + prevent_simultaneous_charge_and_discharge: If True, loading and unloading at the same time is not possible. + Increases the number of binary variables, but is recommended for easier evaluation. The default is True. """ # TODO: fixed_relative_chargeState implementieren super().__init__( @@ -259,32 +235,24 @@ def __init__( absolute_losses: Optional[NumericDataTS] = None, on_off_parameters: OnOffParameters = None, prevent_simultaneous_flows_in_both_directions: bool = True, + meta_data: Optional[Dict] = None, ): """ Initializes a Transmission component (Pipe, cable, ...) that models the flows between two sides with potential losses. - Parameters - ---------- - label : str - The name of the transmission component. - in1 : Flow - The inflow at side A. Pass InvestmentParameters here. 
- out1 : Flow - The outflow at side B. - in2 : Optional[Flow], optional - The optional inflow at side B. - If in1 got Investmentparameters, the size of this Flow will be equal to in1 (with no extra effects!) - out2 : Optional[Flow], optional - The optional outflow at side A. - relative_losses : Optional[NumericDataTS], optional - The relative loss between inflow and outflow, e.g., 0.02 for 2% loss. - absolute_losses : Optional[NumericDataTS], optional - The absolute loss, occur only when the Flow is on. Induces the creation of the ON-Variable - on_off_parameters : OnOffParameters, optional - Parameters defining the on/off behavior of the component. - prevent_simultaneous_flows_in_both_directions : bool, default=True - If True, prevents simultaneous flows in both directions. + Args: + label: The name + meta_data: used to store more information about the element. Is not used internally, but saved in the results + in1: The inflow at side A. Pass InvestmentParameters here. + out1: The outflow at side B. + in2: The optional inflow at side B. + If in1 got InvestParameters, the size of this Flow will be equal to in1 (with no extra effects!) + out2: The optional outflow at side A. + relative_losses: The relative loss between inflow and outflow, e.g., 0.02 for 2% loss. + absolute_losses: The absolute loss, occur only when the Flow is on. Induces the creation of the ON-Variable + on_off_parameters: Parameters defining the on/off behavior of the component. + prevent_simultaneous_flows_in_both_directions: If True, inflow and outflow are not allowed to be both non-zero at same timestep. 
""" super().__init__( label, @@ -294,6 +262,7 @@ def __init__( prevent_simultaneous_flows=None if in2 is None or prevent_simultaneous_flows_in_both_directions is False else [in1, in2], + meta_data=meta_data, ) self.in1 = in1 self.out1 = out1 @@ -558,10 +527,6 @@ class SourceAndSink(Component): """ class for source (output-flow) and sink (input-flow) in one commponent """ - - # source : Flow - # sink : Flow - def __init__( self, label: str, @@ -571,20 +536,12 @@ def __init__( meta_data: Optional[Dict] = None, ): """ - Parameters - ---------- - label : str - name of sourceAndSink - meta_data : Optional[Dict] - used to store more information about the element. Is not used internally, but saved in the results - source : Flow - output-flow of this component - sink : Flow - input-flow of this component - prevent_simultaneous_sink_and_source: boolean. Default ist True. - True: inflow and outflow are not allowed to be both non-zero at same timestep. - False: inflow and outflow are working independently. - + Args: + label: The name + meta_data: used to store more information about the element. Is not used internally, but saved in the results + source: output-flow of this component + sink: input-flow of this component + prevent_simultaneous_sink_and_source: If True, inflow and outflow can not be active simultaniously. """ super().__init__( label, @@ -602,14 +559,10 @@ def __init__( class Source(Component): def __init__(self, label: str, source: Flow, meta_data: Optional[Dict] = None): """ - Parameters - ---------- - label : str - name of source - meta_data : Optional[Dict] - used to store more information about the element. Is not used internally, but saved in the results - source : Flow - output-flow of source + Args: + label: The name + meta_data: used to store more information about the element. 
Is not used internally, but saved in the results + source: output-flow of source """ super().__init__(label, outputs=[source], meta_data=meta_data) self.source = source @@ -619,16 +572,10 @@ def __init__(self, label: str, source: Flow, meta_data: Optional[Dict] = None): class Sink(Component): def __init__(self, label: str, sink: Flow, meta_data: Optional[Dict] = None): """ - constructor of sink - - Parameters - ---------- - label : str - name of sink. - meta_data : Optional[Dict] - used to store more information about the element. Is not used internally, but saved in the results - sink : Flow - input-flow of sink + Args: + label: The name + meta_data: used to store more information about the element. Is not used internally, but saved in the results + sink: input-flow of sink """ super().__init__(label, inputs=[sink], meta_data=meta_data) self.sink = sink diff --git a/flixOpt/config.py b/flixOpt/config.py index 5ba7decd1..e07458bd8 100644 --- a/flixOpt/config.py +++ b/flixOpt/config.py @@ -14,10 +14,11 @@ def merge_configs(defaults: dict, overrides: dict) -> dict: """ Merge the default configuration with user-provided overrides. - - :param defaults: Default configuration dictionary. - :param overrides: User configuration dictionary. - :return: Merged configuration dictionary. + Args: + defaults: Default configuration dictionary. + overrides: User configuration dictionary. + Returns: + Merged configuration dictionary. 
""" for key, value in overrides.items(): if isinstance(value, dict) and key in defaults and isinstance(defaults[key], dict): diff --git a/flixOpt/core.py b/flixOpt/core.py index 9d89585e0..76f554fbf 100644 --- a/flixOpt/core.py +++ b/flixOpt/core.py @@ -16,9 +16,14 @@ logger = logging.getLogger('flixOpt') -Scalar = Union[int, float] # Datatype +Scalar = Union[int, float] +"""A type representing a single number, either integer or float.""" + NumericData = Union[int, float, np.integer, np.floating, np.ndarray, pd.Series, pd.DataFrame, xr.DataArray] +"""Represents any form of numeric data, from simple scalars to complex data structures.""" + NumericDataTS = Union[NumericData, 'TimeSeriesData'] +"""Represents either standard numeric data or TimeSeriesData.""" class ConversionError(Exception): @@ -93,19 +98,13 @@ def __init__(self, data: NumericData, agg_group: Optional[str] = None, agg_weigh --> this 3 series of same type share one weight, i.e. internally assigned each weight = 1/3 (instead of standard weight = 1) - Parameters - ---------- - data : Union[int, float, np.ndarray] - The timeseries data, which can be a scalar, array, or numpy array. - agg_group : str, optional - The group this TimeSeriesData is a part of. agg_weight is split between members of a group. Default is None. - agg_weight : float, optional - The weight for calculation_type 'aggregated', should be between 0 and 1. Default is None. - - Raises - ------ - Exception - If both agg_group and agg_weight are set, an exception is raised. + Args: + data: The timeseries data, which can be a scalar, array, or numpy array. + agg_group: The group this TimeSeriesData is a part of. agg_weight is split between members of a group. Default is None. + agg_weight: The weight for calculation_type 'aggregated', should be between 0 and 1. Default is None. + + Raises: + Exception: If both agg_group and agg_weight are set, an exception is raised. 
""" self.data = data self.agg_group = agg_group @@ -215,7 +214,7 @@ def __init__(self, """ Initialize a TimeSeries with a DataArray. - Parameters: + Args: data: The DataArray containing time series data name: The name of the TimeSeries aggregation_weight: The weight in aggregation calculations @@ -472,7 +471,16 @@ def __init__( hours_of_last_timestep: Optional[float] = None, hours_of_previous_timesteps: Optional[Union[float, np.ndarray]] = None ): - """Initialize with timesteps and optional duration settings.""" + + """ + Args: + timesteps: The timesteps of the Collection. + hours_of_last_timestep: The duration of the last time step. Uses the last time interval if not specified + hours_of_previous_timesteps: The duration of previous timesteps. + If None, the first time increment of time_series is used. + This is needed to calculate previous durations (for example consecutive_on_hours). + If you use an array, take care that its long enough to cover all previous values! + """ # Prepare and validate timesteps self._validate_timesteps(timesteps) self.hours_of_previous_timesteps = self._calculate_hours_of_previous_timesteps( @@ -581,7 +589,7 @@ def activate_timesteps(self, active_timesteps: Optional[pd.DatetimeIndex] = None Parameters ---------- - active_timesteps : Optional[pd.DatetimeIndex] + active_timesteps: Optional[pd.DatetimeIndex] The active timesteps of the model. 
If None, the all timesteps of the TimeSeriesCollection are taken.""" if active_timesteps is None: @@ -627,9 +635,9 @@ def insert_new_data(self, data: pd.DataFrame, include_extra_timestep: bool = Fal Parameters ---------- - data : pd.DataFrame + data: pd.DataFrame DataFrame containing new data with timestamps as index - include_extra_timestep : bool, optional + include_extra_timestep: bool, optional Whether the provided data already includes the extra timestep, by default False """ if not isinstance(data, pd.DataFrame): @@ -675,9 +683,9 @@ def to_dataframe(self, Parameters ---------- - filtered : Literal['all', 'constant', 'non_constant'], optional + filtered: Literal['all', 'constant', 'non_constant'], optional Filter time series by variability, by default 'non_constant' - include_extra_timestep : bool, optional + include_extra_timestep: bool, optional Whether to include the extra timestep in the result, by default True Returns @@ -709,7 +717,7 @@ def to_dataset(self, include_constants: bool = True) -> xr.Dataset: Parameters ---------- - include_constants : bool, optional + include_constants: bool, optional Whether to include time series with constant values, by default True Returns diff --git a/flixOpt/effects.py b/flixOpt/effects.py index 4d695633c..fcf629595 100644 --- a/flixOpt/effects.py +++ b/flixOpt/effects.py @@ -50,45 +50,25 @@ def __init__( maximum_total: Optional[Scalar] = None, ): """ - Parameters - ---------- - label : str - name - unit : str - unit of effect, i.g. €, kg_CO2, kWh_primaryEnergy - description : str - long name - meta_data : Optional[Dict] - used to store more information about the element. Is not used internally, but saved in the results - is_standard : boolean, optional - true, if Standard-Effect (for direct input of value without effect (alternatively to dict)) , else false - is_objective : boolean, optional - true, if optimization target - specific_share_to_other_effects_operation : {effectType: TS, ...}, i.g. 
180 €/t_CO2, input as {costs: 180}, optional - share to other effects (only operation) - specific_share_to_other_effects_invest : {effectType: TS, ...}, i.g. 180 €/t_CO2, input as {costs: 180}, optional - share to other effects (only invest). - minimum_operation : scalar, optional - minimal sum (only operation) of the effect - maximum_operation : scalar, optional - maximal sum (nur operation) of the effect. - minimum_operation_per_hour : scalar or TS - maximum value per hour (only operation) of effect (=sum of all effect-shares) for each timestep! - maximum_operation_per_hour : scalar or TS - minimum value per hour (only operation) of effect (=sum of all effect-shares) for each timestep! - minimum_invest : scalar, optional - minimal sum (only invest) of the effect - maximum_invest : scalar, optional - maximal sum (only invest) of the effect - minimum_total : scalar, optional - min sum of effect (invest+operation). - maximum_total : scalar, optional - max sum of effect (invest+operation). - - Returns - ------- - None. - + Args: + label: The name + unit: The unit of effect, i.g. €, kg_CO2, kWh_primaryEnergy + description: The long name + meta_data: used to store more information about the element. Is not used internally, but saved in the results + is_standard: true, if Standard-Effect (for direct input of value without effect (alternatively to dict)) , else false + is_objective: true, if optimization target + specific_share_to_other_effects_operation: {effectType: TS, ...}, i.g. 180 €/t_CO2, input as {costs: 180}, optional + share to other effects (only operation) + specific_share_to_other_effects_invest: {effectType: TS, ...}, i.g. 180 €/t_CO2, input as {costs: 180}, optional + share to other effects (only invest). + minimum_operation: minimal sum (only operation) of the effect. + maximum_operation: maximal sum (nur operation) of the effect. + minimum_operation_per_hour: max. value per hour (only operation) of effect (=sum of all effect-shares) for each timestep! 
+ maximum_operation_per_hour: min. value per hour (only operation) of effect (=sum of all effect-shares) for each timestep! + minimum_invest: minimal sum (only invest) of the effect + maximum_invest: maximal sum (only invest) of the effect + minimum_total: min sum of effect (invest+operation). + maximum_total: max sum of effect (invest+operation). """ super().__init__(label, meta_data=meta_data) self.label = label diff --git a/flixOpt/elements.py b/flixOpt/elements.py index 0cf80a2cc..5b2975905 100644 --- a/flixOpt/elements.py +++ b/flixOpt/elements.py @@ -38,20 +38,17 @@ def __init__( meta_data: Optional[Dict] = None, ): """ - Parameters - ---------- - label : str - name. - meta_data : Optional[Dict] - used to store more information about the element. Is not used internally, but saved in the results - inputs : input flows. - outputs : output flows. - on_off_parameters: Information about on and off state of Component. - Component is On/Off, if all connected Flows are On/Off. - Induces On-Variable in all FLows! - See class OnOffParameters. - prevent_simultaneous_flows: Define a Group of Flows. Only one them can be on at a time. - Induces On-Variable in all FLows! + Args: + label: The name + meta_data: used to store more information about the element. Is not used internally, but saved in the results + inputs: input flows. + outputs: output flows. + on_off_parameters: Information about on and off state of Component. + Component is On/Off, if all connected Flows are On/Off. + Induces On-Variable in all FLows! + See class OnOffParameters. + prevent_simultaneous_flows: Define a Group of Flows. Only one them can be on at a time. + Induces On-Variable in all Flows! If possible, use OnOffParameters in a single Flow instead. 
""" super().__init__(label, meta_data=meta_data) self.inputs: List['Flow'] = inputs or [] @@ -87,16 +84,12 @@ def __init__( self, label: str, excess_penalty_per_flow_hour: Optional[NumericDataTS] = 1e5, meta_data: Optional[Dict] = None ): """ - Parameters - ---------- - label : str - name. - meta_data : Optional[Dict] - used to store more information about the element. Is not used internally, but saved in the results - excess_penalty_per_flow_hour : none or scalar, array or TimeSeriesData - excess costs / penalty costs (bus balance compensation) - (none/ 0 -> no penalty). The default is 1e5. - (Take care: if you use a timeseries (no scalar), timeseries is aggregated if calculation_type = aggregated!) + Args: + label: The name + meta_data: used to store more information about the element. Is not used internally, but saved in the results + excess_penalty_per_flow_hour: excess costs / penalty costs (bus balance compensation) + (none/ 0 -> no penalty). The default is 1e5. + (Take care: if you use a timeseries (no scalar), timeseries is aggregated if calculation_type = aggregated!) """ super().__init__(label, meta_data=meta_data) self.excess_penalty_per_flow_hour = excess_penalty_per_flow_hour @@ -125,9 +118,12 @@ def with_excess(self) -> bool: class Connection: # input/output-dock (TODO: # -> wäre cool, damit Komponenten auch auch ohne Knoten verbindbar - # input wären wie Flow,aber statt bus : connectsTo -> hier andere Connection oder aber Bus (dort keine Connection, weil nicht notwendig) + # input wären wie Flow,aber statt bus: connectsTo -> hier andere Connection oder aber Bus (dort keine Connection, weil nicht notwendig) def __init__(self): + """ + This class is not yet implemented! + """ raise NotImplementedError() @@ -155,48 +151,33 @@ def __init__( meta_data: Optional[Dict] = None, ): r""" - Parameters - ---------- - label : str - name of flow - meta_data : Optional[Dict] - used to store more information about the element. 
Is not used internally, but saved in the results - bus : Bus, optional - bus to which flow is linked - size : scalar, InvestmentParameters, optional - size of the flow. If InvestmentParameters is used, size is optimized. - If size is None, a default value is used. - relative_minimum : scalar, array, TimeSeriesData, optional - min value is relative_minimum multiplied by size - relative_maximum : scalar, array, TimeSeriesData, optional - max value is relative_maximum multiplied by size. If size = max then relative_maximum=1 - load_factor_min : scalar, optional - minimal load factor general: avg Flow per nominalVal/investSize - (e.g. boiler, kW/kWh=h; solarthermal: kW/m²; - def: :math:`load\_factor:= sumFlowHours/ (nominal\_val \cdot \Delta t_{tot})` - load_factor_max : scalar, optional - maximal load factor (see minimal load factor) - effects_per_flow_hour : scalar, array, TimeSeriesData, optional - operational costs, costs per flow-"work" - on_off_parameters : OnOffParameters, optional - If present, flow can be "off", i.e. be zero (only relevant if relative_minimum > 0) - Therefore a binary var "on" is used. Further, several other restrictions and effects can be modeled - through this On/Off State (See OnOffParameters) - flow_hours_total_max : TYPE, optional - maximum flow-hours ("flow-work") - (if size is not const, maybe load_factor_max fits better for you!) - flow_hours_total_min : TYPE, optional - minimum flow-hours ("flow-work") - (if size is not const, maybe load_factor_min fits better for you!) - fixed_relative_profile : scalar, array, TimeSeriesData, optional - fixed relative values for flow (if given). - flow_rate(t) := fixed_relative_profile(t) * size(t) - With this value, the flow_rate is no opt-variable anymore; - (relative_minimum u. relative_maximum are iverwritten) - used for fixed load profiles, i.g. heat demand, wind-power, solarthermal - If the load-profile is just an upper limit, use relative_maximum instead. 
- previous_flow_rate : scalar, array, optional - previous flow rate of the component. + Args: + label: The name + meta_data: used to store more information about the element. Is not used internally, but saved in the results + bus: bus to which flow is linked + size: size of the flow. If InvestmentParameters is used, size is optimized. + If size is None, a default value is used. + relative_minimum: min value is relative_minimum multiplied by size + relative_maximum: max value is relative_maximum multiplied by size. If size = max then relative_maximum=1 + load_factor_min: minimal load factor general: avg Flow per nominalVal/investSize + (e.g. boiler, kW/kWh=h; solarthermal: kW/m²; + def: :math:`load\_factor:= sumFlowHours/ (nominal\_val \cdot \Delta t_{tot})` + load_factor_max: maximal load factor (see minimal load factor) + effects_per_flow_hour: operational costs, costs per flow-"work" + on_off_parameters: If present, flow can be "off", i.e. be zero (only relevant if relative_minimum > 0) + Therefore a binary var "on" is used. Further, several other restrictions and effects can be modeled + through this On/Off State (See OnOffParameters) + flow_hours_total_max: maximum flow-hours ("flow-work") + (if size is not const, maybe load_factor_max is the better choice!) + flow_hours_total_min: minimum flow-hours ("flow-work") + (if size is not predefined, maybe load_factor_min is the better choice!) + fixed_relative_profile: fixed relative values for flow (if given). + flow_rate(t) := fixed_relative_profile(t) * size(t) + With this value, the flow_rate is no optimization-variable anymore. + (relative_minimum and relative_maximum are ignored) + used for fixed load or supply profiles, i.g. heat demand, wind-power, solarthermal + If the load-profile is just an upper limit, use relative_maximum instead. + previous_flow_rate: previous flow rate of the component. 
""" super().__init__(label, meta_data=meta_data) self.size = size or CONFIG.modeling.BIG # Default size diff --git a/flixOpt/features.py b/flixOpt/features.py index 3e79aed43..3b2db08a6 100644 --- a/flixOpt/features.py +++ b/flixOpt/features.py @@ -607,7 +607,7 @@ def compute_previous_on_states(previous_values: List[Optional[NumericData]], eps ---------- previous_values: List[NumericData] List of previous values of the defining variables. In Range [0, inf] or None (ignored) - epsilon : float, optional + epsilon: float, optional Tolerance for equality to determine "off" state, default is 1e-5. Returns: @@ -639,9 +639,9 @@ def compute_consecutive_duration( Parameters ---------- - binary_values : int, np.ndarray + binary_values: int, np.ndarray An int or 1D binary array containing only `0`s and `1`s. - hours_per_timestep : int, float, np.ndarray + hours_per_timestep: int, float, np.ndarray The duration of each timestep in hours. Returns @@ -746,17 +746,17 @@ def __init__( """ Parameters ---------- - model : linopy.Model + model: linopy.Model Model to which the segmented variable belongs. - label_of_element : str + label_of_element: str Name of the parent variable. - sample_points : dict[str, list[tuple[float, float]]] + sample_points: dict[str, list[tuple[float, float]]] Dictionary mapping variables (names) to their sample points for each segment. The sample points are tuples of the form (start, end). - can_be_outside_segments : bool or linopy.Variable, optional + can_be_outside_segments: bool or linopy.Variable, optional Whether the variable can be outside the segments. If True, a variable is created. If False or None, no variable is created. If a Variable is passed, it is used. 
- as_time_series : bool, optional + as_time_series: bool, optional """ super().__init__(model, label_of_element, label) self.outside_segments: Optional[linopy.Variable] = None @@ -900,11 +900,11 @@ def add_share( Parameters ---------- - system_model : SystemModel + system_model: SystemModel The system model. - name : str + name: str The name of the share. - expression : linopy.LinearExpression + expression: linopy.LinearExpression The expression of the share. Added to the right hand side of the constraint. """ if name in self.shares: diff --git a/flixOpt/flow_system.py b/flixOpt/flow_system.py index 39d3160fc..5700a462c 100644 --- a/flixOpt/flow_system.py +++ b/flixOpt/flow_system.py @@ -40,17 +40,13 @@ def __init__( hours_of_previous_timesteps: Optional[Union[int, float, np.ndarray]] = None, ): """ - Parameters - ---------- - timesteps : pd.DatetimeIndex - The timesteps of the model. - hours_of_last_timestep : Optional[float], optional - The duration of the last time step. Uses the last time interval if not specified - hours_of_previous_timesteps : Union[int, float, np.ndarray] - The duration of previous timesteps. - If None, the first time increment of time_series is used. - This is needed to calculate previous durations (for example consecutive_on_hours). - If you use an array, take care that its long enough to cover all previous values! + Args: + timesteps: The timesteps of the model. + hours_of_last_timestep: The duration of the last time step. Uses the last time interval if not specified + hours_of_previous_timesteps: The duration of previous timesteps. + If None, the first time increment of time_series is used. + This is needed to calculate previous durations (for example consecutive_on_hours). + If you use an array, take care that it's long enough to cover all previous values! 
""" self.time_series_collection = TimeSeriesCollection( timesteps=timesteps, @@ -126,7 +122,7 @@ def add_elements(self, *elements: Element) -> None: Parameters ---------- - *elements : childs of Element like Boiler, HeatPump, Bus,... + *elements: childs of Element like Boiler, HeatPump, Bus,... modeling Elements """ @@ -154,7 +150,7 @@ def to_json(self, path: Union[str, pathlib.Path]): Parameters: ----------- - path : Union[str, pathlib.Path] + path: Union[str, pathlib.Path] The path to the json file. """ with open(path, 'w', encoding='utf-8') as f: @@ -345,7 +341,7 @@ def _check_if_element_is_unique(self, element: Element) -> None: Parameters ---------- - element : Element + element: Element new element to check """ if element in self.all_elements.values(): diff --git a/flixOpt/interface.py b/flixOpt/interface.py index ec4462633..65aa66577 100644 --- a/flixOpt/interface.py +++ b/flixOpt/interface.py @@ -41,39 +41,29 @@ def __init__( divest_effects: Optional['EffectValuesUserScalar'] = None, ): """ - Parameters - ---------- - fix_effects : None or scalar, optional - Fixed investment costs if invested. - (Attention: Annualize costs to chosen period!) - divest_effects : None or scalar, optional - Fixed divestment costs (if not invested, e.g., demolition costs or contractual penalty). - fixed_size : int, float, optional - Determines if the investment size is fixed. - optional : bool, optional - If True, investment is not forced. - specific_effects : scalar or Dict[Effect: Union[int, float, np.ndarray], optional - Specific costs, e.g., in €/kW_nominal or €/m²_nominal. - Example: {costs: 3, CO2: 0.3} with costs and CO2 representing an Object of class Effect - (Attention: Annualize costs to chosen period!) - effects_in_segments : list or List[ List[Union[int,float]], Dict[cEffecType: Union[List[Union[int,float]], optional - Linear relation in segments [invest_segments, cost_segments]. 
- Example 1: - [ [5, 25, 25, 100], # size in kW - {costs: [50,250,250,800], # € - PE: [5, 25, 25, 100] # kWh_PrimaryEnergy - } - ] - Example 2 (if only standard-effect): - [ [5, 25, 25, 100], # kW # size in kW - [50,250,250,800] # value for standart effect, typically € - ] # € - (Attention: Annualize costs to chosen period!) - (Args 'specific_effects' and 'fix_effects' can be used in parallel to InvestsizeSegments) - minimum_size : scalar - Min nominal value (only if: size_is_fixed = False). - maximum_size : scalar, Optional - Max nominal value (only if: size_is_fixed = False). + Args: + fix_effects: Fixed investment costs if invested. (Attention: Annualize costs to chosen period!) + divest_effects: Fixed divestment costs (if not invested, e.g., demolition costs or contractual penalty). + fixed_size: Determines if the investment size is fixed. + optional: If True, investment is not forced. + specific_effects: Specific costs, e.g., in €/kW_nominal or €/m²_nominal. + Example: {costs: 3, CO2: 0.3} with costs and CO2 representing an Object of class Effect + (Attention: Annualize costs to chosen period!) + effects_in_segments: Linear relation in segments [invest_segments, cost_segments]. + Example 1: + [ [5, 25, 25, 100], # size in kW + {costs: [50,250,250,800], # € + PE: [5, 25, 25, 100] # kWh_PrimaryEnergy + } + ] + Example 2 (if only standard-effect): + [ [5, 25, 25, 100], # kW # size in kW + [50,250,250,800] # value for standard effect, typically € + ] # € + (Attention: Annualize costs to chosen period!) + (Args 'specific_effects' and 'fix_effects' can be used in parallel to InvestsizeSegments) + minimum_size: Min nominal value (only if: size_is_fixed = False). + maximum_size: Max nominal value (only if: size_is_fixed = False). 
""" self.fix_effects: EffectValuesUser = fix_effects or {} self.divest_effects: EffectValuesUser = divest_effects or {} @@ -113,35 +103,24 @@ def __init__( force_switch_on: bool = False, ): """ - on_off_parameters class for modeling on and off state of an Element. + Bundles information about the on and off state of an Element. If no parameters are given, the default is to create a binary variable for the on state without further constraints or effects and a variable for the total on hours. - Parameters - ---------- - effects_per_switch_on : scalar, array, TimeSeriesData, optional - cost of one switch from off (var_on=0) to on (var_on=1), - unit i.g. in Euro - effects_per_running_hour : scalar or TS, optional - costs for operating, i.g. in € per hour - on_hours_total_min : scalar, optional - min. overall sum of operating hours. - on_hours_total_max : scalar, optional - max. overall sum of operating hours. - consecutive_on_hours_min : scalar, optional - min sum of operating hours in one piece - (last on-time period of timeseries is not checked and can be shorter) - consecutive_on_hours_max : scalar, optional - max sum of operating hours in one piece - consecutive_off_hours_min : scalar, optional - min sum of non-operating hours in one piece - (last off-time period of timeseries is not checked and can be shorter) - consecutive_off_hours_max : scalar, optional - max sum of non-operating hours in one piece - switch_on_total_max : integer, optional - max nr of switchOn operations - force_switch_on : bool - force creation of switch on variable, even if there is no switch_on_total_max + Args: + effects_per_switch_on: cost of one switch from off (var_on=0) to on (var_on=1), + unit i.g. in Euro + effects_per_running_hour: costs for operating, i.g. in € per hour + on_hours_total_min: min. overall sum of operating hours. + on_hours_total_max: max. overall sum of operating hours. 
+ consecutive_on_hours_min: min sum of operating hours in one piece + (last on-time period of timeseries is not checked and can be shorter) + consecutive_on_hours_max: max sum of operating hours in one piece + consecutive_off_hours_min: min sum of non-operating hours in one piece + (last off-time period of timeseries is not checked and can be shorter) + consecutive_off_hours_max: max sum of non-operating hours in one piece + switch_on_total_max: max nr of switchOn operations + force_switch_on: force creation of switch on variable, even if there is no switch_on_total_max """ self.effects_per_switch_on: EffectValuesUser = effects_per_switch_on or {} self.effects_per_running_hour: EffectValuesUser = effects_per_running_hour or {} diff --git a/flixOpt/linear_converters.py b/flixOpt/linear_converters.py index cbb33772e..13f78c36d 100644 --- a/flixOpt/linear_converters.py +++ b/flixOpt/linear_converters.py @@ -28,20 +28,13 @@ def __init__( meta_data: Optional[Dict] = None, ): """ - constructor for boiler - - Parameters - ---------- - label : str - name of bolier. - eta : float or TS - thermal efficiency. - Q_fu : Flow - fuel input-flow - Q_th : Flow - thermal output-flow. - meta_data : Optional[Dict] - used to store more information about the element. Is not used internally, but saved in the results + Args: + label: The name + eta: thermal efficiency. + Q_fu: fuel input-flow + Q_th: thermal output-flow. + on_off_parameters: Parameters defining the on/off behavior of the component. + meta_data: used to store more information about the element. Is not used internally, but saved in the results """ super().__init__( label, @@ -76,19 +69,13 @@ def __init__( meta_data: Optional[Dict] = None, ): """ - Parameters - ---------- - label : str - name of bolier. - eta : float or TS - thermal efficiency. - P_el : Flow - electric input-flow - Q_th : Flow - thermal output-flow. - meta_data : Optional[Dict] - used to store more information about the element. 
 Is not used internally, but saved in the results - + Args: + label: The name + eta: thermal efficiency. + P_el: electric input-flow + Q_th: thermal output-flow. + on_off_parameters: Parameters defining the on/off behavior of the component. + meta_data: used to store more information about the element. Is not used internally, but saved in the results """ super().__init__( label, @@ -124,18 +111,13 @@ def __init__( meta_data: Optional[Dict] = None, ): """ - Parameters - ---------- - label : str - name of heatpump. - COP : float or TS - Coefficient of performance. - P_el : Flow - electricity input-flow. - Q_th : Flow - thermal output-flow. - meta_data : Optional[Dict] - used to store more information about the element. Is not used internally, but saved in the results + Args: + label: The name + COP: Coefficient of performance. + P_el: electricity input-flow. + Q_th: thermal output-flow. + on_off_parameters: Parameters defining the on/off behavior of the component. + meta_data: used to store more information about the element. Is not used internally, but saved in the results """ super().__init__( label, @@ -171,19 +153,13 @@ def __init__( meta_data: Optional[Dict] = None, ): """ - Parameters - ---------- - label : str - name of cooling tower. - specific_electricity_demand : float or TS - auxiliary electricty demand per cooling power, i.g. 0.02 (2 %). - P_el : Flow - electricity input-flow. - Q_th : Flow - thermal input-flow. - meta_data : Optional[Dict] - used to store more information about the element. Is not used internally, but saved in the results - + Args: + label: The name + specific_electricity_demand: auxiliary electricity demand per cooling power, e.g. 0.02 (2 %). + P_el: electricity input-flow. + Q_th: thermal input-flow. + on_off_parameters: Parameters defining the on/off behavior of the component. + meta_data: used to store more information about the element. 
Is not used internally, but saved in the results """ super().__init__( label, @@ -227,19 +203,19 @@ def __init__( Parameters ---------- - label : str + label: str name of CHP-unit. - eta_th : float or TS + eta_th: float or TS thermal efficiency. - eta_el : float or TS + eta_el: float or TS electrical efficiency. - Q_fu : cFlow + Q_fu: cFlow fuel input-flow. - P_el : cFlow + P_el: cFlow electricity output-flow. - Q_th : cFlow + Q_th: cFlow heat output-flow. - meta_data : Optional[Dict] + meta_data: Optional[Dict] used to store more information about the element. Is not used internally, but saved in the results """ heat = {Q_fu.label: eta_th, Q_th.label: 1} @@ -292,20 +268,14 @@ def __init__( meta_data: Optional[Dict] = None, ): """ - Parameters - ---------- - label : str - name of heatpump. - COP : float, TS - Coefficient of performance. - Q_ab : Flow - Heatsource input-flow. - P_el : Flow - electricity input-flow. - Q_th : Flow - thermal output-flow. - meta_data : Optional[Dict] - used to store more information about the element. Is not used internally, but saved in the results + Args: + label: The name + COP: Coefficient of performance. + Q_ab: Heatsource input-flow. + P_el: electricity input-flow. + Q_th: thermal output-flow. + on_off_parameters: Parameters defining the on/off behavior of the component. + meta_data: used to store more information about the element. Is not used internally, but saved in the results """ # super: diff --git a/flixOpt/plotting.py b/flixOpt/plotting.py index 85ae8f1fa..1390e4d32 100644 --- a/flixOpt/plotting.py +++ b/flixOpt/plotting.py @@ -37,20 +37,20 @@ def with_plotly( Parameters ---------- - data : pd.DataFrame + data: pd.DataFrame A DataFrame containing the data to plot, where the index represents time (e.g., hours), and each column represents a separate data series. - mode : {'bar', 'line'}, default='bar' + mode: {'bar', 'line'}, default='bar' The plotting mode. Use 'bar' for stacked bar charts or 'line' for stepped lines. 
- colors : List[str], str, default='viridis' + colors: List[str], str, default='viridis' A List of colors (as str) or a name of a colorscale (e.g., 'viridis', 'plasma') to use for coloring the data series. title: str The title of the plot. ylabel: str The label for the y-axis. - fig : go.Figure, optional + fig: go.Figure, optional A Plotly figure object to plot on. If not provided, a new figure will be created. show: bool @@ -210,20 +210,20 @@ def with_matplotlib( Parameters ---------- - data : pd.DataFrame + data: pd.DataFrame A DataFrame containing the data to plot. The index should represent time (e.g., hours), and each column represents a separate data series. - mode : {'bar', 'line'}, default='bar' + mode: {'bar', 'line'}, default='bar' Plotting mode. Use 'bar' for stacked bar charts or 'line' for stepped lines. - colors : List[str], str, default='viridis' + colors: List[str], str, default='viridis' A List of colors (as str) or a name of a colorscale (e.g., 'viridis', 'plasma') to use for coloring the data series. figsize: Tuple[int, int], optional Specify the size of the figure - fig : plt.Figure, optional + fig: plt.Figure, optional A Matplotlib figure object to plot on. If not provided, a new figure will be created. - ax : plt.Axes, optional + ax: plt.Axes, optional A Matplotlib axes object to plot on. If not provided, a new axes will be created. show: bool @@ -327,12 +327,12 @@ def heat_map_matplotlib( Parameters ---------- - data : pd.DataFrame + data: pd.DataFrame A DataFrame containing the data to be visualized. The index will be used for the y-axis, and columns will be used for the x-axis. The values in the DataFrame will be represented as colors in the heatmap. - color_map : str, optional + color_map: str, optional The colormap to use for the heatmap. Default is 'viridis'. Matplotlib supports various colormaps like 'plasma', 'inferno', 'cividis', etc. 
- figsize : tuple of float, optional + figsize: tuple of float, optional The size of the figure to create. Default is (12, 6), which results in a width of 12 inches and a height of 6 inches. show: bool Wether to show the figure after creation. @@ -406,12 +406,12 @@ def heat_map_plotly( Parameters ---------- - data : pd.DataFrame + data: pd.DataFrame A DataFrame with the data to be visualized. The index will be used for the y-axis, and columns will be used for the x-axis. The values in the DataFrame will be represented as colors in the heatmap. - color_map : str, optional + color_map: str, optional The color scale to use for the heatmap. Default is 'viridis'. Plotly supports various color scales like 'Cividis', 'Inferno', etc. - categorical_labels : bool, optional + categorical_labels: bool, optional If True, the x and y axes are treated as categorical data (i.e., the index and columns will not be interpreted as continuous data). Default is True. If False, the axes are treated as continuous, which may be useful for time series or numeric data. show: bool @@ -481,10 +481,10 @@ def reshape_to_2d(data_1d: np.ndarray, nr_of_steps_per_column: int) -> np.ndarra Parameters ---------- - data_1d : np.ndarray + data_1d: np.ndarray A 1D numpy array with the data to reshape. - nr_of_steps_per_column : int + nr_of_steps_per_column: int The number of steps (rows) per column in the resulting 2D array. For example, this could be 24 (for hours) or 31 (for days in a month). @@ -535,15 +535,15 @@ def heat_map_data_from_df( Parameters ---------- - df : pd.DataFrame + df: pd.DataFrame A DataFrame with a DateTime index containing the data to reshape. - periods : str + periods: str The time interval of each period (columns of the heatmap), such as 'YS' (year start), 'W' (weekly), 'D' (daily), 'h' (hourly) etc. - steps_per_period : str + steps_per_period: str The time interval within each period (rows in the heatmap), such as 'YS' (year start), 'W' (weekly), 'D' (daily), 'h' (hourly) etc. 
- fill : str, optional + fill: str, optional Method to fill missing values: 'ffill' for forward fill or 'bfill' for backward fill. Returns diff --git a/flixOpt/results.py b/flixOpt/results.py index 042239f7f..5331f8894 100644 --- a/flixOpt/results.py +++ b/flixOpt/results.py @@ -29,26 +29,26 @@ class CalculationResults: Parameters ---------- - model : linopy.Model + model: linopy.Model The linopy model that was used to solve the calculation. - infos : Dict + infos: Dict Information about the calculation, - results_structure : Dict[str, Dict[str, Dict]] + results_structure: Dict[str, Dict[str, Dict]] The structure of the flow_system that was used to solve the calculation. Attributes ---------- - model : linopy.Model + model: linopy.Model The linopy model that was used to solve the calculation. - components : Dict[str, ComponentResults] + components: Dict[str, ComponentResults] A dictionary of ComponentResults for each component in the flow_system. - buses : Dict[str, BusResults] + buses: Dict[str, BusResults] A dictionary of BusResults for each bus in the flow_system. - effects : Dict[str, EffectResults] + effects: Dict[str, EffectResults] A dictionary of EffectResults for each effect in the flow_system. - timesteps_extra : pd.DatetimeIndex + timesteps_extra: pd.DatetimeIndex The extra timesteps of the flow_system. - hours_per_timestep : xr.DataArray + hours_per_timestep: xr.DataArray The duration of each timestep in hours. 
 Class Methods @@ -81,13 +81,25 @@ def from_calculation(cls, calculation: 'Calculation'): name=calculation.name, folder=calculation.folder) - def __init__(self, - model: linopy.Model, - results_structure: Dict[str, Dict[str, Dict]], - name: str, - infos: Dict, - network_infos: Dict, - folder: Optional[pathlib.Path] = None): + def __init__( + self, + model: linopy.Model, + results_structure: Dict[str, Dict[str, Dict]], + name: str, + infos: Dict, + network_infos: Dict, + folder: Optional[pathlib.Path] = None + ): + """ + Args: + model: The linopy model that was used to solve the calculation. + results_structure: The structure of the flow_system that was used to solve the calculation. + name: The name of the calculation. + infos: Information about the calculation. + network_infos: Information about the network. + folder: The folder where the results are saved. + """ + self.model = model self._results_structure = results_structure self.infos = infos diff --git a/flixOpt/structure.py b/flixOpt/structure.py index fa8765f1f..941634d4a 100644 --- a/flixOpt/structure.py +++ b/flixOpt/structure.py @@ -41,8 +41,16 @@ def register_class_for_io(cls): class SystemModel(linopy.Model): + """ + The SystemModel is the linopy Model that is used to create the mathematical model of the flow_system. + It is used to create and store the variables and constraints for the flow_system. + """ def __init__(self, flow_system: 'FlowSystem'): + """ + Args: + flow_system: The flow_system that is used to create the model. + """ super().__init__(force_dim_names=True) self.flow_system = flow_system self.time_series_collection = flow_system.time_series_collection @@ -77,7 +85,7 @@ def coords_extra(self) -> Tuple[pd.DatetimeIndex]: class Interface: """ - This class is used to collect arguments about a Model. + This class is used to collect arguments about a Model. It's the base class for all Elements and Models in flixOpt. 
""" def transform_data(self, flow_system: 'FlowSystem'): @@ -128,7 +136,7 @@ def to_json(self, path: Union[str, pathlib.Path]): Parameters: ----------- - path : Union[str, pathlib.Path] + path: Union[str, pathlib.Path] The path to the json file. """ data = get_compact_representation(self.infos(use_numpy=True, use_element_label=True)) @@ -223,16 +231,13 @@ def __str__(self): class Element(Interface): - """Basic Element of flixOpt""" + """This class is the basic Element of flixOpt. Every Element has a label""" def __init__(self, label: str, meta_data: Dict = None): """ - Parameters - ---------- - label : str - label of the element - meta_data : Optional[Dict] - used to store more information about the element. Is not used internally, but saved in the results + Args: + label: The label of the element + meta_data: Used to store more information about the element. Is not used internally, but saved in the results """ self.label = Element._valid_label(label) self.meta_data = meta_data if meta_data is not None else {} @@ -272,18 +277,15 @@ def _valid_label(label: str) -> str: class Model: - """Stores Variables and Constraints""" + """Stores Variables and Constraints.""" def __init__(self, model: SystemModel, label_of_element: str, label: Optional[str] = None, label_full: Optional[str] = None): """ - Parameters - ---------- - label_of_element : str - The label of the parent (Element). Used to construct the full label of the model. - label : str - The label of the model. Used to construct the full label of the model. - label_full : str - The full label of the model. Can overwrite the full label constructed from the other labels. + Args: + model: The SystemModel that is used to create the model. + label_of_element: The label of the parent (Element). Used to construct the full label of the model. + label: The label of the model. Used to construct the full label of the model. + label_full: The full label of the model. 
Can overwrite the full label constructed from the other labels. """ self._model = model self.label_of_element = label_of_element @@ -312,9 +314,9 @@ def add( Parameters ---------- - item : linopy.Variable, linopy.Constraint, InterfaceModel + item: linopy.Variable, linopy.Constraint, InterfaceModel The variable, constraint or sub-model to add to the model - short_name : str, optional + short_name: str, optional The short name of the variable, constraint or sub-model. If not provided, the full name is used. """ # TODO: Check uniquenes of short names @@ -410,14 +412,13 @@ def all_sub_models(self) -> List['Model']: class ElementModel(Model): - """Interface to create the mathematical Variables and Constraints for Elements""" + """Stores the mathematical Variables and Constraints for Elements""" def __init__(self, model: SystemModel, element: Element): """ - Parameters - ---------- - element : Element - The element this model is created for. + Args: + model: The SystemModel that is used to create the model. + element: The element this model is created for. """ super().__init__(model, label_of_element=element.label_full, label=element.label, label_full=element.label_full) self.element = element @@ -446,12 +447,12 @@ def copy_and_convert_datatypes(data: Any, use_numpy: bool = True, use_element_la Parameters ---------- - data : Any + data: Any The input data to process, which may be deeply nested and contain a mix of types. - use_numpy : bool, optional + use_numpy: bool, optional If `True`, numeric numpy arrays (`np.ndarray`) are preserved as-is. If `False`, they are converted to lists. Default is `True`. - use_element_label : bool, optional + use_element_label: bool, optional If `True`, `Element` objects are represented by their `label`. If `False`, they are converted into a dictionary based on their initialization parameters. Default is `False`. 
diff --git a/flixOpt/utils.py b/flixOpt/utils.py index d43c5d999..c59aa1191 100644 --- a/flixOpt/utils.py +++ b/flixOpt/utils.py @@ -36,9 +36,9 @@ def convert_dataarray(data: xr.DataArray, mode: Literal['py', 'numpy', 'xarray', Parameters ---------- - data : xr.DataArray + data: xr.DataArray The data to convert. - mode : Literal['py', 'numpy', 'xarray', 'structure'] + mode: Literal['py', 'numpy', 'xarray', 'structure'] Whether to return the dataaray to - python native types (for json) - numpy array diff --git a/mkdocs.yml b/mkdocs.yml index 9482e962c..b4c4a59d2 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -116,8 +116,11 @@ plugins: # "^__init__" explicitly includes constructor methods filters: ["!^_[^_]", "^__init__"] - # Sets NumPy as the docstring style, recognizing sections like "Parameters", "Returns" - docstring_style: numpy + # Sets google as the docstring style + docstring_style: google + + # Improves type annotations + modernize_annotations: true # Renders parameter sections as tables instead of lists for better readability docstring_section_style: table From 3bb12d18d6ff3cf306db12ae8abe8d39071234ad Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 14 Mar 2025 18:32:04 +0100 Subject: [PATCH 15/87] Improve docs --- docs/api/components/linear-converters.md | 40 ++---------------------- mkdocs.yml | 5 ++- 2 files changed, 7 insertions(+), 38 deletions(-) diff --git a/docs/api/components/linear-converters.md b/docs/api/components/linear-converters.md index 87a0c8885..2ebf9f1f5 100644 --- a/docs/api/components/linear-converters.md +++ b/docs/api/components/linear-converters.md @@ -1,54 +1,20 @@ # Linear Converters API Reference -The `linear_converters` module provides pre-defined specialized converters that extend the base `LinearConverter` class. These components make it easier to create common energy system elements like boilers, heat pumps, and CHPs. 
- -## Boiler +The `linear_converters` module provides pre-defined specialized converters that simplify the usage of the `LinearConverter` class. Common energy system elements like boilers, heat pumps, and CHPs are predefined. +For more advanced LinearConverters, refer to the [LinearConverter API Reference](linear-converter.md). ::: flixOpt.linear_converters.Boiler - options: - members: true - show_root_heading: true - show_source: true - -## Power2Heat ::: flixOpt.linear_converters.Power2Heat - options: - members: true - show_root_heading: true - show_source: true - -## HeatPump ::: flixOpt.linear_converters.HeatPump - options: - members: true - show_root_heading: true - show_source: true - -## HeatPumpWithSource ::: flixOpt.linear_converters.HeatPumpWithSource - options: - members: true - show_root_heading: true - show_source: true - -## CoolingTower ::: flixOpt.linear_converters.CoolingTower - options: - members: true - show_root_heading: true - show_source: true - -## CHP (Combined Heat and Power) ::: flixOpt.linear_converters.CHP - options: - members: true - show_root_heading: true - show_source: true + ## Examples diff --git a/mkdocs.yml b/mkdocs.yml index b4c4a59d2..af4c9cfa9 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -26,7 +26,7 @@ nav: - Sink: api/components/sink.md - SourceAndSink: api/components/source-and-sink.md - LinearConverter: api/components/linear-converter.md - - LinearConverter Subclasses: api/components/linear-converters.md + - Subclasses of LinearConverter: api/components/linear-converters.md - Interfaces: - OnOffParameters: api/interfaces/on_off_parameters.md - InvestParameters: api/interfaces/invest_parameters.md @@ -119,6 +119,9 @@ plugins: # Sets google as the docstring style docstring_style: google + # whether the documented object's name should be displayed as a heading at the beginning of its documentation + show_root_heading: true + # Improves type annotations modernize_annotations: true From a93766a534f4a6164adaa546574bc8067f140796 
Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 14 Mar 2025 18:35:05 +0100 Subject: [PATCH 16/87] Update meta_data documentation --- flixOpt/components.py | 10 +++++----- flixOpt/effects.py | 2 +- flixOpt/elements.py | 8 ++++---- flixOpt/linear_converters.py | 37 ++++++++++++++---------------------- flixOpt/structure.py | 2 +- 5 files changed, 25 insertions(+), 34 deletions(-) diff --git a/flixOpt/components.py b/flixOpt/components.py index 9ddb98c0a..fb305656c 100644 --- a/flixOpt/components.py +++ b/flixOpt/components.py @@ -40,7 +40,6 @@ def __init__( """ Args: label: The name - meta_data: used to store more information about the element. Is not used internally, but saved in the results inputs: The input Flows outputs: The output Flows on_off_parameters: Information about on and off states. See class OnOffParameters. @@ -52,6 +51,7 @@ def __init__( Either 'segmented_conversion_factors' or 'conversion_factors' can be used! --> "gaps" can be expressed by a segment not starting at the end of the prior segment: [(1,3), (4,5)] --> "points" can expressed as segment with same begin and end: [(3,3), (4,4)] + meta_data: used to store more information about the Element. Is not used internally, but saved in the results. Only use python native types. """ super().__init__(label, inputs, outputs, on_off_parameters, meta_data=meta_data) self.conversion_factors = conversion_factors or [] @@ -156,7 +156,6 @@ def __init__( """ Args: label: The name - meta_data: used to store more information about the element. Is not used internally, but saved in the results charging: ingoing flow. discharging: outgoing flow. capacity_in_flow_hours: nominal capacity/size of the storage @@ -170,6 +169,7 @@ def __init__( relative_loss_per_hour: loss per chargeState-Unit per hour. The default is 0. prevent_simultaneous_charge_and_discharge: If True, loading and unloading at the same time is not possible. 
Increases the number of binary variables, but is recommended for easier evaluation. The default is True. + meta_data: used to store more information about the Element. Is not used internally, but saved in the results. Only use python native types. """ # TODO: fixed_relative_chargeState implementieren super().__init__( @@ -243,7 +243,6 @@ def __init__( Args: label: The name - meta_data: used to store more information about the element. Is not used internally, but saved in the results in1: The inflow at side A. Pass InvestmentParameters here. out1: The outflow at side B. in2: The optional inflow at side B. @@ -253,6 +252,7 @@ def __init__( absolute_losses: The absolute loss, occur only when the Flow is on. Induces the creation of the ON-Variable on_off_parameters: Parameters defining the on/off behavior of the component. prevent_simultaneous_flows_in_both_directions: If True, inflow and outflow are not allowed to be both non-zero at same timestep. + meta_data: used to store more information about the Element. Is not used internally, but saved in the results. Only use python native types. """ super().__init__( label, @@ -538,10 +538,10 @@ def __init__( """ Args: label: The name - meta_data: used to store more information about the element. Is not used internally, but saved in the results source: output-flow of this component sink: input-flow of this component prevent_simultaneous_sink_and_source: If True, inflow and outflow can not be active simultaniously. + meta_data: used to store more information about the Element. Is not used internally, but saved in the results. Only use python native types. """ super().__init__( label, @@ -561,8 +561,8 @@ def __init__(self, label: str, source: Flow, meta_data: Optional[Dict] = None): """ Args: label: The name - meta_data: used to store more information about the element. Is not used internally, but saved in the results source: output-flow of source + meta_data: used to store more information about the Element. 
Is not used internally, but saved in the results. Only use python native types. """ super().__init__(label, outputs=[source], meta_data=meta_data) self.source = source diff --git a/flixOpt/effects.py b/flixOpt/effects.py index fcf629595..82cc1de8c 100644 --- a/flixOpt/effects.py +++ b/flixOpt/effects.py @@ -54,7 +54,6 @@ def __init__( label: The name unit: The unit of effect, i.g. €, kg_CO2, kWh_primaryEnergy description: The long name - meta_data: used to store more information about the element. Is not used internally, but saved in the results is_standard: true, if Standard-Effect (for direct input of value without effect (alternatively to dict)) , else false is_objective: true, if optimization target specific_share_to_other_effects_operation: {effectType: TS, ...}, i.g. 180 €/t_CO2, input as {costs: 180}, optional @@ -69,6 +68,7 @@ def __init__( maximum_invest: maximal sum (only invest) of the effect minimum_total: min sum of effect (invest+operation). maximum_total: max sum of effect (invest+operation). + meta_data: used to store more information about the Element. Is not used internally, but saved in the results. Only use python native types. """ super().__init__(label, meta_data=meta_data) self.label = label diff --git a/flixOpt/elements.py b/flixOpt/elements.py index 5b2975905..f8d3cb75d 100644 --- a/flixOpt/elements.py +++ b/flixOpt/elements.py @@ -40,7 +40,6 @@ def __init__( """ Args: label: The name - meta_data: used to store more information about the element. Is not used internally, but saved in the results inputs: input flows. outputs: output flows. on_off_parameters: Information about on and off state of Component. @@ -49,6 +48,7 @@ def __init__( See class OnOffParameters. prevent_simultaneous_flows: Define a Group of Flows. Only one them can be on at a time. Induces On-Variable in all Flows! If possible, use OnOffParameters in a single Flow instead. + meta_data: used to store more information about the Element. 
Is not used internally, but saved in the results. Only use python native types. """ super().__init__(label, meta_data=meta_data) self.inputs: List['Flow'] = inputs or [] @@ -86,10 +86,10 @@ def __init__( """ Args: label: The name - meta_data: used to store more information about the element. Is not used internally, but saved in the results excess_penalty_per_flow_hour: excess costs / penalty costs (bus balance compensation) (none/ 0 -> no penalty). The default is 1e5. (Take care: if you use a timeseries (no scalar), timeseries is aggregated if calculation_type = aggregated!) + meta_data: used to store more information about the Element. Is not used internally, but saved in the results. Only use python native types. """ super().__init__(label, meta_data=meta_data) self.excess_penalty_per_flow_hour = excess_penalty_per_flow_hour @@ -153,8 +153,7 @@ def __init__( r""" Args: label: The name - meta_data: used to store more information about the element. Is not used internally, but saved in the results - bus: bus to which flow is linked + bus: bus (label) to which flow is linked size: size of the flow. If InvestmentParameters is used, size is optimized. If size is None, a default value is used. relative_minimum: min value is relative_minimum multiplied by size @@ -178,6 +177,7 @@ def __init__( used for fixed load or supply profiles, i.g. heat demand, wind-power, solarthermal If the load-profile is just an upper limit, use relative_maximum instead. previous_flow_rate: previous flow rate of the component. + meta_data: used to store more information about the Element. Is not used internally, but saved in the results. Only use python native types. 
""" super().__init__(label, meta_data=meta_data) self.size = size or CONFIG.modeling.BIG # Default size diff --git a/flixOpt/linear_converters.py b/flixOpt/linear_converters.py index 13f78c36d..e0efa35e2 100644 --- a/flixOpt/linear_converters.py +++ b/flixOpt/linear_converters.py @@ -34,7 +34,7 @@ def __init__( Q_fu: fuel input-flow Q_th: thermal output-flow. on_off_parameters: Parameters defining the on/off behavior of the component. - meta_data: used to store more information about the element. Is not used internally, but saved in the results + meta_data: used to store more information about the Element. Is not used internally, but saved in the results. Only use python native types. """ super().__init__( label, @@ -75,7 +75,7 @@ def __init__( P_el: electric input-flow Q_th: thermal output-flow. on_off_parameters: Parameters defining the on/off behavior of the component. - meta_data: used to store more information about the element. Is not used internally, but saved in the results + meta_data: used to store more information about the Element. Is not used internally, but saved in the results. Only use python native types. """ super().__init__( label, @@ -117,7 +117,7 @@ def __init__( P_el: electricity input-flow. Q_th: thermal output-flow. on_off_parameters: Parameters defining the on/off behavior of the component. - meta_data: used to store more information about the element. Is not used internally, but saved in the results + meta_data: used to store more information about the Element. Is not used internally, but saved in the results. Only use python native types. """ super().__init__( label, @@ -159,7 +159,7 @@ def __init__( P_el: electricity input-flow. Q_th: thermal input-flow. on_off_parameters: Parameters defining the on/off behavior of the component. - meta_data: used to store more information about the element. Is not used internally, but saved in the results + meta_data: used to store more information about the Element. 
Is not used internally, but saved in the results. Only use python native types. """ super().__init__( label, @@ -199,24 +199,15 @@ def __init__( meta_data: Optional[Dict] = None, ): """ - constructor of cCHP - - Parameters - ---------- - label: str - name of CHP-unit. - eta_th: float or TS - thermal efficiency. - eta_el: float or TS - electrical efficiency. - Q_fu: cFlow - fuel input-flow. - P_el: cFlow - electricity output-flow. - Q_th: cFlow - heat output-flow. - meta_data: Optional[Dict] - used to store more information about the element. Is not used internally, but saved in the results + Args: + label: The name + eta_th: thermal efficiency. + eta_el: electrical efficiency. + Q_fu: fuel input-flow. + P_el: electricity output-flow. + Q_th: heat output-flow. + on_off_parameters: Parameters defining the on/off behavior of the component. + meta_data: used to store more information about the Element. Is not used internally, but saved in the results. Only use python native types. """ heat = {Q_fu.label: eta_th, Q_th.label: 1} electricity = {Q_fu.label: eta_el, P_el.label: 1} @@ -275,7 +266,7 @@ def __init__( P_el: electricity input-flow. Q_th: thermal output-flow. on_off_parameters: Parameters defining the on/off behavior of the component. - meta_data: used to store more information about the element. Is not used internally, but saved in the results + meta_data: used to store more information about the Element. Is not used internally, but saved in the results. Only use python native types. """ # super: diff --git a/flixOpt/structure.py b/flixOpt/structure.py index 941634d4a..0312221fb 100644 --- a/flixOpt/structure.py +++ b/flixOpt/structure.py @@ -237,7 +237,7 @@ def __init__(self, label: str, meta_data: Dict = None): """ Args: label: The label of the element - meta_data: Used to store more information about the element. Is not used internally, but saved in the results + meta_data: used to store more information about the Element. 
Is not used internally, but saved in the results. Only use python native types. """ self.label = Element._valid_label(label) self.meta_data = meta_data if meta_data is not None else {} From 7b77f83aee1aaa033f46675195edd84995b426ae Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 14 Mar 2025 18:38:02 +0100 Subject: [PATCH 17/87] Improve docstrings of label --- flixOpt/components.py | 12 ++++++------ flixOpt/effects.py | 2 +- flixOpt/elements.py | 8 ++++---- flixOpt/linear_converters.py | 12 ++++++------ 4 files changed, 17 insertions(+), 17 deletions(-) diff --git a/flixOpt/components.py b/flixOpt/components.py index fb305656c..018e6cb63 100644 --- a/flixOpt/components.py +++ b/flixOpt/components.py @@ -39,7 +39,7 @@ def __init__( ): """ Args: - label: The name + label: The label of the Element. Used to identify it in the FlowSystem inputs: The input Flows outputs: The output Flows on_off_parameters: Information about on and off states. See class OnOffParameters. @@ -155,7 +155,7 @@ def __init__( ): """ Args: - label: The name + label: The label of the Element. Used to identify it in the FlowSystem charging: ingoing flow. discharging: outgoing flow. capacity_in_flow_hours: nominal capacity/size of the storage @@ -242,7 +242,7 @@ def __init__( with potential losses. Args: - label: The name + label: The label of the Element. Used to identify it in the FlowSystem in1: The inflow at side A. Pass InvestmentParameters here. out1: The outflow at side B. in2: The optional inflow at side B. @@ -537,7 +537,7 @@ def __init__( ): """ Args: - label: The name + label: The label of the Element. Used to identify it in the FlowSystem source: output-flow of this component sink: input-flow of this component prevent_simultaneous_sink_and_source: If True, inflow and outflow can not be active simultaniously. 
@@ -560,7 +560,7 @@ class Source(Component): def __init__(self, label: str, source: Flow, meta_data: Optional[Dict] = None): """ Args: - label: The name + label: The label of the Element. Used to identify it in the FlowSystem source: output-flow of source meta_data: used to store more information about the Element. Is not used internally, but saved in the results. Only use python native types. """ @@ -573,7 +573,7 @@ class Sink(Component): def __init__(self, label: str, sink: Flow, meta_data: Optional[Dict] = None): """ Args: - label: The name + label: The label of the Element. Used to identify it in the FlowSystem meta_data: used to store more information about the element. Is not used internally, but saved in the results sink: input-flow of sink """ diff --git a/flixOpt/effects.py b/flixOpt/effects.py index 82cc1de8c..8e9b2215f 100644 --- a/flixOpt/effects.py +++ b/flixOpt/effects.py @@ -51,7 +51,7 @@ def __init__( ): """ Args: - label: The name + label: The label of the Element. Used to identify it in the FlowSystem unit: The unit of effect, i.g. €, kg_CO2, kWh_primaryEnergy description: The long name is_standard: true, if Standard-Effect (for direct input of value without effect (alternatively to dict)) , else false diff --git a/flixOpt/elements.py b/flixOpt/elements.py index f8d3cb75d..f8e3c1738 100644 --- a/flixOpt/elements.py +++ b/flixOpt/elements.py @@ -39,7 +39,7 @@ def __init__( ): """ Args: - label: The name + label: The label of the Element. Used to identify it in the FlowSystem inputs: input flows. outputs: output flows. on_off_parameters: Information about on and off state of Component. @@ -85,7 +85,7 @@ def __init__( ): """ Args: - label: The name + label: The label of the Element. Used to identify it in the FlowSystem excess_penalty_per_flow_hour: excess costs / penalty costs (bus balance compensation) (none/ 0 -> no penalty). The default is 1e5. 
(Take care: if you use a timeseries (no scalar), timeseries is aggregated if calculation_type = aggregated!) @@ -152,8 +152,8 @@ def __init__( ): r""" Args: - label: The name - bus: bus (label) to which flow is linked + label: The label of the Flow. Used to identify it in the FlowSystem. Its `full_label` consists of the label of the Component and the label of the Flow. + bus: label of the bus the flow is connected to. size: size of the flow. If InvestmentParameters is used, size is optimized. If size is None, a default value is used. relative_minimum: min value is relative_minimum multiplied by size diff --git a/flixOpt/linear_converters.py b/flixOpt/linear_converters.py index e0efa35e2..18c28830b 100644 --- a/flixOpt/linear_converters.py +++ b/flixOpt/linear_converters.py @@ -29,7 +29,7 @@ def __init__( ): """ Args: - label: The name + label: The label of the Element. Used to identify it in the FlowSystem eta: thermal efficiency. Q_fu: fuel input-flow Q_th: thermal output-flow. @@ -70,7 +70,7 @@ def __init__( ): """ Args: - label: The name + label: The label of the Element. Used to identify it in the FlowSystem eta: thermal efficiency. P_el: electric input-flow Q_th: thermal output-flow. @@ -112,7 +112,7 @@ def __init__( ): """ Args: - label: The name + label: The label of the Element. Used to identify it in the FlowSystem COP: Coefficient of performance. P_el: electricity input-flow. Q_th: thermal output-flow. @@ -154,7 +154,7 @@ def __init__( ): """ Args: - label: The name + label: The label of the Element. Used to identify it in the FlowSystem specific_electricity_demand: auxiliary electricty demand per cooling power, i.g. 0.02 (2 %). P_el: electricity input-flow. Q_th: thermal input-flow. @@ -200,7 +200,7 @@ def __init__( ): """ Args: - label: The name + label: The label of the Element. Used to identify it in the FlowSystem eta_th: thermal efficiency. eta_el: electrical efficiency. Q_fu: fuel input-flow.
@@ -260,7 +260,7 @@ def __init__( ): """ Args: - label: The name + label: The label of the Element. Used to identify it in the FlowSystem COP: Coefficient of performance. Q_ab: Heatsource input-flow. P_el: electricity input-flow. From 9c3423e6afd93c9002ceff027f6538feb4d8c936 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 14 Mar 2025 18:57:56 +0100 Subject: [PATCH 18/87] Improve docstrings of method --- flixOpt/flow_system.py | 36 +++++++++++++----------------------- 1 file changed, 13 insertions(+), 23 deletions(-) diff --git a/flixOpt/flow_system.py b/flixOpt/flow_system.py index 5700a462c..4b1d2c6eb 100644 --- a/flixOpt/flow_system.py +++ b/flixOpt/flow_system.py @@ -207,33 +207,23 @@ def plot_network( """ Visualizes the network structure of a FlowSystem using PyVis, saving it as an interactive HTML file. - Parameters: - - path (Union[bool, str, pathlib.Path], default='flow_system.html'): - Path to save the HTML visualization. - - `False`: Visualization is created but not saved. - - `str` or `Path`: Specifies file path (default: 'flow_system.html'). - - - controls (Union[bool, List[str]], default=True): - UI controls to add to the visualization. - - `True`: Enables all available controls. - - `List`: Specify controls, e.g., ['nodes', 'layout']. - - Options: 'nodes', 'edges', 'layout', 'interaction', 'manipulation', 'physics', 'selection', 'renderer'. - - - show (bool, default=True): - Whether to open the visualization in the web browser. + Args: + path: Path to save the HTML visualization. + - `False`: Visualization is created but not saved. + - `str` or `Path`: Specifies file path (default: 'flow_system.html'). + controls: UI controls to add to the visualization. + - `True`: Enables all available controls. + - `List`: Specify controls, e.g., ['nodes', 'layout']. + - Options: 'nodes', 'edges', 'layout', 'interaction', 'manipulation', 'physics', 'selection', 'renderer'. 
+ show: Whether to open the visualization in the web browser. Returns: - Optional[pyvis.network.Network]: The `Network` instance representing the visualization, or `None` if `pyvis` is not installed. - Usage: - - Visualize and open the network with default options: - >>> self.plot_network() - - - Save the visualization without opening: - >>> self.plot_network(show=False) - - - Visualize with custom controls and path: - >>> self.plot_network(path='output/custom_network.html', controls=['nodes', 'layout']) + Examples: + >>> flow_system.plot_network() + >>> flow_system.plot_network(show=False) + >>> flow_system.plot_network(path='output/custom_network.html', controls=['nodes', 'layout']) Notes: - This function requires `pyvis`. If not installed, the function prints a warning and returns `None`. From 074e8fb071df4d2230ddf504df8cf64410b3357a Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 14 Mar 2025 18:58:11 +0100 Subject: [PATCH 19/87] Bugfix math and add contribute.md --- docs/contribute.md | 86 +++++++++++++++++++++++++++++++++++++ docs/examples.md | 1 + docs/javascripts/mathjax.js | 9 ++-- mkdocs.yml | 8 ++-- 4 files changed, 94 insertions(+), 10 deletions(-) create mode 100644 docs/contribute.md create mode 100644 docs/examples.md diff --git a/docs/contribute.md b/docs/contribute.md new file mode 100644 index 000000000..e8f8b90a8 --- /dev/null +++ b/docs/contribute.md @@ -0,0 +1,86 @@ +# Contributing to the Project + +We warmly welcome contributions from the community! This guide will help you get started with contributing to our project. + +## Ways to Contribute + +There are many ways you can contribute: + +1. **Reporting Bugs** + - Open clear, detailed issues on our GitHub repository + - Include: + - Steps to reproduce + - Expected behavior + - Actual behavior + - Your environment details + +2. 
**Suggesting Enhancements** + - Submit feature requests as GitHub issues + - Provide: + - Clear description of the proposed feature + - Potential use cases + - Any initial thoughts on implementation + +3. **Code Contributions** + - Fork the repository + - Create a new branch for your feature or bugfix + - Write clear, documented code + - Ensure all tests pass + - Add tests for your code, if your changes are complex + - Submit a pull request + +## Development Setup +```bash +# Clone the repository +git clone https://github.com/flixOpt/flixopt.git +cd flixopt + +# It's recommended to use a virtual environment +python -m venv venv +source venv/bin/activate # On Windows, use `venv\Scripts\activate` + +# Install development dependencies +pip install -e .[dev] # This installs the package in editable mode with development dependencies + +# Create a new branch +git checkout -b feature/your-feature-name + +# Run tests +pytest + +# Run a linter to improve code quality +ruff check . --fix +``` + +# Best practices + +## Coding Guidelines + +- Follow PEP 8 style guidelines +- Write clear, commented code +- Include type hints +- Create or update tests for new functionality +- Ensure 100% test coverage for new code + +## Branches +As we start to think flixOpt in **Releases**, we decided to introduce multiple **dev**-branches instead of only one: +Following the **Semantic Versioning** guidelines, we introduced: +- `next/patch`: This is where all pull requests for the next patch release (1.0.x) go. +- `next/minor`: This is where all pull requests for the next minor release (1.x.0) go. +- `next/major`: This is where all pull requests for the next major release (x.0.0) go. + +- Everything else remains in `feature/...`-branches. + +## Pull requests +Every feature or bugfix should be merged into one of the 3 [release branches](#branches), using **Squash and merge** or a regular **single commit**. 
+At some point, `next/minor` or `next/major` will get merged into `main` using a regular **Merge** (not squash). +*This ensures that Features are kept separate, and the `next/...`branches stay in synch with ``main`.* + +**Remember to update the version in `pyproject.toml`** + +## Releases +As stated, we follow **Semantic Versioning**. +Right after one of the 3 [release branches](#Branches) is merged into main, a **Tag** should be added to the merge commit and pushed to the main branch. The tag has the form `v1.2.3`. +With this tag, a release with **Release Notes** must be created. + +*This is our current best practice* diff --git a/docs/examples.md b/docs/examples.md new file mode 100644 index 000000000..f57f1d405 --- /dev/null +++ b/docs/examples.md @@ -0,0 +1 @@ +TODO: Add examples \ No newline at end of file diff --git a/docs/javascripts/mathjax.js b/docs/javascripts/mathjax.js index 95d619efc..e9dbb21cc 100644 --- a/docs/javascripts/mathjax.js +++ b/docs/javascripts/mathjax.js @@ -1,12 +1,11 @@ window.MathJax = { tex: { - inlineMath: [["\\(", "\\)"]], - displayMath: [["\\[", "\\]"]], + inlineMath: [['$', '$'], ['\\(', '\\)']], + displayMath: [['$$', '$$'], ['\\[', '\\]']], processEscapes: true, - processEnvironments: true + tags: 'ams' }, options: { - ignoreHtmlClass: ".*|", - processHtmlClass: "arithmatex" + skipHtmlTags: ['script', 'noscript', 'style', 'textarea', 'pre'] } }; \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index af4c9cfa9..b4ff487cc 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -9,9 +9,7 @@ nav: - Home: index.md - Getting Started: getting-started.md - Concepts: concepts/overview.md - - Examples: - - Basic Example: examples/basic.md - - Advanced Example: examples/advanced.md + - Examples: examples.md - API Reference: - FlowSystem: api/flow-system.md - Elements: @@ -35,7 +33,7 @@ nav: - Calculation: api/calculation.md - Datatypes: api/datatypes.md - - LaTeX: latex-example.md + - Display Math example: latex-example.md - 
Contribute: contribute.md @@ -156,7 +154,7 @@ plugins: merge_init_into_class: true # Sets the base heading level for documented objects (h2) - heading_level: 2 + heading_level: 1 # Orders members as they appear in the source code members_order: source From 3928f0325c8b59777070da21aac61e4f2d640ced Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 14 Mar 2025 19:04:43 +0100 Subject: [PATCH 20/87] Improve contribute.md --- docs/contribute.md | 57 +++++++++------------------------------------- 1 file changed, 11 insertions(+), 46 deletions(-) diff --git a/docs/contribute.md b/docs/contribute.md index e8f8b90a8..ad71a7078 100644 --- a/docs/contribute.md +++ b/docs/contribute.md @@ -2,56 +2,21 @@ We warmly welcome contributions from the community! This guide will help you get started with contributing to our project. -## Ways to Contribute - -There are many ways you can contribute: - -1. **Reporting Bugs** - - Open clear, detailed issues on our GitHub repository - - Include: - - Steps to reproduce - - Expected behavior - - Actual behavior - - Your environment details - -2. **Suggesting Enhancements** - - Submit feature requests as GitHub issues - - Provide: - - Clear description of the proposed feature - - Potential use cases - - Any initial thoughts on implementation - -3. 
**Code Contributions** - Fork the repository - Create a new branch for your feature or bugfix - Write clear, documented code - Ensure all tests pass - Add tests for your code, if your changes are complex - Submit a pull request - ## Development Setup -```bash -# Clone the repository -git clone https://github.com/flixOpt/flixopt.git -cd flixopt - -# It's recommended to use a virtual environment -python -m venv venv -source venv/bin/activate # On Windows, use `venv\Scripts\activate` - -# Install development dependencies -pip install -e .[dev] # This installs the package in editable mode with development dependencies - -# Create a new branch -git checkout -b feature/your-feature-name +1. Clone the repository `git clone https://github.com/flixOpt/flixopt.git` +2. Install the development dependencies `pip install --editable .[dev,docs]` +3. Run `pytest` and `ruff check .` to ensure your code passes all tests -# Run tests -pytest +## Documentation +flixOpt uses [mkdocs](https://www.mkdocs.org/) to generate documentation. To preview the documentation locally, run `mkdocs serve` in the root directory. -# Run a linter to improve code quality -ruff check . --fix -``` +## Helpful Commands +- `mkdocs serve` to preview the documentation locally. Navigate to `http://127.0.0.1:8000/` to view the documentation. +- `pytest` to run the test suite (You can also run the provided python script `run_all_test.py`) +- `ruff check .` to run the linter +- `ruff check . 
--fix` to automatically fix linting issues +--- # Best practices ## Coding Guidelines From bddbc6292759eb84a05a36161792b409f5ba93ee Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 15 Mar 2025 12:11:08 +0100 Subject: [PATCH 21/87] Improve contribute.md --- docs/contribute.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/contribute.md b/docs/contribute.md index ad71a7078..96182ffdc 100644 --- a/docs/contribute.md +++ b/docs/contribute.md @@ -41,8 +41,6 @@ Every feature or bugfix should be merged into one of the 3 [release branches](#b At some point, `next/minor` or `next/major` will get merged into `main` using a regular **Merge** (not squash). *This ensures that Features are kept separate, and the `next/...`branches stay in synch with ``main`.* -**Remember to update the version in `pyproject.toml`** - ## Releases As stated, we follow **Semantic Versioning**. Right after one of the 3 [release branches](#Branches) is merged into main, a **Tag** should be added to the merge commit and pushed to the main branch. The tag has the form `v1.2.3`. 
From c955328d76933d3606dee1553ff6b594827ad2f6 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 15 Mar 2025 12:53:06 +0100 Subject: [PATCH 22/87] Add docstring math to the docs --- mkdocs.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mkdocs.yml b/mkdocs.yml index b4ff487cc..7b54ff612 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -96,6 +96,8 @@ markdown_extensions: - tables - pymdownx.tabbed: alternate_style: true + - pymdownx.arithmatex: + generic: true - pymdownx.emoji: emoji_index: !!python/name:material.extensions.emoji.twemoji emoji_generator: !!python/name:material.extensions.emoji.to_svg From b89daafe899ba4dbd59780a4d34f7808e60f80b7 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 15 Mar 2025 12:53:18 +0100 Subject: [PATCH 23/87] Add math to Bus --- flixOpt/elements.py | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/flixOpt/elements.py b/flixOpt/elements.py index f8e3c1738..a90e02e30 100644 --- a/flixOpt/elements.py +++ b/flixOpt/elements.py @@ -75,9 +75,34 @@ def infos(self, use_numpy=True, use_element_label=False) -> Dict: @register_class_for_io class Bus(Element): - """ - realizing balance of all linked flows - (penalty flow is excess can be activated) + r""" + A Bus represents a nodal balance between the flow rates of its incoming and outgoing **Flows** + ($\mathcal{F}_\text{in}$ and $\mathcal{F}_\text{out}$), + which must hold for every time step $\text{t}_i \in \mathcal{T}$. + + $$ + \sum_{f_\text{in} \in \mathcal{F}_\text{in}} p_{f_\text{in}}(\text{t}_i) = + \sum_{f_\text{out} \in \mathcal{F}_\text{out}} p_{f_\text{out}}(\text{t}_i) + $$ + + To handle ifeasabilities gently, 2 variables $\phi_\text{in}(\text{t}_i)\geq0$ and + $\phi_\text{out}(\text{t}_i)\geq0$ might be introduced. + These represent the missing or excess flow_rate in Bus. 
E certain amount of penalty occurs for each missing or + excess flow_rate in the balance (`excess_penalty_per_flow_hour`), so they usually dont affect the Optimization. + The penalty term is defined as + + $$ + s_{b \rightarrow \Phi}(\text{t}_i) = + \text a_{b \rightarrow \Phi}(\text{t}_i) \cdot \Delta \text{t}_i + \cdot [ \phi_\text{in}(\text{t}_i) + \phi_\text{out}(\text{t}_i) ] + $$ + + Which changes the balance to + + $$ + \sum_{f_\text{in} \in \mathcal{F}_\text{in}} p_{f_ \text{in}}(\text{t}_i) + \phi_\text{in}(\text{t}_i) = + \sum_{f_\text{out} \in \mathcal{F}_\text{out}} p_{f_\text{out}}(\text{t}_i) + \phi_\text{out}(\text{t}_i) + $$ """ def __init__( From 6ee2b58bfbc07f49b46d218a99e4ae43763335cc Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 15 Mar 2025 17:47:13 +0100 Subject: [PATCH 24/87] Improve mathjax rendering without reloading --- docs/javascripts/mathjax.js | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/docs/javascripts/mathjax.js b/docs/javascripts/mathjax.js index e9dbb21cc..e5f97bfa7 100644 --- a/docs/javascripts/mathjax.js +++ b/docs/javascripts/mathjax.js @@ -8,4 +8,11 @@ window.MathJax = { options: { skipHtmlTags: ['script', 'noscript', 'style', 'textarea', 'pre'] } -}; \ No newline at end of file +}; + +document$.subscribe(() => { + MathJax.startup.output.clearCache() + MathJax.typesetClear() + MathJax.texReset() + MathJax.typesetPromise() +}) \ No newline at end of file From 185f7f359febbd6d4029edbd60b3d0f902d1340e Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 15 Mar 2025 18:09:00 +0100 Subject: [PATCH 25/87] Use references for all equations --- docs/javascripts/mathjax.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/javascripts/mathjax.js b/docs/javascripts/mathjax.js index e5f97bfa7..bb7094d50 100644 --- a/docs/javascripts/mathjax.js +++ b/docs/javascripts/mathjax.js @@ -3,7 +3,7 @@ 
window.MathJax = { inlineMath: [['$', '$'], ['\\(', '\\)']], displayMath: [['$$', '$$'], ['\\[', '\\]']], processEscapes: true, - tags: 'ams' + tags: 'all' }, options: { skipHtmlTags: ['script', 'noscript', 'style', 'textarea', 'pre'] From 8bc2183c05b652ce51e7ebe486aa907fb9710a60 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 15 Mar 2025 18:09:11 +0100 Subject: [PATCH 26/87] Add Component docs --- docs/api/components/component.md | 1 + mkdocs.yml | 1 + 2 files changed, 2 insertions(+) create mode 100644 docs/api/components/component.md diff --git a/docs/api/components/component.md b/docs/api/components/component.md new file mode 100644 index 000000000..473c6d8f0 --- /dev/null +++ b/docs/api/components/component.md @@ -0,0 +1 @@ +::: flixOpt.elements.Component \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index 7b54ff612..931933e9b 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -17,6 +17,7 @@ nav: - Flow: api/flow.md - Bus: api/bus.md - Components: + - Component: api/components/component.md - Storage: api/components/storage.md - LinearConverter: api/components/linear-converter.md - Transmission: api/components/transmission.md From 659cc51824480fb710ce80623609e2ae9e4c1347 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 15 Mar 2025 18:09:18 +0100 Subject: [PATCH 27/87] Improve docstrings --- flixOpt/calculation.py | 2 +- flixOpt/components.py | 36 ++++++++++++++++++++++++++++++--- flixOpt/elements.py | 43 +++++++++++++++++++++++++++++++++++++----- 3 files changed, 72 insertions(+), 9 deletions(-) diff --git a/flixOpt/calculation.py b/flixOpt/calculation.py index 4642ed2c9..9c3816e83 100644 --- a/flixOpt/calculation.py +++ b/flixOpt/calculation.py @@ -283,7 +283,7 @@ def _perform_aggregation(self): class SegmentedCalculation(Calculation): def __init__( self, - name, + name: str, flow_system: FlowSystem, timesteps_per_segment: int, overlap_timesteps: int, 
diff --git a/flixOpt/components.py b/flixOpt/components.py index 018e6cb63..0997c16c5 100644 --- a/flixOpt/components.py +++ b/flixOpt/components.py @@ -21,10 +21,12 @@ logger = logging.getLogger('flixOpt') + @register_class_for_io class LinearConverter(Component): """ - Converts input-Flows into output-Flows via linear conversion factors + Converts input-Flows into output-Flows via linear conversion factors + """ def __init__( @@ -126,8 +128,36 @@ def degrees_of_freedom(self): @register_class_for_io class Storage(Component): - """ - Klasse Storage + r""" + **Storages** have one incoming and one outgoing **Flow** - $f_\text{in}$ and $f_\text{out}$ - + each with an efficiency $\eta_\text{in}$ and $\eta_\text{out}$. Further, storages have a size $\text C$ and a + state of charge $c(\text{t}_i)$. + Similarly to the flow-rate $p(\text{t}_i)$ of a `Flow`, the `size` $\text C$ combined with a relative upper bound + $\text c^{\text{U}}_\text{rel}(\text t_{i})$ and lower bound $\text c^{\text{L}}_\text{rel}(\text t_{i})$ + limits the state of charge $c(\text{t}_i)$ by $\eqref{eq:Storage_Bounds}$. + + $$ \label{eq:Storage_Bounds} + \text C \cdot \text c^{\text{L}}_{\text{rel}}(\text t_{i}) + \leq c(\text{t}_i) \leq + \text C \cdot \text c^{\text{U}}_{\text{rel}}(\text t_{i}) + $$ + + With $\text c^{\text{L}}_{\text{rel}}(\text t_{i}) = 0$ and $\text c^{\text{U}}_{\text{rel}}(\text t_{i}) = 1$, + Equation $\eqref{eq:Storage_Bounds}$ simplifies to + + $$ 0 \leq c(\text t_{i}) \leq \text C $$ + + The state of charge $c(\text{t}_i)$ decreases by a fraction of the prior state of charge. The belonging parameter + $ \dot{ \text c}_\text{rel, loss}(\text{t}_i)$ expresses the "loss fraction per hour". 
The storage balance from $\text{t}_i$ to $\text t_{i+1}$ is + + $$ + \begin{align*} + c(\text{t}_{i+1}) &= c(\text{t}_{i}) \cdot (1-\dot{\text{c}}_\text{rel,loss}(\text{t}_i) \cdot \Delta \text{t}_{i}) \\ + &\quad + p_{f_\text{in}}(\text{t}_i) \cdot \Delta \text{t}_i \cdot \eta_\text{in}(\text{t}_i) \\ + &\quad - \frac{p_{f_\text{out}}(\text{t}_i) \cdot \Delta \text{t}_i}{\eta_\text{out}(\text{t}_i)} + \tag{3} + \end{align*} + $$ """ # TODO: Dabei fällt mir auf. Vielleicht sollte man mal überlegen, ob man für Ladeleistungen bereits in dem diff --git a/flixOpt/elements.py b/flixOpt/elements.py index a90e02e30..60f669a29 100644 --- a/flixOpt/elements.py +++ b/flixOpt/elements.py @@ -25,7 +25,11 @@ @register_class_for_io class Component(Element): """ - basic component class for all components + A Component contains incoming and outgoing [`Flows`][flixOpt.elements.Flow]. It defines how these Flows interact with each other. + The On or Off state of the Component is defined by all its Flows. Its on, if any of its FLows is On. + It's mathematically advisable to define the On/Off state in a FLow rather than a Component if possible, + as this introduces less binary variables to the Model + Constraints to the On/Off state are defined by the [`on_off_parameters`][flixOpt.interface.OnOffParameters]. """ def __init__( @@ -76,7 +80,7 @@ def infos(self, use_numpy=True, use_element_label=False) -> Dict: @register_class_for_io class Bus(Element): r""" - A Bus represents a nodal balance between the flow rates of its incoming and outgoing **Flows** + A Bus represents a nodal balance between the flow rates of its incoming and outgoing [Flows][flixOpt.elements.Flow] ($\mathcal{F}_\text{in}$ and $\mathcal{F}_\text{out}$), which must hold for every time step $\text{t}_i \in \mathcal{T}$. 
@@ -85,7 +89,7 @@ class Bus(Element): \sum_{f_\text{out} \in \mathcal{F}_\text{out}} p_{f_\text{out}}(\text{t}_i) $$ - To handle ifeasabilities gently, 2 variables $\phi_\text{in}(\text{t}_i)\geq0$ and + To handle infeasibilities gently, 2 variables $\phi_\text{in}(\text{t}_i)\geq0$ and $\phi_\text{out}(\text{t}_i)\geq0$ might be introduced. These represent the missing or excess flow_rate in Bus. A certain amount of penalty occurs for each missing or excess flow_rate in the balance (`excess_penalty_per_flow_hour`), so they usually don't affect the Optimization. @@ -154,8 +158,37 @@ def __init__(self): @register_class_for_io class Flow(Element): - """ - flows are inputs and outputs of components + r""" + Flows are the inputs and outputs of [Component][flixOpt.elements.Component] + and connect them to [Busses][flixOpt.elements.Bus]. + A **Flow** moves energy (or material) between a [Bus][flixOpt.elements.Bus] and a [Component][flixOpt.elements.Component] in a predefined direction. + The flow-rate $p(\text{t}_{i})$ is the main optimization variable of the **Flow**. + The size $\text P$ of the **Flow** combined with a relative upper bound $\text p_{\text{rel}}^{\text{U}}(\text{t}_{i})$ + and lower bound $\text p^{\text{L}}_{\text{rel}}(\text{t}_{i})$ limits the flow-rate per time step $p(\text{t}_{i})$. + + $$ + \text P \cdot \text p^{\text{L}}_{\text{rel}}(\text{t}_{i}) + \leq p(\text{t}_{i}) \leq + \text P \cdot \text p^{\text{U}}_{\text{rel}}(\text{t}_{i}) \tag{1} + $$ + + With $\text p^{\text{L}}_{\text{rel}}(\text{t}_{i}) = 0$ and $\text p^{\text{U}}_{\text{rel}}(\text{t}_{i}) = 1$, + equation (1) simplifies to + + $$ + 0 \leq p(\text{t}_{i}) \leq \text P + $$ + + With $\text p^{\text{L}}_{\text{rel}}(\text{t}_{i}) = \text p^{\text{U}}_{\text{rel}}(\text{t}_{i})$, + the flow-rate $p(\text{t}_{i})$ is fixed. 
+ + $$ + p(\text{t}_{i}) = \text p^{\text{L}}_{\text{rel}}(\text{t}_{i}) \cdot \text P + $$ + + This mathematical Formulation can be extended or changed when using [`OnOffParameters`][flixOpt.interface.OnOffParameters] + to define the On/Off state of the Flow, or [`InvestParameters`][flixOpt.interface.InvestParameters], + which changes the size of the Flow to be optimized. """ def __init__( From 0a6415f732294026d02372a6350a2fe2eb6a80c4 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 15 Mar 2025 18:38:07 +0100 Subject: [PATCH 28/87] Move math from classes to docs --- docs/api/components/storage.md | 36 ++++++++++++++++++++++++++- flixOpt/components.py | 45 ++++++---------------------------- 2 files changed, 42 insertions(+), 39 deletions(-) diff --git a/docs/api/components/storage.md b/docs/api/components/storage.md index 09a362a63..8d1204d9c 100644 --- a/docs/api/components/storage.md +++ b/docs/api/components/storage.md @@ -1,6 +1,40 @@ +# Mathematical Formulation + +**Storages** have one incoming and one outgoing **Flow** - $f_\text{in}$ and $f_\text{out}$ - +each with an efficiency $\eta_\text{in}$ and $\eta_\text{out}$. +Further, storages have a `size` $\text C$ and a state of charge $c(\text{t}_i)$. +Similarly to the flow-rate $p(\text{t}_i)$ of a [`Flow`][flixOpt.elements.Flow], +the `size` $\text C$ combined with a relative upper bound +$\text c^{\text{U}}_\text{rel}(\text t_{i})$ and lower bound $\text c^{\text{L}}_\text{rel}(\text t_{i})$ +limits the state of charge $c(\text{t}_i)$ by $\eqref{eq:Storage_Bounds}$. 
+ +$$ \label{eq:Storage_Bounds} + \text C \cdot \text c^{\text{L}}_{\text{rel}}(\text t_{i}) + \leq c(\text{t}_i) \leq + \text C \cdot \text c^{\text{U}}_{\text{rel}}(\text t_{i}) +$$ + +With $\text c^{\text{L}}_{\text{rel}}(\text t_{i}) = 0$ and $\text c^{\text{U}}_{\text{rel}}(\text t_{i}) = 1$, +Equation $\eqref{eq:Storage_Bounds}$ simplifies to + +$$ 0 \leq c(\text t_{i}) \leq \text C $$ + +The state of charge $c(\text{t}_i)$ decreases by a fraction of the prior state of charge. The belonging parameter +$ \dot{ \text c}_\text{rel, loss}(\text{t}_i)$ expresses the "loss fraction per hour". The storage balance from $\text{t}_i$ to $\text t_{i+1}$ is + +$$ +\begin{align*} + c(\text{t}_{i+1}) &= c(\text{t}_{i}) \cdot (1-\dot{\text{c}}_\text{rel,loss}(\text{t}_i) \cdot \Delta \text{t}_{i}) \\ + &\quad + p_{f_\text{in}}(\text{t}_i) \cdot \Delta \text{t}_i \cdot \eta_\text{in}(\text{t}_i) \\ + &\quad - \frac{p_{f_\text{out}}(\text{t}_i) \cdot \Delta \text{t}_i}{\eta_\text{out}(\text{t}_i)} + \tag{3} +\end{align*} +$$ + ::: flixOpt.components.Storage -### Creating a Storage + +## Creating a Storage ```python import flixOpt as fx diff --git a/flixOpt/components.py b/flixOpt/components.py index 0997c16c5..2d161ecd5 100644 --- a/flixOpt/components.py +++ b/flixOpt/components.py @@ -128,44 +128,6 @@ def degrees_of_freedom(self): @register_class_for_io class Storage(Component): - r""" - **Storages** have one incoming and one outgoing **Flow** - $f_\text{in}$ and $f_\text{out}$ - - each with an efficiency $\eta_\text{in}$ and $\eta_\text{out}$. Further, storages have a size $\text C$ and a - state of charge $c(\text{t}_i)$. - Similarly to the flow-rate $p(\text{t}_i)$ of a `Flow`, the `size` $\text C$ combined with a relative upper bound - $\text c^{\text{U}}_\text{rel}(\text t_{i})$ and lower bound $\text c^{\text{L}}_\text{rel}(\text t_{i})$ - limits the state of charge $c(\text{t}_i)$ by $\eqref{eq:Storage_Bounds}$. 
- - $$ \label{eq:Storage_Bounds} - \text C \cdot \text c^{\text{L}}_{\text{rel}}(\text t_{i}) - \leq c(\text{t}_i) \leq - \text C \cdot \text c^{\text{U}}_{\text{rel}}(\text t_{i}) - $$ - - With $\text c^{\text{L}}_{\text{rel}}(\text t_{i}) = 0$ and $\text c^{\text{U}}_{\text{rel}}(\text t_{i}) = 1$, - Equation $\eqref{eq:Storage_Bounds}$ simplifies to - - $$ 0 \leq c(\text t_{i}) \leq \text C $$ - - The state of charge $c(\text{t}_i)$ decreases by a fraction of the prior state of charge. The belonging parameter - $ \dot{ \text c}_\text{rel, loss}(\text{t}_i)$ expresses the "loss fraction per hour". The storage balance from $\text{t}_i$ to $\text t_{i+1}$ is - - $$ - \begin{align*} - c(\text{t}_{i+1}) &= c(\text{t}_{i}) \cdot (1-\dot{\text{c}}_\text{rel,loss}(\text{t}_i) \cdot \Delta \text{t}_{i}) \\ - &\quad + p_{f_\text{in}}(\text{t}_i) \cdot \Delta \text{t}_i \cdot \eta_\text{in}(\text{t}_i) \\ - &\quad - \frac{p_{f_\text{out}}(\text{t}_i) \cdot \Delta \text{t}_i}{\eta_\text{out}(\text{t}_i)} - \tag{3} - \end{align*} - $$ - """ - - # TODO: Dabei fällt mir auf. Vielleicht sollte man mal überlegen, ob man für Ladeleistungen bereits in dem - # jeweiligen Zeitschritt mit einem Verlust berücksichtigt. Zumindest für große Zeitschritte bzw. große Verluste - # eventuell relevant. - # -> Sprich: speicherverlust = charge_state(t) * relative_loss_per_hour * dt + 0.5 * Q_lade(t) * dt * relative_loss_per_hour * dt - # -> müsste man aber auch für den sich ändernden Ladezustand berücksichtigten - def __init__( self, label: str, @@ -184,6 +146,13 @@ def __init__( meta_data: Optional[Dict] = None, ): """ + Storages have one incoming and one outgoing Flow each with an efficiency. + Further, storages have a `size` and a `charge_state`. + Similarly to the flow-rate of a Flow, the `size` combined with a relative upper and lower bound + limits the `charge_state` of the storage. 
+ + For mathematical details take a look at our online documentation + Args: label: The label of the Element. Used to identify it in the FlowSystem charging: ingoing flow. From e7cbdcda89d338aef3459eb207ab6257f393dccc Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 15 Mar 2025 20:06:57 +0100 Subject: [PATCH 29/87] Move math from classes to docs --- docs/api/components/storage.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/docs/api/components/storage.md b/docs/api/components/storage.md index 8d1204d9c..81f9aa459 100644 --- a/docs/api/components/storage.md +++ b/docs/api/components/storage.md @@ -14,6 +14,13 @@ $$ \label{eq:Storage_Bounds} \text C \cdot \text c^{\text{U}}_{\text{rel}}(\text t_{i}) $$ +Where: + +- $\text C$ is the storage capacity +- $c(\text{t}_i)$ is the state of charge at time $\text{t}_i$ +- $\text c^{\text{L}}_{\text{rel}}(\text t_{i})$ is the relative lower bound (typically 0) +- $\text c^{\text{U}}_{\text{rel}}(\text t_{i})$ is the relative upper bound (typically 1) + With $\text c^{\text{L}}_{\text{rel}}(\text t_{i}) = 0$ and $\text c^{\text{U}}_{\text{rel}}(\text t_{i}) = 1$, Equation $\eqref{eq:Storage_Bounds}$ simplifies to @@ -31,6 +38,17 @@ $$ \end{align*} $$ +Where: + +- $c(\text{t}_{i+1})$ is the state of charge at time $\text{t}_{i+1}$ +- $c(\text{t}_{i})$ is the state of charge at time $\text{t}_{i}$ +- $\dot{\text{c}}_\text{rel,loss}(\text{t}_i)$ is the relative loss rate (self-discharge) per hour +- $\Delta \text{t}_{i}$ is the time step duration in hours +- $p_{f_\text{in}}(\text{t}_i)$ is the input flow rate at time $\text{t}_i$ +- $\eta_\text{in}(\text{t}_i)$ is the charging efficiency at time $\text{t}_i$ +- $p_{f_\text{out}}(\text{t}_i)$ is the output flow rate at time $\text{t}_i$ +- $\eta_\text{out}(\text{t}_i)$ is the discharging efficiency at time $\text{t}_i$ + ::: flixOpt.components.Storage From bdee76efda485f0b2cd1f00cb1e56735e730706c Mon Sep 17 
00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 15 Mar 2025 20:13:47 +0100 Subject: [PATCH 30/87] Move math to the bottom --- docs/api/components/storage.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/api/components/storage.md b/docs/api/components/storage.md index 81f9aa459..2d6498a37 100644 --- a/docs/api/components/storage.md +++ b/docs/api/components/storage.md @@ -1,4 +1,6 @@ -# Mathematical Formulation +::: flixOpt.components.Storage + +## Mathematical Formulation **Storages** have one incoming and one outgoing **Flow** - $f_\text{in}$ and $f_\text{out}$ - each with an efficiency $\eta_\text{in}$ and $\eta_\text{out}$. @@ -49,8 +51,6 @@ Where: - $p_{f_\text{out}}(\text{t}_i)$ is the output flow rate at time $\text{t}_i$ - $\eta_\text{out}(\text{t}_i)$ is the discharging efficiency at time $\text{t}_i$ -::: flixOpt.components.Storage - ## Creating a Storage From 99f5c96f62d2d5134d167bdee7e45682e9dc431f Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 15 Mar 2025 20:56:22 +0100 Subject: [PATCH 31/87] Update docs and update color --- mkdocs.yml | 85 +++++++++++++++++++----------------------------------- 1 file changed, 29 insertions(+), 56 deletions(-) diff --git a/mkdocs.yml b/mkdocs.yml index 931933e9b..a7168d5a6 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,3 +1,7 @@ +# Options: +# https://mkdocstrings.github.io/python/usage/configuration/docstrings/ +# https://squidfunk.github.io/mkdocs-material/setup/ + site_name: flixOpt site_description: Energy and Material Flow Optimization Framework site_url: https://flixopt.github.io/flixopt/ @@ -44,16 +48,16 @@ theme: # Light mode - media: "(prefers-color-scheme: light)" scheme: default - primary: indigo # Try different colors like: deep purple, blue, teal - accent: purple + primary: teal + accent: blue toggle: icon: material/brightness-7 name: Switch to dark mode # Dark mode - media: 
"(prefers-color-scheme: dark)" scheme: slate - primary: indigo # Can be different from light mode - accent: purple + primary: teal # Can be different from light mode + accent: blue toggle: icon: material/brightness-4 name: Switch to light mode @@ -112,58 +116,27 @@ plugins: handlers: python: # Configuration for Python code documentation options: - # Controls which members to include or exclude from documentation - # "!^_[^_]" excludes private members (single underscore) - # "^__init__" explicitly includes constructor methods - filters: ["!^_[^_]", "^__init__"] - - # Sets google as the docstring style - docstring_style: google - - # whether the documented object's name should be displayed as a heading at the beginning of its documentation - show_root_heading: true - - # Improves type annotations - modernize_annotations: true - - # Renders parameter sections as tables instead of lists for better readability - docstring_section_style: table - - # Hides the source code implementation from documentation - show_source: false - - # Displays simple class names instead of full import paths - show_object_full_path: false - - # Shows class attributes in the documentation - show_docstring_attributes: true - - # Documents objects even if they don't have docstrings - show_if_no_docstring: true - - # Displays category headings (Methods, Attributes, etc.) 
for organization - show_category_heading: true - - # Shows method signatures with parameters - show_signature: true - - # Includes type annotations in the signatures when available - show_signature_annotations: true - - # Displays signatures separate from descriptions for cleaner layout - separate_signature: true - - # Promotes constructor parameters to class-level documentation - merge_init_into_class: true - - # Sets the base heading level for documented objects (h2) - heading_level: 1 - - # Orders members as they appear in the source code - members_order: source - - # Include members inherited from parent classes - inherited_members: true + docstring_style: google # Sets google as the docstring style + modernize_annotations: true # Improves type annotations + merge_init_into_class: true # Promotes constructor parameters to class-level documentation + docstring_section_style: table # Renders parameter sections as a table (also: list, spacy) + + members_order: source # Orders members as they appear in the source code + inherited_members: true # Include members inherited from parent classes + show_if_no_docstring: false # Documents objects even if they don't have docstrings + + group_by_category: true + heading_level: 1 # Sets the base heading level for documented objects + line_length: 80 + filters: ["!^_", "^__init__$"] + show_root_heading: true # whether the documented object's name should be displayed as a heading at the beginning of its documentation + show_source: false # Shows the source code implementation from documentation + show_object_full_path: false # Displays simple class names instead of full import paths + show_docstring_attributes: true # Shows class attributes in the documentation + show_category_heading: true # Displays category headings (Methods, Attributes, etc.) 
for organization + show_signature: true # Shows method signatures with parameters + show_signature_annotations: true # Includes type annotations in the signatures when available + separate_signature: true # Displays signatures separate from descriptions for cleaner layout extra: # Uses Python type hints to supplement docstring information From 4ca84fdd7c5713653f660fd3438122bccab91e54 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 15 Mar 2025 21:04:07 +0100 Subject: [PATCH 32/87] Update docs --- mkdocs.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/mkdocs.yml b/mkdocs.yml index a7168d5a6..f0a2fbb45 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -139,8 +139,7 @@ plugins: separate_signature: true # Displays signatures separate from descriptions for cleaner layout extra: - # Uses Python type hints to supplement docstring information - infer_type_annotations: true + infer_type_annotations: true # Uses Python type hints to supplement docstring information extra_javascript: - javascripts/mathjax.js # Custom MathJax 3 CDN Configuration From 15df071061a4fdbcf9f29f0b9ec7f1a3ba6a1cad Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 15 Mar 2025 21:04:15 +0100 Subject: [PATCH 33/87] Improve docstrings --- flixOpt/flow_system.py | 6 ++++++ flixOpt/structure.py | 7 ++++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/flixOpt/flow_system.py b/flixOpt/flow_system.py index 4b1d2c6eb..ba16900c3 100644 --- a/flixOpt/flow_system.py +++ b/flixOpt/flow_system.py @@ -82,6 +82,12 @@ def from_dataset(cls, ds: xr.Dataset): @classmethod def from_dict(cls, data: Dict) -> 'FlowSystem': + """ + Load a FlowSystem from a dictionary. + + Args: + data: Dictionary containing the FlowSystem data. 
+ """ timesteps_extra = pd.DatetimeIndex(data['timesteps_extra'], name='time') hours_of_last_timestep = TimeSeriesCollection.calculate_hours_per_timestep(timesteps_extra).isel(time=-1).item() diff --git a/flixOpt/structure.py b/flixOpt/structure.py index 0312221fb..d745261f9 100644 --- a/flixOpt/structure.py +++ b/flixOpt/structure.py @@ -214,7 +214,12 @@ def _deserialize_value(cls, value: Any): @classmethod def from_dict(cls, data: Dict) -> 'Interface': - """Create an instance from a dictionary representation.""" + """ + Create an instance from a dictionary representation. + + Args: + data: Dictionary containing the data for the object. + """ return cls._deserialize_dict(data) def __repr__(self): From df28df1d84b3e1bba44170c4565f0e15ebe0df24 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 15 Mar 2025 21:11:09 +0100 Subject: [PATCH 34/87] Update remaining docstrings to Google style --- flixOpt/core.py | 12 ++++++------ flixOpt/features.py | 27 +++++++++------------------ flixOpt/flow_system.py | 6 ++---- flixOpt/plotting.py | 30 ++++++++++-------------------- flixOpt/results.py | 26 +++++++++++++------------- flixOpt/structure.py | 23 +++++++++-------------- 6 files changed, 49 insertions(+), 75 deletions(-) diff --git a/flixOpt/core.py b/flixOpt/core.py index 76f554fbf..d55211353 100644 --- a/flixOpt/core.py +++ b/flixOpt/core.py @@ -152,7 +152,7 @@ def from_datasource(cls, """ Initialize the TimeSeries from multiple data sources. - Parameters: + Args: data: The time series data name: The name of the TimeSeries timesteps: The timesteps of the TimeSeries @@ -176,7 +176,7 @@ def from_json(cls, data: Optional[Dict[str, Any]] = None, path: Optional[str] = """ Load a TimeSeries from a dictionary or json file. 
- Parameters: + Args: data: Dictionary containing TimeSeries data path: Path to a JSON file containing TimeSeries data @@ -258,7 +258,7 @@ def to_json(self, path: Optional[pathlib.Path] = None) -> Dict[str, Any]: """ Save the TimeSeries to a dictionary or JSON file. - Parameters: + Args: path: Optional path to save JSON file Returns: @@ -316,7 +316,7 @@ def active_timesteps(self, timesteps: Optional[pd.DatetimeIndex]): """ Set active_timesteps and refresh active_data. - Parameters: + Args: timesteps: New timesteps to activate, or None to use all stored timesteps Raises: @@ -346,7 +346,7 @@ def stored_data(self, value: NumericData): """ Update stored_data and refresh active_data. - Parameters: + Args: value: New data to store """ new_data = DataConverter.as_dataarray(value, timesteps=self.active_timesteps) @@ -409,7 +409,7 @@ def __gt__(self, other): """ Compare if this TimeSeries is greater than another. - Parameters: + Args: other: Another TimeSeries to compare with Returns: diff --git a/flixOpt/features.py b/flixOpt/features.py index 3b2db08a6..7059a2f69 100644 --- a/flixOpt/features.py +++ b/flixOpt/features.py @@ -388,20 +388,16 @@ def _get_duration_in_hours( The minimum duration in the last time step is not restricted. Previous values before t=0 are not recognised! - Parameters: - variable_label (str): - Label for the duration variable to be created. - binary_variable (linopy.Variable): - Time-series binary variable (e.g., [0, 0, 1, 1, 1, 0, ...]) representing activity states. - minimum_duration (Optional[TimeSeries]): - Minimum duration the activity must remain active once started. + Args: + variable_name: Label for the duration variable to be created. + binary_variable: Time-series binary variable (e.g., [0, 0, 1, 1, 1, 0, ...]) representing activity states. + minimum_duration: Minimum duration the activity must remain active once started. If None, no minimum duration constraint is applied. 
- maximum_duration (Optional[TimeSeries]): - Maximum duration the activity can remain active. + maximum_duration: Maximum duration the activity can remain active. If None, the maximum duration is set to the total available time. Returns: - linopy.Variable: The created duration variable representing consecutive active durations. + The created duration variable representing consecutive active durations. Example: binary_variable: [0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, ...] @@ -603,16 +599,11 @@ def compute_previous_on_states(previous_values: List[Optional[NumericData]], eps """ Computes the previous 'on' states {0, 1} of defining variables as a binary array from their previous values. - Parameters: - ---------- - previous_values: List[NumericData] - List of previous values of the defining variables. In Range [0, inf] or None (ignored) - epsilon: float, optional - Tolerance for equality to determine "off" state, default is 1e-5. + Args: + previous_values: List of previous values of the defining variables. In Range [0, inf] or None (ignored) + epsilon: Tolerance for equality to determine "off" state, default is 1e-5. Returns: - ------- - np.ndarray A binary array (0 and 1) indicating the previous on/off states of the variables. Returns `array([0])` if no previous values are available. """ diff --git a/flixOpt/flow_system.py b/flixOpt/flow_system.py index ba16900c3..3ec3b0a7e 100644 --- a/flixOpt/flow_system.py +++ b/flixOpt/flow_system.py @@ -154,10 +154,8 @@ def to_json(self, path: Union[str, pathlib.Path]): This not meant to be reloaded and recreate the object, but rather used to document or compare the flow_system to others. - Parameters: - ----------- - path: Union[str, pathlib.Path] - The path to the json file. + Args: + path: The path to the json file. 
""" with open(path, 'w', encoding='utf-8') as f: json.dump(self.as_dict('stats'), f, indent=4, ensure_ascii=False) diff --git a/flixOpt/plotting.py b/flixOpt/plotting.py index 1390e4d32..a02c8014e 100644 --- a/flixOpt/plotting.py +++ b/flixOpt/plotting.py @@ -620,27 +620,17 @@ def plot_network( """ Visualizes the network structure of a FlowSystem using PyVis, using info-dictionaries. - Parameters: - - path (Union[bool, str, pathlib.Path], default='results/network.html'): - Path to save the HTML visualization. - - `False`: Visualization is created but not saved. - - `str` or `Path`: Specifies file path (default: 'results/network.html'). - - - controls (Union[bool, List[str]], default=True): - UI controls to add to the visualization. - - `True`: Enables all available controls. - - `List`: Specify controls, e.g., ['nodes', 'layout']. - - Options: 'nodes', 'edges', 'layout', 'interaction', 'manipulation', 'physics', 'selection', 'renderer'. - You can play with these and generate a Dictionary from it that can be applied to the network returned by this function. - network.set_options() - https://pyvis.readthedocs.io/en/latest/tutorial.html - - - show (bool, default=True): - Whether to open the visualization in the web browser. - The calculation must be saved to show it. If no path is given, it defaults to 'network.html'. - + Args: + path: Path to save the HTML visualization. `False`: Visualization is created but not saved. `str` or `Path`: Specifies file path (default: 'results/network.html'). + controls: UI controls to add to the visualization. `True`: Enables all available controls. `List`: Specify controls, e.g., ['nodes', 'layout']. + Options: 'nodes', 'edges', 'layout', 'interaction', 'manipulation', 'physics', 'selection', 'renderer'. + You can play with these and generate a Dictionary from it that can be applied to the network returned by this function. 
+ network.set_options() + https://pyvis.readthedocs.io/en/latest/tutorial.html + show: Whether to open the visualization in the web browser. + The calculation must be saved to show it. If no path is given, it defaults to 'network.html'. Returns: - - Optional[pyvis.network.Network]: The `Network` instance representing the visualization, or `None` if `pyvis` is not installed. + The `Network` instance representing the visualization, or `None` if `pyvis` is not installed. Usage: - Visualize and open the network with default options: diff --git a/flixOpt/results.py b/flixOpt/results.py index 5331f8894..dbb79c2c7 100644 --- a/flixOpt/results.py +++ b/flixOpt/results.py @@ -431,15 +431,15 @@ def plotly_save_and_show(fig: plotly.graph_objs.Figure, """ Optionally saves and/or displays a Plotly figure. - Parameters: - - fig (go.Figure): The Plotly figure to display or save. - - default_filename (Path): The default file path if no user filename is provided. - - user_filename (Optional[Path]): An optional user-specified file path. - - show (bool): Whether to display the figure (default: True). - - save (bool): Whether to save the figure (default: False). + Args: + fig: The Plotly figure to display or save. + default_filename: The default file path if no user filename is provided. + user_filename: An optional user-specified file path. + show: Whether to display the figure (default: True). + save: Whether to save the figure (default: False). Returns: - - go.Figure: The input figure. + go.Figure: The input figure. """ filename = user_filename or default_filename if show and not save: @@ -484,14 +484,14 @@ def sanitize_dataset( """ Sanitizes a dataset by dropping variables with small values and optionally reindexing the time axis. - Parameters: - - ds (xr.Dataset): The dataset to sanitize. - - timesteps (Optional[pd.DatetimeIndex]): The timesteps to reindex the dataset to. If None, the original timesteps are kept. 
- - threshold (Optional[float]): The threshold for dropping variables. If None, no variables are dropped. - - negate (Optional[List[str]]): The variables to negate. If None, no variables are negated. + Args: + ds: The dataset to sanitize. + timesteps: The timesteps to reindex the dataset to. If None, the original timesteps are kept. + threshold: The threshold for dropping variables. If None, no variables are dropped. + negate: The variables to negate. If None, no variables are negated. Returns: - - xr.Dataset: The sanitized dataset. + xr.Dataset: The sanitized dataset. """ if negate is not None: for var in negate: diff --git a/flixOpt/structure.py b/flixOpt/structure.py index d745261f9..8be90a05e 100644 --- a/flixOpt/structure.py +++ b/flixOpt/structure.py @@ -98,18 +98,15 @@ def infos(self, use_numpy=True, use_element_label=False) -> Dict: Excludes default values and empty dictionaries and lists. Converts data to be compatible with JSON. - Parameters: - ----------- - use_numpy bool: - Whether to convert NumPy arrays to lists. Defaults to True. - If True, numeric numpy arrays (`np.ndarray`) are preserved as-is. - If False, they are converted to lists. - use_element_label bool: - Whether to use the element label instead of the infos of the element. Defaults to False. - Note that Elements used as keys in dictionaries are always converted to their labels. + Args: + use_numpy: Whether to convert NumPy arrays to lists. Defaults to True. + If True, numeric numpy arrays (`np.ndarray`) are preserved as-is. + If False, they are converted to lists. + use_element_label: Whether to use the element label instead of the infos of the element. Defaults to False. + Note that Elements used as keys in dictionaries are always converted to their labels. Returns: - Dict: A dictionary representation of the object's constructor arguments. + A dictionary representation of the object's constructor arguments. 
""" # Get the constructor arguments and their default values @@ -134,10 +131,8 @@ def to_json(self, path: Union[str, pathlib.Path]): Saves the element to a json file. This not meant to be reloaded and recreate the object, but rather used to document or compare the object. - Parameters: - ----------- - path: Union[str, pathlib.Path] - The path to the json file. + Args: + path: The path to the json file. """ data = get_compact_representation(self.infos(use_numpy=True, use_element_label=True)) with open(path, 'w', encoding='utf-8') as f: From 81d64895fe251279741f5f36bc42af6a61f901e0 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 15 Mar 2025 21:22:38 +0100 Subject: [PATCH 35/87] Update remaining docstrings to Google style --- flixOpt/calculation.py | 11 +- flixOpt/core.py | 58 +++------ flixOpt/features.py | 70 ++++------- flixOpt/flow_system.py | 16 +-- flixOpt/linear_converters.py | 24 ++-- flixOpt/plotting.py | 232 +++++++++++++---------------------- flixOpt/results.py | 21 +--- flixOpt/structure.py | 57 ++++----- flixOpt/utils.py | 23 ++-- 9 files changed, 183 insertions(+), 329 deletions(-) diff --git a/flixOpt/calculation.py b/flixOpt/calculation.py index 9c3816e83..614334f60 100644 --- a/flixOpt/calculation.py +++ b/flixOpt/calculation.py @@ -168,13 +168,10 @@ def save_results(self, save_flow_system: bool = False, compression: int = 0): The calculation infos are saved as a .yaml file. Optionally, the flow_system is saved as a .nc file. - Parameters - ---------- - save_flow_system: bool, optional - Whether to save the flow_system, by default False - compression: int, optional - Compression level for the netCDF file, by default 0 wich leads to no compression. - Currently, only the Flow System file can be compressed. + Args: + save_flow_system: Whether to save the flow_system, by default False + compression: Compression level for the netCDF file, by default 0 wich leads to no compression. 
+ Currently, only the Flow System file can be compressed. """ with open(self.folder / f'{self.name}_infos.yaml', 'w', encoding='utf-8') as f: yaml.dump(self.infos, f, allow_unicode=True, sort_keys=False, indent=4) diff --git a/flixOpt/core.py b/flixOpt/core.py index d55211353..b20bdcd62 100644 --- a/flixOpt/core.py +++ b/flixOpt/core.py @@ -525,18 +525,13 @@ def create_time_series( """ Creates a TimeSeries from the given data and adds it to the collection. - Parameters - ---------- - data: Union[int, float, np.ndarray, pd.Series, pd.DataFrame, xr.DataArray] + Args: + data: The data to create the TimeSeries from. + name: The name of the TimeSeries. + needs_extra_timestep: Whether to create an additional timestep at the end of the timesteps. The data to create the TimeSeries from. - name: str - The name of the TimeSeries. - needs_extra_timestep: bool, optional - Whether to create an additional timestep at the end of the timesteps. - - Returns - ------- - TimeSeries + + Returns: The created TimeSeries. """ @@ -587,11 +582,10 @@ def activate_timesteps(self, active_timesteps: Optional[pd.DatetimeIndex] = None Update active timesteps for the collection and all time series. If no arguments are provided, the active timesteps are reset. - Parameters - ---------- - active_timesteps: Optional[pd.DatetimeIndex] - The active timesteps of the model. - If None, the all timesteps of the TimeSeriesCollection are taken.""" + Args: + active_timesteps: The active timesteps of the model. + If None, the all timesteps of the TimeSeriesCollection are taken. + """ if active_timesteps is None: return self.reset() @@ -633,12 +627,9 @@ def insert_new_data(self, data: pd.DataFrame, include_extra_timestep: bool = Fal """ Update time series with new data from a DataFrame. 
- Parameters - ---------- - data: pd.DataFrame - DataFrame containing new data with timestamps as index - include_extra_timestep: bool, optional - Whether the provided data already includes the extra timestep, by default False + Args: + data: DataFrame containing new data with timestamps as index + include_extra_timestep: Whether the provided data already includes the extra timestep, by default False """ if not isinstance(data, pd.DataFrame): raise TypeError(f"data must be a pandas DataFrame, got {type(data).__name__}") @@ -681,16 +672,11 @@ def to_dataframe(self, """ Convert collection to DataFrame with optional filtering and timestep control. - Parameters - ---------- - filtered: Literal['all', 'constant', 'non_constant'], optional - Filter time series by variability, by default 'non_constant' - include_extra_timestep: bool, optional - Whether to include the extra timestep in the result, by default True + Args: + filtered: Filter time series by variability, by default 'non_constant' + include_extra_timestep: Whether to include the extra timestep in the result, by default True - Returns - ------- - pd.DataFrame + Returns: DataFrame representation of the collection """ include_constants = filtered != 'non_constant' @@ -715,14 +701,10 @@ def to_dataset(self, include_constants: bool = True) -> xr.Dataset: """ Combine all time series into a single Dataset with all timesteps. 
- Parameters - ---------- - include_constants: bool, optional - Whether to include time series with constant values, by default True + Args: + include_constants: Whether to include time series with constant values, by default True - Returns - ------- - xr.Dataset + Returns: Dataset containing all selected time series with all timesteps """ # Determine which series to include diff --git a/flixOpt/features.py b/flixOpt/features.py index 7059a2f69..2e4754463 100644 --- a/flixOpt/features.py +++ b/flixOpt/features.py @@ -193,22 +193,14 @@ def __init__( """ Constructor for OnOffModel - Parameters - ---------- - model: SystemModel - Reference to the SystemModel - on_off_parameters: OnOffParameters - Parameters for the OnOffModel - label_of_element: - Label of the Parent - defining_variables: - List of Variables that are used to define the OnOffModel - defining_bounds: - List of Tuples, defining the absolute bounds of each defining variable - previous_values: - List of previous values of the defining variables - label: - Label of the OnOffModel + Args: + model: Reference to the SystemModel + on_off_parameters: Parameters for the OnOffModel + label_of_element: Label of the Parent + defining_variables: List of Variables that are used to define the OnOffModel + defining_bounds: List of Tuples, defining the absolute bounds of each defining variable + previous_values: List of previous values of the defining variables + label: Label of the OnOffModel """ super().__init__(model, label_of_element, label) assert len(defining_variables) == len(defining_bounds), 'Every defining Variable needs bounds to Model OnOff' @@ -628,16 +620,11 @@ def compute_consecutive_duration( hours_per_timestep is handled in a way, that maximizes compatability. Its length must only be as long as the last consecutive duration in binary_values. - Parameters - ---------- - binary_values: int, np.ndarray - An int or 1D binary array containing only `0`s and `1`s. 
- hours_per_timestep: int, float, np.ndarray - The duration of each timestep in hours. + Args: + binary_values: An int or 1D binary array containing only `0`s and `1`s. + hours_per_timestep: The duration of each timestep in hours. - Returns - ------- - np.ndarray + Returns: The duration of the binary variable in hours. Raises @@ -735,19 +722,15 @@ def __init__( label: str = 'MultipleSegments', ): """ - Parameters - ---------- - model: linopy.Model - Model to which the segmented variable belongs. - label_of_element: str - Name of the parent variable. - sample_points: dict[str, list[tuple[float, float]]] - Dictionary mapping variables (names) to their sample points for each segment. - The sample points are tuples of the form (start, end). - can_be_outside_segments: bool or linopy.Variable, optional - Whether the variable can be outside the segments. If True, a variable is created. - If False or None, no variable is created. If a Variable is passed, it is used. - as_time_series: bool, optional + Args: + model: Model to which the segmented variable belongs. + label_of_element: Name of the parent variable. + sample_points: Dictionary mapping variables (names) to their sample points for each segment. + The sample points are tuples of the form (start, end). + can_be_outside_segments: Whether the variable can be outside the segments. If True, a variable is created. + If False or None, no variable is created. If a Variable is passed, it is used. + as_time_series: Whether to create a scalar or time series variable. + label: Name of the Model. """ super().__init__(model, label_of_element, label) self.outside_segments: Optional[linopy.Variable] = None @@ -889,14 +872,9 @@ def add_share( The variable representing the total share is on the left hand side (lhs) of the constraint. var_total = sum(expressions) - Parameters - ---------- - system_model: SystemModel - The system model. - name: str - The name of the share. 
- expression: linopy.LinearExpression - The expression of the share. Added to the right hand side of the constraint. + Args: + name: The name of the share. + expression: The expression of the share. Added to the right hand side of the constraint. """ if name in self.shares: self.share_constraints[name].lhs -= expression diff --git a/flixOpt/flow_system.py b/flixOpt/flow_system.py index 3ec3b0a7e..5632b2d1a 100644 --- a/flixOpt/flow_system.py +++ b/flixOpt/flow_system.py @@ -124,13 +124,11 @@ def from_netcdf(cls, path: Union[str, pathlib.Path]): def add_elements(self, *elements: Element) -> None: """ - add all modeling elements, like storages, boilers, heatpumps, buses, ... - - Parameters - ---------- - *elements: childs of Element like Boiler, HeatPump, Bus,... - modeling Elements + Add Components(Storages, Boilers, Heatpumps, ...), Buses or Effects to the FlowSystem + Args: + *elements: childs of Element like Boiler, HeatPump, Bus,... + modeling Elements """ if self._connected: warnings.warn( @@ -333,10 +331,8 @@ def _check_if_element_is_unique(self, element: Element) -> None: """ checks if element or label of element already exists in list - Parameters - ---------- - element: Element - new element to check + Args: + element: new element to check """ if element in self.all_elements.values(): raise Exception(f'Element {element.label} already added to FlowSystem!') diff --git a/flixOpt/linear_converters.py b/flixOpt/linear_converters.py index 18c28830b..adf3a4161 100644 --- a/flixOpt/linear_converters.py +++ b/flixOpt/linear_converters.py @@ -298,26 +298,16 @@ def COP(self, value): # noqa: N802 def check_bounds( value: NumericDataTS, parameter_label: str, element_label: str, lower_bound: NumericDataTS, upper_bound: NumericDataTS -): +) -> None: """ Check if the value is within the bounds. The bounds are exclusive. If not, log a warning. - Parameters - ---------- - value: NumericDataTS - The value to check. - parameter_label: str - The label of the value. 
- element_label: str - The label of the element. - lower_bound: NumericDataTS - The lower bound. - upper_bound: NumericDataTS - The upper bound. - - Returns - ------- - + Args: + value: The value to check. + parameter_label: The label of the value. + element_label: The label of the element. + lower_bound: The lower bound. + upper_bound: The upper bound. """ if isinstance(value, TimeSeriesData): value = value.data diff --git a/flixOpt/plotting.py b/flixOpt/plotting.py index a02c8014e..9a90c664d 100644 --- a/flixOpt/plotting.py +++ b/flixOpt/plotting.py @@ -35,47 +35,28 @@ def with_plotly( """ Plot a DataFrame with Plotly, using either stacked bars or stepped lines. - Parameters - ---------- - data: pd.DataFrame - A DataFrame containing the data to plot, where the index represents - time (e.g., hours), and each column represents a separate data series. - mode: {'bar', 'line'}, default='bar' - The plotting mode. Use 'bar' for stacked bar charts or 'line' for - stepped lines. - colors: List[str], str, default='viridis' - A List of colors (as str) or a name of a colorscale (e.g., 'viridis', 'plasma') to use for - coloring the data series. - title: str - The title of the plot. - ylabel: str - The label for the y-axis. - fig: go.Figure, optional - A Plotly figure object to plot on. If not provided, a new figure - will be created. - show: bool - Wether to show the figure after creation. (This includes saving the figure) - save: bool - Wether to save the figure after creation (without showing) - path: Union[str, pathlib.Path] - Path to save the figure. - - Returns - ------- - go.Figure + Args: + data: A DataFrame containing the data to plot, where the index represents time (e.g., hours), and each column represents a separate data series. + mode: The plotting mode. Use 'bar' for stacked bar charts or 'line' for stepped lines. + colors: A List of colors (as str) or a name of a colorscale (e.g., 'viridis', 'plasma') to use for coloring the data series. 
+ title: The title of the plot. + ylabel: The label for the y-axis. + fig: A Plotly figure object to plot on. If not provided, a new figure will be created. + show: Wether to show the figure after creation. (This includes saving the figure) + save: Wether to save the figure after creation (without showing) + path: Path to save the figure. + + Returns: A Plotly figure object containing the generated plot. - Notes - ----- - - If `mode` is 'bar', bars are stacked for each data series. - - If `mode` is 'line', a stepped line is drawn for each data series. - - The legend is positioned below the plot for a cleaner layout when many - data series are present. - - Examples - -------- - >>> fig = with_plotly(data, mode='bar', colorscale='plasma') - >>> fig.show() + Notes: + - If `mode` is 'bar', bars are stacked for each data series. + - If `mode` is 'line', a stepped line is drawn for each data series. + - The legend is positioned below the plot for a cleaner layout when many data series are present. + + Examples: + >>> fig = with_plotly(data, mode='bar', colorscale='plasma') + >>> fig.show() """ assert mode in ['bar', 'line', 'area'], f"'mode' must be one of {['bar', 'line', 'area']}" if data.empty: @@ -208,45 +189,28 @@ def with_matplotlib( """ Plot a DataFrame with Matplotlib using stacked bars or stepped lines. - Parameters - ---------- - data: pd.DataFrame - A DataFrame containing the data to plot. The index should represent - time (e.g., hours), and each column represents a separate data series. - mode: {'bar', 'line'}, default='bar' - Plotting mode. Use 'bar' for stacked bar charts or 'line' for stepped lines. - colors: List[str], str, default='viridis' - A List of colors (as str) or a name of a colorscale (e.g., 'viridis', 'plasma') to use for - coloring the data series. - figsize: Tuple[int, int], optional - Specify the size of the figure - fig: plt.Figure, optional - A Matplotlib figure object to plot on. If not provided, a new figure - will be created. 
- ax: plt.Axes, optional - A Matplotlib axes object to plot on. If not provided, a new axes - will be created. - show: bool - Wether to show the figure after creation. - path: Union[str, pathlib.Path] - Path to save the figure to. - - Returns - ------- - Tuple[plt.Figure, plt.Axes] + Args: + data: A DataFrame containing the data to plot. The index should represent time (e.g., hours), and each column represents a separate data series. + mode: Plotting mode. Use 'bar' for stacked bar charts or 'line' for stepped lines. + colors: A List of colors (as str) or a name of a colorscale (e.g., 'viridis', 'plasma') to use for coloring the data series. + figsize: Specify the size of the figure + fig: A Matplotlib figure object to plot on. If not provided, a new figure will be created. + ax: A Matplotlib axes object to plot on. If not provided, a new axes will be created. + show: Wether to show the figure after creation. + path: Path to save the figure to. + + Returns: A tuple containing the Matplotlib figure and axes objects used for the plot. - Notes - ----- - - If `mode` is 'bar', bars are stacked for both positive and negative values. - Negative values are stacked separately without extra labels in the legend. - - If `mode` is 'line', stepped lines are drawn for each data series. - - The legend is placed below the plot to accommodate multiple data series. - - Examples - -------- - >>> fig, ax = with_matplotlib(data, mode='bar', colorscale='plasma') - >>> plt.show() + Notes: + - If `mode` is 'bar', bars are stacked for both positive and negative values. + Negative values are stacked separately without extra labels in the legend. + - If `mode` is 'line', stepped lines are drawn for each data series. + - The legend is placed below the plot to accommodate multiple data series. 
+ + Examples: + >>> fig, ax = with_matplotlib(data, mode='bar', colorscale='plasma') + >>> plt.show() """ assert mode in ['bar', 'line'], f"'mode' must be one of {['bar', 'line']} for matplotlib" @@ -325,32 +289,23 @@ def heat_map_matplotlib( Plots a DataFrame as a heatmap using Matplotlib. The columns of the DataFrame will be displayed on the x-axis, the index will be displayed on the y-axis, and the values will represent the 'heat' intensity in the plot. - Parameters - ---------- - data: pd.DataFrame - A DataFrame containing the data to be visualized. The index will be used for the y-axis, and columns will be used for the x-axis. - The values in the DataFrame will be represented as colors in the heatmap. - color_map: str, optional - The colormap to use for the heatmap. Default is 'viridis'. Matplotlib supports various colormaps like 'plasma', 'inferno', 'cividis', etc. - figsize: tuple of float, optional - The size of the figure to create. Default is (12, 6), which results in a width of 12 inches and a height of 6 inches. - show: bool - Wether to show the figure after creation. - path: Union[str, pathlib.Path] - Path to save the figure to. - - Returns - ------- - tuple of (plt.Figure, plt.Axes) + Args: + data: A DataFrame containing the data to be visualized. The index will be used for the y-axis, and columns will be used for the x-axis. + The values in the DataFrame will be represented as colors in the heatmap. + color_map: The colormap to use for the heatmap. Default is 'viridis'. Matplotlib supports various colormaps like 'plasma', 'inferno', 'cividis', etc. + figsize: The size of the figure to create. Default is (12, 6), which results in a width of 12 inches and a height of 6 inches. + show: Wether to show the figure after creation. + path: Path to save the figure to. + + Returns: A tuple containing the Matplotlib `Figure` and `Axes` objects. The `Figure` contains the overall plot, while the `Axes` is the area where the heatmap is drawn. 
These can be used for further customization or saving the plot to a file. - Notes - ----- - - The y-axis is flipped so that the first row of the DataFrame is displayed at the top of the plot. - - The color scale is normalized based on the minimum and maximum values in the DataFrame. - - The x-axis labels (periods) are placed at the top of the plot. - - The colorbar is added horizontally at the bottom of the plot, with a label. + Notes: + - The y-axis is flipped so that the first row of the DataFrame is displayed at the top of the plot. + - The color scale is normalized based on the minimum and maximum values in the DataFrame. + - The x-axis labels (periods) are placed at the top of the plot. + - The colorbar is added horizontally at the bottom of the plot, with a label. """ # Get the min and max values for color normalization @@ -404,33 +359,23 @@ def heat_map_plotly( Plots a DataFrame as a heatmap using Plotly. The columns of the DataFrame will be mapped to the x-axis, and the index will be displayed on the y-axis. The values in the DataFrame will represent the 'heat' in the plot. - Parameters - ---------- - data: pd.DataFrame - A DataFrame with the data to be visualized. The index will be used for the y-axis, and columns will be used for the x-axis. - The values in the DataFrame will be represented as colors in the heatmap. - color_map: str, optional - The color scale to use for the heatmap. Default is 'viridis'. Plotly supports various color scales like 'Cividis', 'Inferno', etc. - categorical_labels: bool, optional - If True, the x and y axes are treated as categorical data (i.e., the index and columns will not be interpreted as continuous data). - Default is True. If False, the axes are treated as continuous, which may be useful for time series or numeric data. - show: bool - Wether to show the figure after creation. 
(This includes saving the figure) - save: bool - Wether to save the figure after creation (without showing) - path: Union[str, pathlib.Path] - Path to save the figure. - - Returns - ------- - go.Figure + Args: + data: A DataFrame with the data to be visualized. The index will be used for the y-axis, and columns will be used for the x-axis. + The values in the DataFrame will be represented as colors in the heatmap. + color_map: The color scale to use for the heatmap. Default is 'viridis'. Plotly supports various color scales like 'Cividis', 'Inferno', etc. + categorical_labels: If True, the x and y axes are treated as categorical data (i.e., the index and columns will not be interpreted as continuous data). + Default is True. If False, the axes are treated as continuous, which may be useful for time series or numeric data. + show: Wether to show the figure after creation. (This includes saving the figure) + save: Wether to save the figure after creation (without showing) + path: Path to save the figure. + + Returns: A Plotly figure object containing the heatmap. This can be further customized and saved or displayed using `fig.show()`. - Notes - ----- - The color bar is automatically scaled to the minimum and maximum values in the data. - The y-axis is reversed to display the first row at the top. + Notes: + The color bar is automatically scaled to the minimum and maximum values in the data. + The y-axis is reversed to display the first row at the top. """ color_bar_min, color_bar_max = data.min().min(), data.max().max() # Min and max values for color scaling @@ -479,18 +424,12 @@ def reshape_to_2d(data_1d: np.ndarray, nr_of_steps_per_column: int) -> np.ndarra The reshaped array will have the number of rows corresponding to the steps per column (e.g., 24 hours per day) and columns representing time periods (e.g., days or months). - Parameters - ---------- - data_1d: np.ndarray - A 1D numpy array with the data to reshape. 
- - nr_of_steps_per_column: int - The number of steps (rows) per column in the resulting 2D array. For example, - this could be 24 (for hours) or 31 (for days in a month). + Args: + data_1d: A 1D numpy array with the data to reshape. + nr_of_steps_per_column: The number of steps (rows) per column in the resulting 2D array. For example, + this could be 24 (for hours) or 31 (for days in a month). - Returns - ------- - np.ndarray + Returns: The reshaped 2D array. Each internal array corresponds to one column, with the specified number of steps. Each column might represents a time period (e.g., day, month, etc.). """ @@ -533,22 +472,15 @@ def heat_map_data_from_df( based on a specified sample rate. If a non-valid combination of periods and steps per period is used, falls back to numerical indices - Parameters - ---------- - df: pd.DataFrame - A DataFrame with a DateTime index containing the data to reshape. - periods: str - The time interval of each period (columns of the heatmap), - such as 'YS' (year start), 'W' (weekly), 'D' (daily), 'h' (hourly) etc. - steps_per_period: str - The time interval within each period (rows in the heatmap), - such as 'YS' (year start), 'W' (weekly), 'D' (daily), 'h' (hourly) etc. - fill: str, optional - Method to fill missing values: 'ffill' for forward fill or 'bfill' for backward fill. - - Returns - ------- - pd.DataFrame + Args: + df: A DataFrame with a DateTime index containing the data to reshape. + periods: The time interval of each period (columns of the heatmap), + such as 'YS' (year start), 'W' (weekly), 'D' (daily), 'h' (hourly) etc. + steps_per_period: The time interval within each period (rows in the heatmap), + such as 'YS' (year start), 'W' (weekly), 'D' (daily), 'h' (hourly) etc. + fill: Method to fill missing values: 'ffill' for forward fill or 'bfill' for backward fill. + + Returns: A DataFrame suitable for heatmap plotting, with rows representing steps within each period and columns representing each period. 
""" diff --git a/flixOpt/results.py b/flixOpt/results.py index dbb79c2c7..b965c79a9 100644 --- a/flixOpt/results.py +++ b/flixOpt/results.py @@ -27,15 +27,6 @@ class CalculationResults: This class is used to collect the results of a Calculation. It is used to analyze the results and to visualize the results. - Parameters - ---------- - model: linopy.Model - The linopy model that was used to solve the calculation. - infos: Dict - Information about the calculation, - results_structure: Dict[str, Dict[str, Dict]] - The structure of the flow_system that was used to solve the calculation. - Attributes ---------- model: linopy.Model @@ -51,13 +42,11 @@ class CalculationResults: hours_per_timestep: xr.DataArray The duration of each timestep in hours. - Class Methods - ------- - from_file(folder: Union[str, pathlib.Path], name: str) - Create CalculationResults directly from file. - from_calculation(calculation: Calculation) - Create CalculationResults directly from a Calculation. - + Class Methods: + from_file(folder: Union[str, pathlib.Path], name: str) + Create CalculationResults directly from file. + from_calculation(calculation: Calculation) + Create CalculationResults directly from a Calculation. """ @classmethod def from_file(cls, folder: Union[str, pathlib.Path], name: str): diff --git a/flixOpt/structure.py b/flixOpt/structure.py index 8be90a05e..7d51090a8 100644 --- a/flixOpt/structure.py +++ b/flixOpt/structure.py @@ -312,12 +312,9 @@ def add( """ Add a variable, constraint or sub-model to the model - Parameters - ---------- - item: linopy.Variable, linopy.Constraint, InterfaceModel - The variable, constraint or sub-model to add to the model - short_name: str, optional - The short name of the variable, constraint or sub-model. If not provided, the full name is used. + Args: + item: The variable, constraint or sub-model to add to the model + short_name: The short name of the variable, constraint or sub-model. If not provided, the full name is used. 
""" # TODO: Check uniquenes of short names if isinstance(item, linopy.Variable): @@ -445,43 +442,33 @@ def copy_and_convert_datatypes(data: Any, use_numpy: bool = True, use_element_la - Custom `Element` objects can be represented either by their `label` or their initialization parameters as a dictionary. - Timestamps (`datetime`) are converted to ISO 8601 strings. - Parameters - ---------- - data: Any - The input data to process, which may be deeply nested and contain a mix of types. - use_numpy: bool, optional - If `True`, numeric numpy arrays (`np.ndarray`) are preserved as-is. If `False`, they are converted to lists. - Default is `True`. - use_element_label: bool, optional - If `True`, `Element` objects are represented by their `label`. If `False`, they are converted into a dictionary - based on their initialization parameters. Default is `False`. - - Returns - ------- - Any + Args: + data: The input data to process, which may be deeply nested and contain a mix of types. + use_numpy: If `True`, numeric numpy arrays (`np.ndarray`) are preserved as-is. If `False`, they are converted to lists. + Default is `True`. + use_element_label: If `True`, `Element` objects are represented by their `label`. If `False`, they are converted into a dictionary + based on their initialization parameters. Default is `False`. + + Returns: A transformed version of the input data, containing only JSON-compatible types: - `int`, `float`, `str`, `bool`, `None` - `list`, `dict` - `np.ndarray` (if `use_numpy=True`. This is NOT JSON-compatible) - Raises - ------ - TypeError - If the data cannot be converted to the specified types. + Raises: + TypeError: If the data cannot be converted to the specified types. 
- Examples - -------- - >>> copy_and_convert_datatypes({'a': np.array([1, 2, 3]), 'b': Element(label='example')}) - {'a': array([1, 2, 3]), 'b': {'class': 'Element', 'label': 'example'}} + Examples: + >>> copy_and_convert_datatypes({'a': np.array([1, 2, 3]), 'b': Element(label='example')}) + {'a': array([1, 2, 3]), 'b': {'class': 'Element', 'label': 'example'}} - >>> copy_and_convert_datatypes({'a': np.array([1, 2, 3]), 'b': Element(label='example')}, use_numpy=False) - {'a': [1, 2, 3], 'b': {'class': 'Element', 'label': 'example'}} + >>> copy_and_convert_datatypes({'a': np.array([1, 2, 3]), 'b': Element(label='example')}, use_numpy=False) + {'a': [1, 2, 3], 'b': {'class': 'Element', 'label': 'example'}} - Notes - ----- - - The function gracefully handles unexpected types by issuing a warning and returning a deep copy of the data. - - Empty collections (lists, dictionaries) and default parameter values in `Element` objects are omitted from the output. - - Numpy arrays with non-numeric data types are automatically converted to lists. + Notes: + - The function gracefully handles unexpected types by issuing a warning and returning a deep copy of the data. + - Empty collections (lists, dictionaries) and default parameter values in `Element` objects are omitted from the output. + - Numpy arrays with non-numeric data types are automatically converted to lists. """ if isinstance(data, np.integer): # This must be checked before checking for regular int and float! return int(data) diff --git a/flixOpt/utils.py b/flixOpt/utils.py index c59aa1191..af0f103e5 100644 --- a/flixOpt/utils.py +++ b/flixOpt/utils.py @@ -34,16 +34,19 @@ def convert_dataarray(data: xr.DataArray, mode: Literal['py', 'numpy', 'xarray', """ Convert a DataArray to a different format. - Parameters - ---------- - data: xr.DataArray - The data to convert. 
- mode: Literal['py', 'numpy', 'xarray', 'structure'] - Whether to return the dataaray to - - python native types (for json) - - numpy array - - xarray.DataArray - - strings (for structure, storing variable names) + Args: + data: The DataArray to convert. + mode: The mode to convert to. + - 'py': Convert to python native types (for json) + - 'numpy': Convert to numpy array + - 'xarray': Convert to xarray.DataArray + - 'structure': Convert to strings (for structure, storing variable names) + + Returns: + The converted data. + + Raises: + ValueError: If the mode is unknown. """ if mode == 'numpy': return data.values From 588f4cb23a8ac5a7b19d728cd71efbb8dfd185c2 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 16 Mar 2025 15:54:58 +0100 Subject: [PATCH 36/87] Update remaining docstrings to Google style and add plotting and results to documentation --- docs/api/plotting.md | 7 +++++++ docs/api/results.md | 11 +++++++++++ flixOpt/calculation.py | 2 +- flixOpt/results.py | 33 +++++++++++++-------------------- mkdocs.yml | 2 ++ 5 files changed, 34 insertions(+), 21 deletions(-) create mode 100644 docs/api/plotting.md create mode 100644 docs/api/results.md diff --git a/docs/api/plotting.md b/docs/api/plotting.md new file mode 100644 index 000000000..714a7f2f3 --- /dev/null +++ b/docs/api/plotting.md @@ -0,0 +1,7 @@ +::: flixOpt.plotting.with_plotly + +::: flixOpt.plotting.heat_map_matplotlib + +::: flixOpt.plotting.heat_map_plotly + +::: flixOpt.plotting.reshape_to_2d \ No newline at end of file diff --git a/docs/api/results.md b/docs/api/results.md new file mode 100644 index 000000000..6e1db4d38 --- /dev/null +++ b/docs/api/results.md @@ -0,0 +1,11 @@ +::: flixOpt.results.CalculationResults + +::: flixOpt.results.SegmentedCalculationResults + +::: flixOpt.results.EffectResults + +::: flixOpt.results.BusResults + +::: flixOpt.results.ComponentResults + +::: flixOpt.results.plot_heatmap \ No newline at end of file diff --git 
a/flixOpt/calculation.py b/flixOpt/calculation.py index 614334f60..c90097ded 100644 --- a/flixOpt/calculation.py +++ b/flixOpt/calculation.py @@ -201,7 +201,7 @@ def __init__( folder: Optional[pathlib.Path] = None ): """ - Class for Optimizing the FLowSystem including: + Class for Optimizing the `FlowSystem` including: 1. Aggregating TimeSeriesData via typical periods using tsam. 2. Equalizing variables of typical periods. Args: diff --git a/flixOpt/results.py b/flixOpt/results.py index b965c79a9..326d1c693 100644 --- a/flixOpt/results.py +++ b/flixOpt/results.py @@ -27,26 +27,19 @@ class CalculationResults: This class is used to collect the results of a Calculation. It is used to analyze the results and to visualize the results. - Attributes - ---------- - model: linopy.Model - The linopy model that was used to solve the calculation. - components: Dict[str, ComponentResults] - A dictionary of ComponentResults for each component in the flow_system. - buses: Dict[str, BusResults] - A dictionary of BusResults for each bus in the flow_system. - effects: Dict[str, EffectResults] - A dictionary of EffectResults for each effect in the flow_system. - timesteps_extra: pd.DatetimeIndex - The extra timesteps of the flow_system. - hours_per_timestep: xr.DataArray - The duration of each timestep in hours. - - Class Methods: - from_file(folder: Union[str, pathlib.Path], name: str) - Create CalculationResults directly from file. - from_calculation(calculation: Calculation) - Create CalculationResults directly from a Calculation. + Attributes: + model: linopy.Model + The linopy model that was used to solve the calculation. + components: Dict[str, ComponentResults] + A dictionary of ComponentResults for each component in the flow_system. + buses: Dict[str, BusResults] + A dictionary of BusResults for each bus in the flow_system. + effects: Dict[str, EffectResults] + A dictionary of EffectResults for each effect in the flow_system. 
+ timesteps_extra: pd.DatetimeIndex + The extra timesteps of the flow_system. + hours_per_timestep: xr.DataArray + The duration of each timestep in hours. """ @classmethod def from_file(cls, folder: Union[str, pathlib.Path], name: str): diff --git a/mkdocs.yml b/mkdocs.yml index f0a2fbb45..eb91aa475 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -37,6 +37,8 @@ nav: - Calculation: api/calculation.md + - Results: api/results.md + - Plotting: api/plotting.md - Datatypes: api/datatypes.md - Display Math example: latex-example.md - Contribute: contribute.md From d0617188088a0c822c4ae0729fc87391bb059857 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 16 Mar 2025 15:58:00 +0100 Subject: [PATCH 37/87] Document the module instead of the classes --- docs/api/plotting.md | 8 +------- docs/api/results.md | 12 +----------- 2 files changed, 2 insertions(+), 18 deletions(-) diff --git a/docs/api/plotting.md b/docs/api/plotting.md index 714a7f2f3..1d104f137 100644 --- a/docs/api/plotting.md +++ b/docs/api/plotting.md @@ -1,7 +1 @@ -::: flixOpt.plotting.with_plotly - -::: flixOpt.plotting.heat_map_matplotlib - -::: flixOpt.plotting.heat_map_plotly - -::: flixOpt.plotting.reshape_to_2d \ No newline at end of file +::: flixOpt.plotting \ No newline at end of file diff --git a/docs/api/results.md b/docs/api/results.md index 6e1db4d38..bea01fbba 100644 --- a/docs/api/results.md +++ b/docs/api/results.md @@ -1,11 +1 @@ -::: flixOpt.results.CalculationResults - -::: flixOpt.results.SegmentedCalculationResults - -::: flixOpt.results.EffectResults - -::: flixOpt.results.BusResults - -::: flixOpt.results.ComponentResults - -::: flixOpt.results.plot_heatmap \ No newline at end of file +::: flixOpt.results \ No newline at end of file From 6406677bddd93432826b3b20753d380f8d92d253 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 16 Mar 2025 16:40:45 +0100 Subject: [PATCH 38/87] Update to generated docs --- 
mkdocs.yml | 47 ++++++++-------------------------------- pyproject.toml | 1 + scripts/gen_ref_pages.py | 34 +++++++++++++++++++++++++++++ 3 files changed, 44 insertions(+), 38 deletions(-) create mode 100644 scripts/gen_ref_pages.py diff --git a/mkdocs.yml b/mkdocs.yml index eb91aa475..c9a4492f3 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -9,41 +9,6 @@ repo_url: https://github.com/flixOpt/flixopt repo_name: flixOpt/flixopt -nav: - - Home: index.md - - Getting Started: getting-started.md - - Concepts: concepts/overview.md - - Examples: examples.md - - API Reference: - - FlowSystem: api/flow-system.md - - Elements: - - Effect: api/effect.md - - Flow: api/flow.md - - Bus: api/bus.md - - Components: - - Component: api/components/component.md - - Storage: api/components/storage.md - - LinearConverter: api/components/linear-converter.md - - Transmission: api/components/transmission.md - - Source: api/components/source.md - - Sink: api/components/sink.md - - SourceAndSink: api/components/source-and-sink.md - - LinearConverter: api/components/linear-converter.md - - Subclasses of LinearConverter: api/components/linear-converters.md - - Interfaces: - - OnOffParameters: api/interfaces/on_off_parameters.md - - InvestParameters: api/interfaces/invest_parameters.md - - AggregationParameters: api/interfaces/aggregation_parameters.md - - - - Calculation: api/calculation.md - - Results: api/results.md - - Plotting: api/plotting.md - - Datatypes: api/datatypes.md - - Display Math example: latex-example.md - - Contribute: contribute.md - - theme: name: material palette: @@ -111,9 +76,14 @@ markdown_extensions: plugins: - - search # Enables the search functionality in the documentation - - table-reader # Allows including tables from external files - - mkdocstrings: # Handles automatic API documentation generation + - search # Enables the search functionality in the documentation + - table-reader # Allows including tables from external files + - gen-files: + scripts: + - 
scripts/gen_ref_pages.py + - literate-nav: + nav_file: SUMMARY.md + - mkdocstrings: # Handles automatic API documentation generation default_handler: python # Sets Python as the default language handlers: python: # Configuration for Python code documentation @@ -138,6 +108,7 @@ plugins: show_category_heading: true # Displays category headings (Methods, Attributes, etc.) for organization show_signature: true # Shows method signatures with parameters show_signature_annotations: true # Includes type annotations in the signatures when available + show_root_toc_entry: false # Whether to show a link to the root of the documentation in the sidebar separate_signature: true # Displays signatures separate from descriptions for cleaner layout extra: diff --git a/pyproject.toml b/pyproject.toml index 21b6291e2..c3bb26805 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -64,6 +64,7 @@ docs = [ "mkdocstrings-python", "mkdocs-section-index", "mkdocs-table-reader-plugin", + "mkdocs-gen-files", ] [project.urls] diff --git a/scripts/gen_ref_pages.py b/scripts/gen_ref_pages.py new file mode 100644 index 000000000..edfb020c7 --- /dev/null +++ b/scripts/gen_ref_pages.py @@ -0,0 +1,34 @@ +"""Generate the code reference pages and navigation.""" + +from pathlib import Path + +import mkdocs_gen_files + +nav = mkdocs_gen_files.Nav() + +root = Path(__file__).parent.parent +src = root / "flixOpt" +api_dir = "auto_api" + +for path in sorted(src.rglob("*.py")): + module_path = path.relative_to(src).with_suffix("") + doc_path = path.relative_to(src).with_suffix(".md") + full_doc_path = Path(api_dir, doc_path) + + parts = tuple(module_path.parts) + + if parts[-1] == "__init__": + parts = parts[:-1] + elif parts[-1] == "__main__": + continue + + nav[parts] = doc_path.as_posix() + + with mkdocs_gen_files.open(full_doc_path, "w") as fd: + ident = ".".join(parts) + fd.write(f"::: {ident}") + + mkdocs_gen_files.set_edit_path(full_doc_path, path.relative_to(root)) + +with 
mkdocs_gen_files.open(api_dir + "/SUMMARY.md", "w") as nav_file: + nav_file.writelines(nav.build_literate_nav()) \ No newline at end of file From d62f5abd8ecc22c8531a4b9f37e6ee7290aab299 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 16 Mar 2025 16:51:43 +0100 Subject: [PATCH 39/87] Update auto gen refs --- scripts/gen_ref_pages.py | 33 +++++++++++++++++++++++++-------- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/scripts/gen_ref_pages.py b/scripts/gen_ref_pages.py index edfb020c7..d4453ba0a 100644 --- a/scripts/gen_ref_pages.py +++ b/scripts/gen_ref_pages.py @@ -1,12 +1,16 @@ """Generate the code reference pages and navigation.""" from pathlib import Path +import sys import mkdocs_gen_files +# Add the project root to sys.path to ensure modules can be imported +root = Path(__file__).parent.parent +sys.path.insert(0, str(root)) + nav = mkdocs_gen_files.Nav() -root = Path(__file__).parent.parent src = root / "flixOpt" api_dir = "auto_api" @@ -19,16 +23,29 @@ if parts[-1] == "__init__": parts = parts[:-1] - elif parts[-1] == "__main__": + if not parts: + continue # Skip the root __init__.py + doc_path = doc_path.with_name("index.md") + full_doc_path = full_doc_path.with_name("index.md") + elif parts[-1] == "__main__" or parts[-1].startswith("_"): continue - nav[parts] = doc_path.as_posix() + # Only add to navigation if there are actual parts + if parts: + nav[parts] = doc_path.as_posix() + + # Generate documentation file - always using the flixOpt prefix + with mkdocs_gen_files.open(full_doc_path, "w") as fd: + # Use 'flixOpt.' prefix for all module references + module_id = "flixOpt." 
+ ".".join(parts) + fd.write(f"::: {module_id}") - with mkdocs_gen_files.open(full_doc_path, "w") as fd: - ident = ".".join(parts) - fd.write(f"::: {ident}") + mkdocs_gen_files.set_edit_path(full_doc_path, path.relative_to(root)) - mkdocs_gen_files.set_edit_path(full_doc_path, path.relative_to(root)) +# Create an index file for the API reference +with mkdocs_gen_files.open(f"{api_dir}/index.md", "w") as index_file: + index_file.write("# API Reference\n\n") + index_file.write("This section contains the documentation for all modules and classes in flixOpt.\n") -with mkdocs_gen_files.open(api_dir + "/SUMMARY.md", "w") as nav_file: +with mkdocs_gen_files.open(f"{api_dir}/SUMMARY.md", "w") as nav_file: nav_file.writelines(nav.build_literate_nav()) \ No newline at end of file From 70fe216c88d0c584207075c8491da29e7f1d840a Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 16 Mar 2025 17:20:16 +0100 Subject: [PATCH 40/87] Change to other navigation layout to navigate docs --- docs/SUMMARY.md | 8 ++++++++ docs/api/SUMMARY.md | 9 +++++++++ docs/api/components/interfaces.md | 5 ----- flixOpt/solvers.py | 15 +++++++++++++++ mkdocs.yml | 2 +- scripts/gen_ref_pages.py | 4 +++- 6 files changed, 36 insertions(+), 7 deletions(-) create mode 100644 docs/SUMMARY.md create mode 100644 docs/api/SUMMARY.md delete mode 100644 docs/api/components/interfaces.md diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md new file mode 100644 index 000000000..b651bdf68 --- /dev/null +++ b/docs/SUMMARY.md @@ -0,0 +1,8 @@ +- [Home](index.md) +- [Getting Started](getting-started.md) +- [Concepts](concepts/overview.md) +- [Examples](examples.md) +- [API Reference](api/) +- [API-Auto-Docs](auto_api/) +- [Display Math example](latex-example.md) +- [Contribute](contribute.md) \ No newline at end of file diff --git a/docs/api/SUMMARY.md b/docs/api/SUMMARY.md new file mode 100644 index 000000000..413e860c2 --- /dev/null +++ b/docs/api/SUMMARY.md @@ -0,0 +1,9 @@ +- 
[FlowSystem](flow-system.md) +- [Flow](flow.md) +- [Bus](bus.md) +- [Effect](effect.md) +- [Components](components/) +- [Interfaces](interfaces/) +- [Results](results.md) +- [Plotting](plotting.md) +- [Datatypes](datatypes.md) diff --git a/docs/api/components/interfaces.md b/docs/api/components/interfaces.md deleted file mode 100644 index 40ac1d6d5..000000000 --- a/docs/api/components/interfaces.md +++ /dev/null @@ -1,5 +0,0 @@ -:: flixOpt.interfaces.OnOffParameters - -:: flixOpt.interfaces.InvestParameters - -::: flixOpt.aggregation.AggregationParameters diff --git a/flixOpt/solvers.py b/flixOpt/solvers.py index c9371b572..3f688d930 100644 --- a/flixOpt/solvers.py +++ b/flixOpt/solvers.py @@ -35,6 +35,13 @@ def _options(self) -> Dict[str, Any]: class GurobiSolver(_Solver): + """ + Args: + mip_gap (float): Solver's mip gap setting. The MIP gap describes the accepted (MILP) objective, + and the lower bound, which is the theoretically optimal solution (LP) + time_limit_seconds (int): Solver's time limit in seconds. + extra_options (str): Filename for saving the solver log. + """ name: ClassVar[str] = 'gurobi' @property @@ -46,6 +53,14 @@ def _options(self) -> Dict[str, Any]: class HighsSolver(_Solver): + """ + Args: + mip_gap (float): Solver's mip gap setting. The MIP gap describes the accepted (MILP) objective, + and the lower bound, which is the theoretically optimal solution (LP) + time_limit_seconds (int): Solver's time limit in seconds. + threads (int): Number of threads to use. + extra_options (str): Filename for saving the solver log. 
+ """ threads: Optional[int] = None name: ClassVar[str] = 'highs' diff --git a/mkdocs.yml b/mkdocs.yml index c9a4492f3..e0942f155 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -94,7 +94,7 @@ plugins: docstring_section_style: table # Renders parameter sections as a table (also: list, spacy) members_order: source # Orders members as they appear in the source code - inherited_members: true # Include members inherited from parent classes + inherited_members: false # Include members inherited from parent classes show_if_no_docstring: false # Documents objects even if they don't have docstrings group_by_category: true diff --git a/scripts/gen_ref_pages.py b/scripts/gen_ref_pages.py index d4453ba0a..37e4e7aac 100644 --- a/scripts/gen_ref_pages.py +++ b/scripts/gen_ref_pages.py @@ -38,7 +38,9 @@ with mkdocs_gen_files.open(full_doc_path, "w") as fd: # Use 'flixOpt.' prefix for all module references module_id = "flixOpt." + ".".join(parts) - fd.write(f"::: {module_id}") + fd.write(f"::: {module_id}\n" + f" options:\n" + f" inherited_members: true\n") mkdocs_gen_files.set_edit_path(full_doc_path, path.relative_to(root)) From 122cf61cfc0a0efdfa303205a5447d54959ddf14 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 16 Mar 2025 18:19:05 +0100 Subject: [PATCH 41/87] Update the overview to only reference the API documentation --- docs/SUMMARY.md | 5 +- docs/api/calculation.md | 5 - docs/api/datatypes.md | 36 ------ docs/{api => concepts-and-math}/SUMMARY.md | 1 + docs/{api => concepts-and-math}/bus.md | 0 docs/concepts-and-math/calculation.md | 2 + .../components/component.md | 0 .../components/linear-converter.md | 0 .../components/linear-converters.md | 0 .../components/sink.md | 0 .../components/source-and-sink.md | 0 .../components/source.md | 0 .../components/storage.md | 0 .../components/transmission.md | 0 docs/{api => concepts-and-math}/effect.md | 0 .../{api => concepts-and-math}/flow-system.md | 2 - docs/{api => 
concepts-and-math}/flow.md | 0 .../interfaces/aggregation_parameters.md | 0 .../interfaces/invest_parameters.md | 0 .../interfaces/on_off_parameters.md | 0 docs/{api => concepts-and-math}/plotting.md | 0 docs/{api => concepts-and-math}/results.md | 0 docs/concepts/overview.md | 103 ------------------ docs/getting-started.md | 4 +- flixOpt/components.py | 51 +++++++++ mkdocs.yml | 3 + 26 files changed, 61 insertions(+), 151 deletions(-) delete mode 100644 docs/api/calculation.md delete mode 100644 docs/api/datatypes.md rename docs/{api => concepts-and-math}/SUMMARY.md (92%) rename docs/{api => concepts-and-math}/bus.md (100%) create mode 100644 docs/concepts-and-math/calculation.md rename docs/{api => concepts-and-math}/components/component.md (100%) rename docs/{api => concepts-and-math}/components/linear-converter.md (100%) rename docs/{api => concepts-and-math}/components/linear-converters.md (100%) rename docs/{api => concepts-and-math}/components/sink.md (100%) rename docs/{api => concepts-and-math}/components/source-and-sink.md (100%) rename docs/{api => concepts-and-math}/components/source.md (100%) rename docs/{api => concepts-and-math}/components/storage.md (100%) rename docs/{api => concepts-and-math}/components/transmission.md (100%) rename docs/{api => concepts-and-math}/effect.md (100%) rename docs/{api => concepts-and-math}/flow-system.md (96%) rename docs/{api => concepts-and-math}/flow.md (100%) rename docs/{api => concepts-and-math}/interfaces/aggregation_parameters.md (100%) rename docs/{api => concepts-and-math}/interfaces/invest_parameters.md (100%) rename docs/{api => concepts-and-math}/interfaces/on_off_parameters.md (100%) rename docs/{api => concepts-and-math}/plotting.md (100%) rename docs/{api => concepts-and-math}/results.md (100%) delete mode 100644 docs/concepts/overview.md diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index b651bdf68..18a736917 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -1,8 +1,7 @@ - [Home](index.md) - 
[Getting Started](getting-started.md) -- [Concepts](concepts/overview.md) +- [Concepts & Math](concepts-and-math/) - [Examples](examples.md) -- [API Reference](api/) -- [API-Auto-Docs](auto_api/) +- [API-Reference](auto_api/) - [Display Math example](latex-example.md) - [Contribute](contribute.md) \ No newline at end of file diff --git a/docs/api/calculation.md b/docs/api/calculation.md deleted file mode 100644 index d21a03a0c..000000000 --- a/docs/api/calculation.md +++ /dev/null @@ -1,5 +0,0 @@ -::: flixOpt.calculation.FullCalculation - -::: flixOpt.calculation.SegmentedCalculation - -::: flixOpt.calculation.AggregatedCalculation diff --git a/docs/api/datatypes.md b/docs/api/datatypes.md deleted file mode 100644 index 8880d0408..000000000 --- a/docs/api/datatypes.md +++ /dev/null @@ -1,36 +0,0 @@ - -::: flixOpt.core.Scalar - options: - show_source: true - show_if_no_docstring: true - show_signature: true - show_bases: true - show_root_heading: true - show_root_full_path: true - -::: flixOpt.core.NumericData - options: - show_source: true - show_if_no_docstring: true - show_signature: true - show_bases: true - show_root_heading: true - show_root_full_path: true - -::: flixOpt.core.NumericDataTS - options: - show_source: true - show_if_no_docstring: true - show_signature: true - show_bases: true - show_root_heading: true - show_root_full_path: true - -::: flixOpt.effects.EffectValuesUser - options: - show_source: true - show_if_no_docstring: true - show_signature: true - show_bases: true - show_root_heading: true - show_root_full_path: true \ No newline at end of file diff --git a/docs/api/SUMMARY.md b/docs/concepts-and-math/SUMMARY.md similarity index 92% rename from docs/api/SUMMARY.md rename to docs/concepts-and-math/SUMMARY.md index 413e860c2..409a5cd4b 100644 --- a/docs/api/SUMMARY.md +++ b/docs/concepts-and-math/SUMMARY.md @@ -1,3 +1,4 @@ +# Concepts & Math - [FlowSystem](flow-system.md) - [Flow](flow.md) - [Bus](bus.md) diff --git a/docs/api/bus.md 
b/docs/concepts-and-math/bus.md similarity index 100% rename from docs/api/bus.md rename to docs/concepts-and-math/bus.md diff --git a/docs/concepts-and-math/calculation.md b/docs/concepts-and-math/calculation.md new file mode 100644 index 000000000..080a731c1 --- /dev/null +++ b/docs/concepts-and-math/calculation.md @@ -0,0 +1,2 @@ +# Calculation + diff --git a/docs/api/components/component.md b/docs/concepts-and-math/components/component.md similarity index 100% rename from docs/api/components/component.md rename to docs/concepts-and-math/components/component.md diff --git a/docs/api/components/linear-converter.md b/docs/concepts-and-math/components/linear-converter.md similarity index 100% rename from docs/api/components/linear-converter.md rename to docs/concepts-and-math/components/linear-converter.md diff --git a/docs/api/components/linear-converters.md b/docs/concepts-and-math/components/linear-converters.md similarity index 100% rename from docs/api/components/linear-converters.md rename to docs/concepts-and-math/components/linear-converters.md diff --git a/docs/api/components/sink.md b/docs/concepts-and-math/components/sink.md similarity index 100% rename from docs/api/components/sink.md rename to docs/concepts-and-math/components/sink.md diff --git a/docs/api/components/source-and-sink.md b/docs/concepts-and-math/components/source-and-sink.md similarity index 100% rename from docs/api/components/source-and-sink.md rename to docs/concepts-and-math/components/source-and-sink.md diff --git a/docs/api/components/source.md b/docs/concepts-and-math/components/source.md similarity index 100% rename from docs/api/components/source.md rename to docs/concepts-and-math/components/source.md diff --git a/docs/api/components/storage.md b/docs/concepts-and-math/components/storage.md similarity index 100% rename from docs/api/components/storage.md rename to docs/concepts-and-math/components/storage.md diff --git a/docs/api/components/transmission.md 
b/docs/concepts-and-math/components/transmission.md similarity index 100% rename from docs/api/components/transmission.md rename to docs/concepts-and-math/components/transmission.md diff --git a/docs/api/effect.md b/docs/concepts-and-math/effect.md similarity index 100% rename from docs/api/effect.md rename to docs/concepts-and-math/effect.md diff --git a/docs/api/flow-system.md b/docs/concepts-and-math/flow-system.md similarity index 96% rename from docs/api/flow-system.md rename to docs/concepts-and-math/flow-system.md index d3aa8bf30..0aeec3cdc 100644 --- a/docs/api/flow-system.md +++ b/docs/concepts-and-math/flow-system.md @@ -1,7 +1,5 @@ ## Overview -::: flixOpt.flow_system.FlowSystem - ## Usage Examples ```python diff --git a/docs/api/flow.md b/docs/concepts-and-math/flow.md similarity index 100% rename from docs/api/flow.md rename to docs/concepts-and-math/flow.md diff --git a/docs/api/interfaces/aggregation_parameters.md b/docs/concepts-and-math/interfaces/aggregation_parameters.md similarity index 100% rename from docs/api/interfaces/aggregation_parameters.md rename to docs/concepts-and-math/interfaces/aggregation_parameters.md diff --git a/docs/api/interfaces/invest_parameters.md b/docs/concepts-and-math/interfaces/invest_parameters.md similarity index 100% rename from docs/api/interfaces/invest_parameters.md rename to docs/concepts-and-math/interfaces/invest_parameters.md diff --git a/docs/api/interfaces/on_off_parameters.md b/docs/concepts-and-math/interfaces/on_off_parameters.md similarity index 100% rename from docs/api/interfaces/on_off_parameters.md rename to docs/concepts-and-math/interfaces/on_off_parameters.md diff --git a/docs/api/plotting.md b/docs/concepts-and-math/plotting.md similarity index 100% rename from docs/api/plotting.md rename to docs/concepts-and-math/plotting.md diff --git a/docs/api/results.md b/docs/concepts-and-math/results.md similarity index 100% rename from docs/api/results.md rename to docs/concepts-and-math/results.md diff 
--git a/docs/concepts/overview.md b/docs/concepts/overview.md deleted file mode 100644 index 3ec29f5db..000000000 --- a/docs/concepts/overview.md +++ /dev/null @@ -1,103 +0,0 @@ -# flixOpt Concepts Overview - -flixOpt is built around a set of core concepts that work together to represent and optimize energy and material flow systems. This page provides a high-level overview of these concepts and how they interact. - -## Core Concepts - -![Architecture](../images/architecture_flixOpt.png) - -### FlowSystem - -The `FlowSystem` is the central organizing unit in flixOpt. It: - -- Defines the time series for the simulation -- Contains all components, buses, and flows -- Manages the effects (objectives and constraints) -- Coordinates the optimization process - -Every flixOpt model starts with creating a FlowSystem. - -### Buses - -`Bus` objects represent nodes or connection points in your system. They: - -- Balance incoming and outgoing flows -- Can represent physical networks like heat, electricity, or gas -- Handles infeasable balances gently by allowing the balance to be closed in return for a big Penalty (optional) - -### Flows - -`Flow` objects represent the movement of energy or material between components and buses. They: - -- Have a size (fixed or optimized as an investment decision) -- Can have fixed profiles (for demands or renewable generation) -- Can have constraints (min/max, total flow hours, etc.) -- Can have associated [Effects](#effects) - -### Components - -`Component` objects represent physical entities in your system that interact with flows. They include: - -- `LinearConverter` - Converts input flows to output flows with (piecewise) linear relationships -- `Storage` - Stores energy or material over time -- `Source` / `Sink` - Produce or consume flows. They are usually used to model external demands or supplies. -- `Transmission` - Moves flows between locations with possible losses -- Specialized converters like `Boiler`, `HeatPump`, `CHP`, etc. 
- -### Effects - -`Effect` objects represent impacts or metrics related to your system, such as: - -- Costs (investment, operation) -- Emissions (CO2, NOx, etc.) -- Resource consumption - -These can be freely defined and crosslink to each other (CO2-Emissions ---(specific CO2-costs)---> Costs). -One effect is designated as the optimization objective (typically costs), while others can have constraints. -This effect can incorporate several other effects, which woul result in a weighted objective from multiple effects. - -### Calculation Modes - -flixOpt offers different calculation approaches: - -- `FullCalculation` - Solves the entire problem at once -- `SegmentedCalculation` - Solves the problem in segments (with optioinal overlap), improving performance for large problems -- `AggregatedCalculation` - Uses typical periods to reduce computational requirements - -## How These Concepts Work Together - -1. You create a `FlowSystem` with a specified time series -2. You add elements to the FLowSystem: - - `Bus` objects as connection points - - `Component` objects like Boilers, Storages, etc.. They include `Flow` which define the connection to a Bus. - - `Effect` objects to represent costs, emissions, etc. -6. You choose a calculation mode and solver -7. flixOpt converts your model into a mathematical optimization problem -8. The solver finds the optimal solution -9. You analyze the results with built-in or external tools - -## Mathematical Foundation - -Behind the scenes, flixOpt converts your Flow System into a mixed-integer linear programming (MILP) problem: -This is done using the [linopy package](https://github.com/PyPSA/linopy). - -- Variables represent flow rates, storage levels, on/off states, etc. -- Constraints ensure physical validity (energy balance, etc.) 
-- The objective function represents the effect to be minimized (usually cost) - -The mathematical formulation is flexible and can incorporates: - -- Time-dependent parameters -- Investment decisions -- Binary decision variables (on/off decisions, piecewise linear relationships, ...) -- Runtime or downtime constraints -- and many more... - - - - - - - - - diff --git a/docs/getting-started.md b/docs/getting-started.md index fef6a5e67..9e87cb690 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -2,9 +2,9 @@ This guide will help you install flixOpt, understand its basic concepts, and run your first optimization model. -## Installation +# Installation -### Basic Installation +## Basic Installation Install flixOpt directly into your environment using pip: diff --git a/flixOpt/components.py b/flixOpt/components.py index 2d161ecd5..9aab5bc77 100644 --- a/flixOpt/components.py +++ b/flixOpt/components.py @@ -128,6 +128,57 @@ def degrees_of_freedom(self): @register_class_for_io class Storage(Component): + r""" + **Storages** have one incoming and one outgoing **Flow** - $f_\text{in}$ and $f_\text{out}$ - + each with an efficiency $\eta_\text{in}$ and $\eta_\text{out}$. + Further, storages have a `size` $\text C$ and a state of charge $c(\text{t}_i)$. + Similarly to the flow-rate $p(\text{t}_i)$ of a [`Flow`][flixOpt.elements.Flow], + the `size` $\text C$ combined with a relative upper bound + $\text c^{\text{U}}_\text{rel}(\text t_{i})$ and lower bound $\text c^{\text{L}}_\text{rel}(\text t_{i})$ + limits the state of charge $c(\text{t}_i)$ by $\eqref{eq:Storage_Bounds}$. 
+ + $$ \label{eq:Storage_Bounds} + \text C \cdot \text c^{\text{L}}_{\text{rel}}(\text t_{i}) + \leq c(\text{t}_i) \leq + \text C \cdot \text c^{\text{U}}_{\text{rel}}(\text t_{i}) + $$ + + Where: + + - $\text C$ is the storage capacity + - $c(\text{t}_i)$ is the state of charge at time $\text{t}_i$ + - $\text c^{\text{L}}_{\text{rel}}(\text t_{i})$ is the relative lower bound (typically 0) + - $\text c^{\text{U}}_{\text{rel}}(\text t_{i})$ is the relative upper bound (typically 1) + + With $\text c^{\text{L}}_{\text{rel}}(\text t_{i}) = 0$ and $\text c^{\text{U}}_{\text{rel}}(\text t_{i}) = 1$, + Equation $\eqref{eq:Storage_Bounds}$ simplifies to + + $$ 0 \leq c(\text t_{i}) \leq \text C $$ + + The state of charge $c(\text{t}_i)$ decreases by a fraction of the prior state of charge. The belonging parameter + $ \dot{ \text c}_\text{rel, loss}(\text{t}_i)$ expresses the "loss fraction per hour". The storage balance from $\text{t}_i$ to $\text t_{i+1}$ is + + $$ + \begin{align*} + c(\text{t}_{i+1}) &= c(\text{t}_{i}) \cdot (1-\dot{\text{c}}_\text{rel,loss}(\text{t}_i) \cdot \Delta \text{t}_{i}) \\ + &\quad + p_{f_\text{in}}(\text{t}_i) \cdot \Delta \text{t}_i \cdot \eta_\text{in}(\text{t}_i) \\ + &\quad - \frac{p_{f_\text{out}}(\text{t}_i) \cdot \Delta \text{t}_i}{\eta_\text{out}(\text{t}_i)} + \tag{3} + \end{align*} + $$ + + Where: + + - $c(\text{t}_{i+1})$ is the state of charge at time $\text{t}_{i+1}$ + - $c(\text{t}_{i})$ is the state of charge at time $\text{t}_{i}$ + - $\dot{\text{c}}_\text{rel,loss}(\text{t}_i)$ is the relative loss rate (self-discharge) per hour + - $\Delta \text{t}_{i}$ is the time step duration in hours + - $p_{f_\text{in}}(\text{t}_i)$ is the input flow rate at time $\text{t}_i$ + - $\eta_\text{in}(\text{t}_i)$ is the charging efficiency at time $\text{t}_i$ + - $p_{f_\text{out}}(\text{t}_i)$ is the output flow rate at time $\text{t}_i$ + - $\eta_\text{out}(\text{t}_i)$ is the discharging efficiency at time $\text{t}_i$ + + """ def 
__init__( self, label: str, diff --git a/mkdocs.yml b/mkdocs.yml index e0942f155..c671302de 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -54,6 +54,8 @@ theme: markdown_extensions: - admonition - codehilite + - markdown_include.include: + base_path: docs - pymdownx.highlight: anchor_linenums: true line_spans: __span @@ -83,6 +85,7 @@ plugins: - scripts/gen_ref_pages.py - literate-nav: nav_file: SUMMARY.md + implicit_index: true # This makes index.md the default landing page - mkdocstrings: # Handles automatic API documentation generation default_handler: python # Sets Python as the default language handlers: From c308c13e6d3af2ccb82d8d879a846555db10964f Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 16 Mar 2025 19:07:12 +0100 Subject: [PATCH 42/87] Remove many files in favour of documenting the math seperately from the api --- docs/concepts-and-math/SUMMARY.md | 10 -- docs/concepts-and-math/bus.md | 1 - docs/concepts-and-math/calculation.md | 2 - .../concepts-and-math/components/component.md | 1 - .../components/linear-converter.md | 15 -- .../components/linear-converters.md | 116 -------------- docs/concepts-and-math/components/sink.md | 1 - .../components/source-and-sink.md | 1 - docs/concepts-and-math/components/source.md | 1 - docs/concepts-and-math/components/storage.md | 69 --------- .../components/transmission.md | 1 - docs/concepts-and-math/effect.md | 1 - docs/concepts-and-math/flow-system.md | 41 ----- docs/concepts-and-math/flow.md | 1 - .../interfaces/aggregation_parameters.md | 1 - .../interfaces/invest_parameters.md | 1 - .../interfaces/on_off_parameters.md | 1 - docs/concepts-and-math/plotting.md | 1 - docs/concepts-and-math/results.md | 1 - docs/getting-started.md | 143 ------------------ 20 files changed, 409 deletions(-) delete mode 100644 docs/concepts-and-math/SUMMARY.md delete mode 100644 docs/concepts-and-math/bus.md delete mode 100644 docs/concepts-and-math/calculation.md delete mode 100644 
docs/concepts-and-math/components/component.md delete mode 100644 docs/concepts-and-math/components/linear-converter.md delete mode 100644 docs/concepts-and-math/components/linear-converters.md delete mode 100644 docs/concepts-and-math/components/sink.md delete mode 100644 docs/concepts-and-math/components/source-and-sink.md delete mode 100644 docs/concepts-and-math/components/source.md delete mode 100644 docs/concepts-and-math/components/storage.md delete mode 100644 docs/concepts-and-math/components/transmission.md delete mode 100644 docs/concepts-and-math/effect.md delete mode 100644 docs/concepts-and-math/flow-system.md delete mode 100644 docs/concepts-and-math/flow.md delete mode 100644 docs/concepts-and-math/interfaces/aggregation_parameters.md delete mode 100644 docs/concepts-and-math/interfaces/invest_parameters.md delete mode 100644 docs/concepts-and-math/interfaces/on_off_parameters.md delete mode 100644 docs/concepts-and-math/plotting.md delete mode 100644 docs/concepts-and-math/results.md delete mode 100644 docs/getting-started.md diff --git a/docs/concepts-and-math/SUMMARY.md b/docs/concepts-and-math/SUMMARY.md deleted file mode 100644 index 409a5cd4b..000000000 --- a/docs/concepts-and-math/SUMMARY.md +++ /dev/null @@ -1,10 +0,0 @@ -# Concepts & Math -- [FlowSystem](flow-system.md) -- [Flow](flow.md) -- [Bus](bus.md) -- [Effect](effect.md) -- [Components](components/) -- [Interfaces](interfaces/) -- [Results](results.md) -- [Plotting](plotting.md) -- [Datatypes](datatypes.md) diff --git a/docs/concepts-and-math/bus.md b/docs/concepts-and-math/bus.md deleted file mode 100644 index d758470a9..000000000 --- a/docs/concepts-and-math/bus.md +++ /dev/null @@ -1 +0,0 @@ -::: flixOpt.elements.Bus \ No newline at end of file diff --git a/docs/concepts-and-math/calculation.md b/docs/concepts-and-math/calculation.md deleted file mode 100644 index 080a731c1..000000000 --- a/docs/concepts-and-math/calculation.md +++ /dev/null @@ -1,2 +0,0 @@ -# Calculation - diff 
--git a/docs/concepts-and-math/components/component.md b/docs/concepts-and-math/components/component.md deleted file mode 100644 index 473c6d8f0..000000000 --- a/docs/concepts-and-math/components/component.md +++ /dev/null @@ -1 +0,0 @@ -::: flixOpt.elements.Component \ No newline at end of file diff --git a/docs/concepts-and-math/components/linear-converter.md b/docs/concepts-and-math/components/linear-converter.md deleted file mode 100644 index a399d13ca..000000000 --- a/docs/concepts-and-math/components/linear-converter.md +++ /dev/null @@ -1,15 +0,0 @@ -::: flixOpt.components.LinearConverter - -### Example Usage - -```python -import flixOpt as fx - -# Create a heat pump with COP = 3 -heat_pump = fx.LinearConverter( - label="HeatPump", - inputs=[fx.Flow(label="power_in", bus='Heat')], - outputs=[fx.Flow(label="heat_out", bus='Heat')], - conversion_factors=[{"power_in": 3, "heat_out": 1}] -) -``` diff --git a/docs/concepts-and-math/components/linear-converters.md b/docs/concepts-and-math/components/linear-converters.md deleted file mode 100644 index 2ebf9f1f5..000000000 --- a/docs/concepts-and-math/components/linear-converters.md +++ /dev/null @@ -1,116 +0,0 @@ -# Linear Converters API Reference - -The `linear_converters` module provides pre-defined specialized converters that simplify the usage of the `LinearConverter` class. Common energy system elements like boilers, heat pumps, and CHPs are predefined. -For more advanced LinearConverters, refer to the [LinearConverter API Reference](linear-converter.md). 
- -::: flixOpt.linear_converters.Boiler - -::: flixOpt.linear_converters.Power2Heat - -::: flixOpt.linear_converters.HeatPump - -::: flixOpt.linear_converters.HeatPumpWithSource - -::: flixOpt.linear_converters.CoolingTower - -::: flixOpt.linear_converters.CHP - - -## Examples - -### Creating a Boiler - -```python -import flixOpt as fo - -# Create buses -fuel_bus = fo.Bus("Fuel") -heat_bus = fo.Bus("Heat") - -# Create flows -fuel_flow = fo.Flow("fuel", fuel_bus) -heat_flow = fo.Flow("heat", heat_bus) - -# Create a boiler with 90% efficiency -boiler = fo.linear_converters.Boiler( - label="Boiler", - eta=0.9, # 90% thermal efficiency - Q_fu=fuel_flow, # Fuel input flow - Q_th=heat_flow # Thermal output flow -) -``` - -### Creating a Heat Pump - -```python -import flixOpt as fo - -# Create buses -electricity_bus = fo.Bus("Electricity") -heat_bus = fo.Bus("Heat") - -# Create flows -power_flow = fo.Flow("power", electricity_bus) -heat_flow = fo.Flow("heat", heat_bus) - -# Create a heat pump with COP of 3 -heat_pump = fo.linear_converters.HeatPump( - label="HeatPump", - COP=3.0, # Coefficient of Performance - P_el=power_flow, # Electrical input flow - Q_th=heat_flow # Thermal output flow -) -``` - -### Creating a CHP Unit - -```python -import flixOpt as fo - -# Create buses -fuel_bus = fo.Bus("Fuel") -electricity_bus = fo.Bus("Electricity") -heat_bus = fo.Bus("Heat") - -# Create flows -fuel_flow = fo.Flow("fuel", fuel_bus) -power_flow = fo.Flow("power", electricity_bus) -heat_flow = fo.Flow("heat", heat_bus) - -# Create a CHP unit -chp = fo.linear_converters.CHP( - label="CHP_Unit", - eta_th=0.45, # 45% thermal efficiency - eta_el=0.35, # 35% electrical efficiency - Q_fu=fuel_flow, # Fuel input flow - P_el=power_flow, # Electrical output flow - Q_th=heat_flow # Thermal output flow -) -``` - -### Creating a Heat Pump with Source - -```python -import flixOpt as fo - -# Create buses -electricity_bus = fo.Bus("Electricity") -heat_source_bus = fo.Bus("HeatSource") 
-heat_output_bus = fo.Bus("Heat") - -# Create flows -power_flow = fo.Flow("power", electricity_bus) -source_flow = fo.Flow("source", heat_source_bus) -heat_flow = fo.Flow("heat", heat_output_bus) - -# Create a heat pump with source -hp_with_source = fo.linear_converters.HeatPumpWithSource( - label="HeatPump", - COP=3.5, # Coefficient of Performance - P_el=power_flow, # Electrical input flow - Q_ab=source_flow, # Heat source input flow - Q_th=heat_flow # Thermal output flow -) -``` - -These pre-defined components simplify the process of building energy system models by providing specialized implementations of common energy converters. diff --git a/docs/concepts-and-math/components/sink.md b/docs/concepts-and-math/components/sink.md deleted file mode 100644 index 4b8506275..000000000 --- a/docs/concepts-and-math/components/sink.md +++ /dev/null @@ -1 +0,0 @@ -::: flixOpt.components.Sink \ No newline at end of file diff --git a/docs/concepts-and-math/components/source-and-sink.md b/docs/concepts-and-math/components/source-and-sink.md deleted file mode 100644 index 4f9d5c7a2..000000000 --- a/docs/concepts-and-math/components/source-and-sink.md +++ /dev/null @@ -1 +0,0 @@ -::: flixOpt.components.SourceAndSink \ No newline at end of file diff --git a/docs/concepts-and-math/components/source.md b/docs/concepts-and-math/components/source.md deleted file mode 100644 index 60f787b56..000000000 --- a/docs/concepts-and-math/components/source.md +++ /dev/null @@ -1 +0,0 @@ -::: flixOpt.components.Source \ No newline at end of file diff --git a/docs/concepts-and-math/components/storage.md b/docs/concepts-and-math/components/storage.md deleted file mode 100644 index 2d6498a37..000000000 --- a/docs/concepts-and-math/components/storage.md +++ /dev/null @@ -1,69 +0,0 @@ -::: flixOpt.components.Storage - -## Mathematical Formulation - -**Storages** have one incoming and one outgoing **Flow** - $f_\text{in}$ and $f_\text{out}$ - -each with an efficiency $\eta_\text{in}$ and 
$\eta_\text{out}$. -Further, storages have a `size` $\text C$ and a state of charge $c(\text{t}_i)$. -Similarly to the flow-rate $p(\text{t}_i)$ of a [`Flow`][flixOpt.elements.Flow], -the `size` $\text C$ combined with a relative upper bound -$\text c^{\text{U}}_\text{rel}(\text t_{i})$ and lower bound $\text c^{\text{L}}_\text{rel}(\text t_{i})$ -limits the state of charge $c(\text{t}_i)$ by $\eqref{eq:Storage_Bounds}$. - -$$ \label{eq:Storage_Bounds} - \text C \cdot \text c^{\text{L}}_{\text{rel}}(\text t_{i}) - \leq c(\text{t}_i) \leq - \text C \cdot \text c^{\text{U}}_{\text{rel}}(\text t_{i}) -$$ - -Where: - -- $\text C$ is the storage capacity -- $c(\text{t}_i)$ is the state of charge at time $\text{t}_i$ -- $\text c^{\text{L}}_{\text{rel}}(\text t_{i})$ is the relative lower bound (typically 0) -- $\text c^{\text{U}}_{\text{rel}}(\text t_{i})$ is the relative upper bound (typically 1) - -With $\text c^{\text{L}}_{\text{rel}}(\text t_{i}) = 0$ and $\text c^{\text{U}}_{\text{rel}}(\text t_{i}) = 1$, -Equation $\eqref{eq:Storage_Bounds}$ simplifies to - -$$ 0 \leq c(\text t_{i}) \leq \text C $$ - -The state of charge $c(\text{t}_i)$ decreases by a fraction of the prior state of charge. The belonging parameter -$ \dot{ \text c}_\text{rel, loss}(\text{t}_i)$ expresses the "loss fraction per hour". 
The storage balance from $\text{t}_i$ to $\text t_{i+1}$ is - -$$ -\begin{align*} - c(\text{t}_{i+1}) &= c(\text{t}_{i}) \cdot (1-\dot{\text{c}}_\text{rel,loss}(\text{t}_i) \cdot \Delta \text{t}_{i}) \\ - &\quad + p_{f_\text{in}}(\text{t}_i) \cdot \Delta \text{t}_i \cdot \eta_\text{in}(\text{t}_i) \\ - &\quad - \frac{p_{f_\text{out}}(\text{t}_i) \cdot \Delta \text{t}_i}{\eta_\text{out}(\text{t}_i)} - \tag{3} -\end{align*} -$$ - -Where: - -- $c(\text{t}_{i+1})$ is the state of charge at time $\text{t}_{i+1}$ -- $c(\text{t}_{i})$ is the state of charge at time $\text{t}_{i}$ -- $\dot{\text{c}}_\text{rel,loss}(\text{t}_i)$ is the relative loss rate (self-discharge) per hour -- $\Delta \text{t}_{i}$ is the time step duration in hours -- $p_{f_\text{in}}(\text{t}_i)$ is the input flow rate at time $\text{t}_i$ -- $\eta_\text{in}(\text{t}_i)$ is the charging efficiency at time $\text{t}_i$ -- $p_{f_\text{out}}(\text{t}_i)$ is the output flow rate at time $\text{t}_i$ -- $\eta_\text{out}(\text{t}_i)$ is the discharging efficiency at time $\text{t}_i$ - - -## Creating a Storage - -```python -import flixOpt as fx - -thermal_storage = fx.Storage( - label="ThermalStorage", - charging=fx.Flow("charging", "Wärme", size=100), - discharging=fx.Flow("discharging", "Wärme", size=100), - capacity_in_flow_hours=1000, # 1000 kWh capacity - relative_loss_per_hour=0.01, # 1% loss per hour - eta_charge=0.95, # 95% charging efficiency - eta_discharge=0.95 # 95% discharging efficiency -) -``` diff --git a/docs/concepts-and-math/components/transmission.md b/docs/concepts-and-math/components/transmission.md deleted file mode 100644 index f86c8c891..000000000 --- a/docs/concepts-and-math/components/transmission.md +++ /dev/null @@ -1 +0,0 @@ -::: flixOpt.components.Transmission \ No newline at end of file diff --git a/docs/concepts-and-math/effect.md b/docs/concepts-and-math/effect.md deleted file mode 100644 index f8b36b75a..000000000 --- a/docs/concepts-and-math/effect.md +++ /dev/null @@ 
-1 +0,0 @@ -::: flixOpt.effects.Effect \ No newline at end of file diff --git a/docs/concepts-and-math/flow-system.md b/docs/concepts-and-math/flow-system.md deleted file mode 100644 index 0aeec3cdc..000000000 --- a/docs/concepts-and-math/flow-system.md +++ /dev/null @@ -1,41 +0,0 @@ -## Overview - -## Usage Examples - -```python -import flixOpt as fx -import pandas as pd - - -# Create timesteps with hourly steps for one day -timesteps = pd.date_range('2023-01-01', steps=24, freq='1h') - -# Initialize the FlowSystem -flow_system = fx.FlowSystem(timesteps) - -# Add buses, components and effects -heat_bus = fx.Bus("Heat") -electricity_bus = fx.Bus("Electricity") -costs = fx.Effect("costs", "€", "Costs", is_standard=True, is_objective=True) -flow_system.add_elements(heat_bus, electricity_bus, costs) - -# You can add components with their connected flows -heat_pump = fx.linear_converters.HeatPump( - label="HeatPump", - COP=3.0, - P_el=fx.Flow("power", electricity_bus.label, effects_per_flow_hour=0.2), - Q_th=fx.Flow("heat", heat_bus.label) -) -flow_system.add_elements(heat_pump) - -# Access components and flow_system structure -print(flow_system.components) # Dictionary of all components -print(flow_system.buses) # Dictionary of all buses -print(flow_system.flows) # Dictionary of all flows - -# Visualize the flow_system network -flow_system.plot_network(show=True) - -# Save the flow_system definition -flow_system.to_json("flow_system_definition.json") -``` diff --git a/docs/concepts-and-math/flow.md b/docs/concepts-and-math/flow.md deleted file mode 100644 index e11bddc5f..000000000 --- a/docs/concepts-and-math/flow.md +++ /dev/null @@ -1 +0,0 @@ -::: flixOpt.elements.Flow \ No newline at end of file diff --git a/docs/concepts-and-math/interfaces/aggregation_parameters.md b/docs/concepts-and-math/interfaces/aggregation_parameters.md deleted file mode 100644 index 2c07d9183..000000000 --- a/docs/concepts-and-math/interfaces/aggregation_parameters.md +++ /dev/null @@ -1 
+0,0 @@ -::: flixOpt.aggregation.AggregationParameters \ No newline at end of file diff --git a/docs/concepts-and-math/interfaces/invest_parameters.md b/docs/concepts-and-math/interfaces/invest_parameters.md deleted file mode 100644 index a195d98e9..000000000 --- a/docs/concepts-and-math/interfaces/invest_parameters.md +++ /dev/null @@ -1 +0,0 @@ -::: flixOpt.interface.InvestParameters \ No newline at end of file diff --git a/docs/concepts-and-math/interfaces/on_off_parameters.md b/docs/concepts-and-math/interfaces/on_off_parameters.md deleted file mode 100644 index 8012a19dc..000000000 --- a/docs/concepts-and-math/interfaces/on_off_parameters.md +++ /dev/null @@ -1 +0,0 @@ -::: flixOpt.interface.OnOffParameters \ No newline at end of file diff --git a/docs/concepts-and-math/plotting.md b/docs/concepts-and-math/plotting.md deleted file mode 100644 index 1d104f137..000000000 --- a/docs/concepts-and-math/plotting.md +++ /dev/null @@ -1 +0,0 @@ -::: flixOpt.plotting \ No newline at end of file diff --git a/docs/concepts-and-math/results.md b/docs/concepts-and-math/results.md deleted file mode 100644 index bea01fbba..000000000 --- a/docs/concepts-and-math/results.md +++ /dev/null @@ -1 +0,0 @@ -::: flixOpt.results \ No newline at end of file diff --git a/docs/getting-started.md b/docs/getting-started.md deleted file mode 100644 index 9e87cb690..000000000 --- a/docs/getting-started.md +++ /dev/null @@ -1,143 +0,0 @@ -# Getting Started with flixOpt - -This guide will help you install flixOpt, understand its basic concepts, and run your first optimization model. - -# Installation - -## Basic Installation - -Install flixOpt directly into your environment using pip: - -```bash -pip install git+https://github.com/flixOpt/flixOpt.git -``` - -This provides the core functionality with the HiGHS solver included. 
- -### Full Installation - -For all features including interactive network visualizations and time series aggregation: - -```bash -pip install "flixOpt[full] @ git+https://github.com/flixOpt/flixOpt.git" -``` - -### Development Installation - -For development purposes, clone the repository and install in editable mode: - -```bash -git clone https://github.com/flixOpt/flixOpt.git -cd flixOpt -pip install -e ".[full]" -``` - -## Basic Workflow - -Working with flixOpt follows a general pattern: - -1. **Create a FlowSystem** with a time series -2. **Define Buses** as connection points in your system -3. **Create Flows** to represent energy/material streams -4. **Add Components** like converters, storage, sources/sinks -5. **Define Effects** (costs, emissions, etc.) -6. **Run Calculations** to optimize your system -7. **Analyze Results** using built-in visualization tools - -## Simple Example - -Here's a minimal example of a simple system with a heat demand and a boiler: - -```python -import flixOpt as fo -import numpy as np - -# Create time steps - hourly for one day -time_series = fo.create_datetime_array('2023-01-01', steps=24, freq='1h') -system = fo.FlowSystem(time_series) - -# Create buses as connection points -heat_bus = fo.Bus("Heat") -fuel_bus = fo.Bus("Fuel") - -# Create a demand profile (sine wave + base load) -heat_demand_profile = 100 * np.sin(np.linspace(0, 2*np.pi, 24))**2 + 50 - -# Create flows connecting to buses -heat_demand = fo.Flow( - label="heat_demand", - bus=heat_bus, - fixed_relative_profile=heat_demand_profile # Fixed demand profile -) - -fuel_supply = fo.Flow( - label="fuel_supply", - bus=fuel_bus -) - -heat_output = fo.Flow( - label="heat_output", - bus=heat_bus -) - -# Create a boiler component -boiler = fo.linear_converters.Boiler( - label="Boiler", - eta=0.9, # 90% efficiency - Q_fu=fuel_supply, - Q_th=heat_output -) - -# Create a sink for the heat demand -heat_sink = fo.Sink( - label="Heat Demand", - sink=heat_demand -) - -# Add effects 
(costs) -fuel_cost = fo.Effect( - label="costs", - unit="€", - description="Operational costs", - is_objective=True # This effect will be minimized -) - -# Add elements to the system -system.add_effects(fuel_cost) -system.add_components(boiler, heat_sink) - -# Run optimization -calculation = fo.FullCalculation("Simple_Example", system) -solver = fo.HighsSolver() # Using the default solver - -# Optimize the system -calculation.do_modeling() -calculation.solve(solver, save_results=True) - -# Print results summary -print(f"Objective value: {calculation.system_model.result_of_objective}") -``` - -## Visualization - -flixOpt includes tools to visualize your results. Here's a simple example to plot flow rates: - -```python -import flixOpt.results as results - -# Load results from a previous calculation -result = results.CalculationResults("Simple_Example", "results") - -# Plot heat flows -result.plot_operation("Heat", mode="area", show=True) -``` - -## Next Steps - -Now that you've installed flixOpt and understand the basic workflow, you can: - -- Learn about the [core concepts](concepts/overview.md) -- Explore more complex [examples](examples/basic.md) -- Check the [API reference](api/flow-system.md) for detailed documentation - -For more in-depth guidance, continue to the [Concepts](concepts/overview.md) section. 
From bfb9d151fd6a750c84792462670dbbad70fb5973 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 16 Mar 2025 19:09:00 +0100 Subject: [PATCH 43/87] Remove many files in favour of documenting the math seperately from the api --- docs/concepts-and-math/SUMMARY.md | 2 + docs/concepts-and-math/index.md | 105 +++++++++++++++++ docs/concepts-and-math/math/Flow.md | 0 docs/concepts-and-math/math/Timesteps.md | 7 ++ docs/concepts-and-math/math/index.md | 13 +++ docs/getting-started.md | 143 +++++++++++++++++++++++ 6 files changed, 270 insertions(+) create mode 100644 docs/concepts-and-math/SUMMARY.md create mode 100644 docs/concepts-and-math/index.md create mode 100644 docs/concepts-and-math/math/Flow.md create mode 100644 docs/concepts-and-math/math/Timesteps.md create mode 100644 docs/concepts-and-math/math/index.md create mode 100644 docs/getting-started.md diff --git a/docs/concepts-and-math/SUMMARY.md b/docs/concepts-and-math/SUMMARY.md new file mode 100644 index 000000000..6049dcf5a --- /dev/null +++ b/docs/concepts-and-math/SUMMARY.md @@ -0,0 +1,2 @@ +- [Concepts](index.md) +- [Mathematical Description](math/) \ No newline at end of file diff --git a/docs/concepts-and-math/index.md b/docs/concepts-and-math/index.md new file mode 100644 index 000000000..3576404b1 --- /dev/null +++ b/docs/concepts-and-math/index.md @@ -0,0 +1,105 @@ +# flixOpt Concepts & Mathematical Description + +flixOpt is built around a set of core concepts that work together to represent and optimize energy and material flow systems. This page provides a high-level overview of these concepts and how they interact. + +## Core Concepts + +### FlowSystem + +The [`FlowSystem`][flixOpt.flow_system.FlowSystem] is the central organizing unit in flixOpt. It: + +- Defines the time series for the simulation +- Contains all components, buses, and flows +- Manages the effects (objectives and constraints) + +Every flixOpt model starts with creating a FlowSystem. 
+ +### Buses + +[`Bus`][flixOpt.elements.Bus] objects represent nodes or connection points in your system. They: + +- Balance incoming and outgoing flows +- Can represent physical networks like heat, electricity, or gas +- Handle infeasible balances gently by allowing the balance to be closed in return for a big Penalty (optional) + +### Flows + +[`Flow`][flixOpt.elements.Flow] objects represent the movement of energy or material between components and buses. They: + +- Have a size (fixed or part of an investment decision) +- Can have fixed profiles (for demands or renewable generation) +- Can have constraints (min/max, total flow hours, etc.) +- Can have [Effects](#effects) associated by their use (operation, investment, on/off, ...) + +### Components + +[`Component`][flixOpt.elements.Component] objects usually represent physical entities in your system that interact with [`Flows`][flixOpt.elements.Flow]. They include: + +- [`LinearConverters`][flixOpt.components.LinearConverter] - Converts input flows to output flows with (piecewise) linear relationships +- [`Storages`][flixOpt.components.Storage] - Stores energy or material over time +- [`Sources`][flixOpt.components.Source] / [`Sinks`][flixOpt.components.Sink] / [`SourceAndSinks`][flixOpt.components.SourceAndSink] - Produce or consume flows. They are usually used to model external demands or supplies. +- [`Transmissions`][flixOpt.components.Transmission] - Moves flows between locations with possible losses +- Specialized [`LinearConverters`][flixOpt.components.LinearConverter] like [`Boilers`][flixOpt.linear_converters.Boiler], [`HeatPumps`][flixOpt.linear_converters.HeatPump], [`CHPs`][flixOpt.linear_converters.CHP], etc. These simplify the usage of the `LinearConverter` class and can also be used as a blueprint on how to define custom classes or parameterize existing ones.
+ +### Effects + +[`Effect`][flixOpt.effects.Effect] objects represent impacts or metrics related to your system, such as: + +- Costs (investment, operation) +- Emissions (CO₂, NOx, etc.) +- Resource consumption + +These can be freely defined and crosslink to each other (`CO₂` ──[specific CO₂-costs]─→ `Costs`). +One effect is designated as the **optimization objective** (typically Costs), while others can have constraints. +This effect can incorporate several other effects, which would result in a weighted objective from multiple effects. + +### Calculation Modes + +flixOpt offers different calculation approaches: + +- [`FullCalculation`][flixOpt.calculation.FullCalculation] - Solves the entire problem at once +- [`SegmentedCalculation`][flixOpt.calculation.SegmentedCalculation] - Solves the problem in segments (with optional overlap), improving performance for large problems +- [`AggregatedCalculation`][flixOpt.calculation.AggregatedCalculation] - Uses typical periods to reduce computational requirements + +## How These Concepts Work Together + +1. You create a `FlowSystem` with a specified time series +2. You add elements to the FlowSystem: + - `Bus` objects as connection points + - `Component` objects like Boilers, Storages, etc. They include `Flow` objects, which define the connection to a Bus. + - `Effect` objects to represent costs, emissions, etc. +3. You choose a calculation mode and solver +4. flixOpt converts your model into a mathematical optimization problem +5. The solver finds the optimal solution +6. You analyze the results with built-in or external tools + +## Mathematical Foundation + +Behind the scenes, flixOpt converts your Flow System into a mixed-integer linear programming (MILP) problem: +This is done using the [linopy package](https://github.com/PyPSA/linopy). + +- Variables represent flow rates, storage levels, on/off states, etc. +- Constraints ensure physical validity (energy balance, etc.)
+- The objective function represents the effect to be minimized (usually cost) + +The mathematical formulation is flexible and can incorporates: + +- Time-dependent parameters +- Investment decisions +- Binary decision variables (on/off decisions, piecewise linear relationships, ...) +- Runtime or downtime constraints +- and many more... + + +### Architechture (outdated) +![Architecture](../images/architecture_flixOpt.png) + + + + + + + + + + diff --git a/docs/concepts-and-math/math/Flow.md b/docs/concepts-and-math/math/Flow.md new file mode 100644 index 000000000..e69de29bb diff --git a/docs/concepts-and-math/math/Timesteps.md b/docs/concepts-and-math/math/Timesteps.md new file mode 100644 index 000000000..4f0ad442f --- /dev/null +++ b/docs/concepts-and-math/math/Timesteps.md @@ -0,0 +1,7 @@ +Time steps are defined as a sequence of discrete time steps $\text{t}_i \in \mathcal{T} \text{for} \quad i \in \{1, 2, \dots, \text{n}\}$ (left-aligned in its timespan). +From this sequence, the corresponding time intervals $\Delta \text{t}_i \in \Delta \mathcal{T}$ are derived as + +$$\Delta \text{t}_i = \text{t}_{i+1} - \text{t}_i \quad \text{for} \quad i \in \{1, 2, \dots, \text{n}-1\}$$ + +Non-equidistant time steps are supported. +The final time interval $\Delta \text{t}_\text n$ defaults to $\Delta \text{t}_\text n = \Delta \text{t}_{\text n-1}$, but is of course customizable. diff --git a/docs/concepts-and-math/math/index.md b/docs/concepts-and-math/math/index.md new file mode 100644 index 000000000..0427d9ea1 --- /dev/null +++ b/docs/concepts-and-math/math/index.md @@ -0,0 +1,13 @@ +# Mathematical Description of flixOpt + +Here you will find a detailed description of the mathematical foundation of flixOpt. +To get started, check out the [Mathematical Notation & Naming Conventions](math.md#mathematical-notation--naming-conventions) section. 
+ +## Mathematical Notation & Naming Conventions + +flixOpt uses the following naming conventions: + +- All optimization variables are denoted by italic letters (e.g., $x$, $y$, $z$) +- All parameters and constants are denoted by non italic small letters (e.g., $\text{a}$, $\text{b}$, $\text{c}$) +- The letter $i$ is used to denote an index (e.g., $i=1,\dots,\text n$) +- All time steps are denoted by the letter $\text{t}$ (e.g., $\text{t}_0$, $\text{t}_1$, $\text{t}_i$) diff --git a/docs/getting-started.md b/docs/getting-started.md new file mode 100644 index 000000000..fef6a5e67 --- /dev/null +++ b/docs/getting-started.md @@ -0,0 +1,143 @@ +# Getting Started with flixOpt + +This guide will help you install flixOpt, understand its basic concepts, and run your first optimization model. + +## Installation + +### Basic Installation + +Install flixOpt directly into your environment using pip: + +```bash +pip install git+https://github.com/flixOpt/flixOpt.git +``` + +This provides the core functionality with the HiGHS solver included. + +### Full Installation + +For all features including interactive network visualizations and time series aggregation: + +```bash +pip install "flixOpt[full] @ git+https://github.com/flixOpt/flixOpt.git" +``` + +### Development Installation + +For development purposes, clone the repository and install in editable mode: + +```bash +git clone https://github.com/flixOpt/flixOpt.git +cd flixOpt +pip install -e ".[full]" +``` + +## Basic Workflow + +Working with flixOpt follows a general pattern: + +1. **Create a FlowSystem** with a time series +2. **Define Buses** as connection points in your system +3. **Create Flows** to represent energy/material streams +4. **Add Components** like converters, storage, sources/sinks +5. **Define Effects** (costs, emissions, etc.) +6. **Run Calculations** to optimize your system +7. 
**Analyze Results** using built-in visualization tools + +## Simple Example + +Here's a minimal example of a simple system with a heat demand and a boiler: + +```python +import flixOpt as fo +import numpy as np + +# Create time steps - hourly for one day +time_series = fo.create_datetime_array('2023-01-01', steps=24, freq='1h') +system = fo.FlowSystem(time_series) + +# Create buses as connection points +heat_bus = fo.Bus("Heat") +fuel_bus = fo.Bus("Fuel") + +# Create a demand profile (sine wave + base load) +heat_demand_profile = 100 * np.sin(np.linspace(0, 2*np.pi, 24))**2 + 50 + +# Create flows connecting to buses +heat_demand = fo.Flow( + label="heat_demand", + bus=heat_bus, + fixed_relative_profile=heat_demand_profile # Fixed demand profile +) + +fuel_supply = fo.Flow( + label="fuel_supply", + bus=fuel_bus +) + +heat_output = fo.Flow( + label="heat_output", + bus=heat_bus +) + +# Create a boiler component +boiler = fo.linear_converters.Boiler( + label="Boiler", + eta=0.9, # 90% efficiency + Q_fu=fuel_supply, + Q_th=heat_output +) + +# Create a sink for the heat demand +heat_sink = fo.Sink( + label="Heat Demand", + sink=heat_demand +) + +# Add effects (costs) +fuel_cost = fo.Effect( + label="costs", + unit="€", + description="Operational costs", + is_objective=True # This effect will be minimized +) + +# Add elements to the system +system.add_effects(fuel_cost) +system.add_components(boiler, heat_sink) + +# Run optimization +calculation = fo.FullCalculation("Simple_Example", system) +solver = fo.HighsSolver() # Using the default solver + +# Optimize the system +calculation.do_modeling() +calculation.solve(solver, save_results=True) + +# Print results summary +print(f"Objective value: {calculation.system_model.result_of_objective}") +``` + +## Visualization + +flixOpt includes tools to visualize your results. 
Here's a simple example to plot flow rates: + +```python +import flixOpt.results as results + +# Load results from a previous calculation +result = results.CalculationResults("Simple_Example", "results") + +# Plot heat flows +result.plot_operation("Heat", mode="area", show=True) +``` + +## Next Steps + +Now that you've installed flixOpt and understand the basic workflow, you can: + +- Learn about the [core concepts](concepts/overview.md) +- Explore more complex [examples](examples/basic.md) +- Check the [API reference](api/flow-system.md) for detailed documentation + +For more in-depth guidance, continue to the [Concepts](concepts/overview.md) section. From e800fb4cb7e60e6488e04ee37b1f76f696c8fbe5 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 16 Mar 2025 19:28:07 +0100 Subject: [PATCH 44/87] Simplify the docs --- docs/concepts-and-math/SUMMARY.md | 2 -- docs/concepts-and-math/index.md | 34 +++++++++++++++--------- docs/concepts-and-math/math/Flow.md | 0 docs/concepts-and-math/math/Timesteps.md | 7 ----- docs/concepts-and-math/math/index.md | 13 --------- flixOpt/elements.py | 2 -- 6 files changed, 21 insertions(+), 37 deletions(-) delete mode 100644 docs/concepts-and-math/SUMMARY.md delete mode 100644 docs/concepts-and-math/math/Flow.md delete mode 100644 docs/concepts-and-math/math/Timesteps.md delete mode 100644 docs/concepts-and-math/math/index.md diff --git a/docs/concepts-and-math/SUMMARY.md b/docs/concepts-and-math/SUMMARY.md deleted file mode 100644 index 6049dcf5a..000000000 --- a/docs/concepts-and-math/SUMMARY.md +++ /dev/null @@ -1,2 +0,0 @@ -- [Concepts](index.md) -- [Mathematical Description](math/) \ No newline at end of file diff --git a/docs/concepts-and-math/index.md b/docs/concepts-and-math/index.md index 3576404b1..f8c3dbac1 100644 --- a/docs/concepts-and-math/index.md +++ b/docs/concepts-and-math/index.md @@ -14,6 +14,16 @@ The [`FlowSystem`][flixOpt.flow_system.FlowSystem] is the central 
organizing uni Every flixOpt model starts with creating a FlowSystem. +### Timesteps +Time steps are defined as a sequence of discrete time steps $\text{t}_i \in \mathcal{T} \text{for} \quad i \in \{1, 2, \dots, \text{n}\}$ (left-aligned in its timespan). +From this sequence, the corresponding time intervals $\Delta \text{t}_i \in \Delta \mathcal{T}$ are derived as + +$$\Delta \text{t}_i = \text{t}_{i+1} - \text{t}_i \quad \text{for} \quad i \in \{1, 2, \dots, \text{n}-1\}$$ + +Non-equidistant time steps are supported. +The final time interval $\Delta \text{t}_\text n$ defaults to $\Delta \text{t}_\text n = \Delta \text{t}_{\text n-1}$, but is of course customizable. + + ### Buses [`Bus`][flixOpt.elements.Bus] objects represent nodes or connection points in your system. They: @@ -73,25 +83,23 @@ flixOpt offers different calculation approaches: 5.The solver finds the optimal solution 6.You analyze the results with built-in or external tools -## Mathematical Foundation +## Advanced Usage +flixOpt uses [linopy](https://github.com/PyPSA/linopy) to model the mathematical optimization problem. +Any model created with flixOpt can be extended or modified using the great [linopy API](https://linopy.readthedocs.io/en/latest/api.html). +This allows to adjust your model to very specific requirements without loosing the convenience of flixOpt. -Behind the scenes, flixOpt converts your Flow System into a mixed-integer linear programming (MILP) problem: -This is done using the [linopy package](https://github.com/PyPSA/linopy). -- Variables represent flow rates, storage levels, on/off states, etc. -- Constraints ensure physical validity (energy balance, etc.) 
-- The objective function represents the effect to be minimized (usually cost) +## Mathematical Notation & Naming Conventions -The mathematical formulation is flexible and can incorporates: +flixOpt uses the following naming conventions: -- Time-dependent parameters -- Investment decisions -- Binary decision variables (on/off decisions, piecewise linear relationships, ...) -- Runtime or downtime constraints -- and many more... +- All optimization variables are denoted by italic letters (e.g., $x$, $y$, $z$) +- All parameters and constants are denoted by non italic small letters (e.g., $\text{a}$, $\text{b}$, $\text{c}$) +- The letter $i$ is used to denote an index (e.g., $i=1,\dots,\text n$) +- All time steps are denoted by the letter $\text{t}$ (e.g., $\text{t}_0$, $\text{t}_1$, $\text{t}_i$) -### Architechture (outdated) +## Architechture (outdated) ![Architecture](../images/architecture_flixOpt.png) diff --git a/docs/concepts-and-math/math/Flow.md b/docs/concepts-and-math/math/Flow.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/docs/concepts-and-math/math/Timesteps.md b/docs/concepts-and-math/math/Timesteps.md deleted file mode 100644 index 4f0ad442f..000000000 --- a/docs/concepts-and-math/math/Timesteps.md +++ /dev/null @@ -1,7 +0,0 @@ -Time steps are defined as a sequence of discrete time steps $\text{t}_i \in \mathcal{T} \text{for} \quad i \in \{1, 2, \dots, \text{n}\}$ (left-aligned in its timespan). -From this sequence, the corresponding time intervals $\Delta \text{t}_i \in \Delta \mathcal{T}$ are derived as - -$$\Delta \text{t}_i = \text{t}_{i+1} - \text{t}_i \quad \text{for} \quad i \in \{1, 2, \dots, \text{n}-1\}$$ - -Non-equidistant time steps are supported. -The final time interval $\Delta \text{t}_\text n$ defaults to $\Delta \text{t}_\text n = \Delta \text{t}_{\text n-1}$, but is of course customizable. 
diff --git a/docs/concepts-and-math/math/index.md b/docs/concepts-and-math/math/index.md deleted file mode 100644 index 0427d9ea1..000000000 --- a/docs/concepts-and-math/math/index.md +++ /dev/null @@ -1,13 +0,0 @@ -# Mathematical Description of flixOpt - -Here you will find a detailed description of the mathematical foundation of flixOpt. -To get started, check out the [Mathematical Notation & Naming Conventions](math.md#mathematical-notation--naming-conventions) section. - -## Mathematical Notation & Naming Conventions - -flixOpt uses the following naming conventions: - -- All optimization variables are denoted by italic letters (e.g., $x$, $y$, $z$) -- All parameters and constants are denoted by non italic small letters (e.g., $\text{a}$, $\text{b}$, $\text{c}$) -- The letter $i$ is used to denote an index (e.g., $i=1,\dots,\text n$) -- All time steps are denoted by the letter $\text{t}$ (e.g., $\text{t}_0$, $\text{t}_1$, $\text{t}_i$) diff --git a/flixOpt/elements.py b/flixOpt/elements.py index 60f669a29..2df78d6a5 100644 --- a/flixOpt/elements.py +++ b/flixOpt/elements.py @@ -159,8 +159,6 @@ def __init__(self): @register_class_for_io class Flow(Element): r""" - Flows are the inputs and outputs of [Component][flixOpt.elements.Component] - and connect them to [Busses][flixOpt.elements.Bus]. A **Flow** moves energy (or material) between a [Bus][flixOpt.elements.Bus] and a [Component][flixOpt.elements.Component] in a predefined direction. The flow-rate $p(\text{t}_{i})$ is the main optimization variable of the **Flow**. 
The size $\text P$ of the **Flow** combined with a relative upper bound $\text p_{\text{rel}}^{\text{U}}(\text{t}_{i})$ From e2096d62f35ae10a970bced3b1107dcb31ec3c29 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 16 Mar 2025 19:28:28 +0100 Subject: [PATCH 45/87] Simplify the docs --- flixOpt/elements.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flixOpt/elements.py b/flixOpt/elements.py index 2df78d6a5..5f94de868 100644 --- a/flixOpt/elements.py +++ b/flixOpt/elements.py @@ -164,14 +164,14 @@ class Flow(Element): The size $\text P$ of the **Flow** combined with a relative upper bound $\text p_{\text{rel}}^{\text{U}}(\text{t}_{i})$ and lower bound $\text p^{\text{L}}_{\text{rel}}(\text{t}_{i})$ limits the flow-rate per time step $p(\text{t}_{i})$. - $$ + $$ \label{eq:flow_rate} \text P \cdot \text p^{\text{L}}_{\text{rel}}(\text{t}_{i}) \leq p(\text{t}_{i}) \leq \text P \cdot \text p^{\text{U}}_{\text{rel}}(\text{t}_{i}) \tag{1} $$ With $\text p^{\text{L}}_{\text{rel}}(\text{t}_{i}) = 0$ and $\text p^{\text{U}}_{\text{rel}}(\text{t}_{i}) = 1$, - equation (1) simplifies to + equation \eqref{eq:flow_rate} simplifies to $$ 0 \leq p(\text{t}_{i}) \leq \text P From 0a19a054785cdae723d2aca544ef1c0e4d3b52e1 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 16 Mar 2025 19:44:07 +0100 Subject: [PATCH 46/87] Simplify the docs --- docs/SUMMARY.md | 4 +- docs/examples.md | 1 - docs/examples/index.md | 4 ++ docs/getting-started.md | 119 +++------------------------------------ scripts/gen_ref_pages.py | 2 +- 5 files changed, 16 insertions(+), 114 deletions(-) delete mode 100644 docs/examples.md create mode 100644 docs/examples/index.md diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 18a736917..f782c277a 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -1,7 +1,7 @@ - [Home](index.md) - [Getting Started](getting-started.md) - [Concepts & 
Math](concepts-and-math/) -- [Examples](examples.md) -- [API-Reference](auto_api/) +- [Examples](examples/) +- [API-Reference](api-reference/) - [Display Math example](latex-example.md) - [Contribute](contribute.md) \ No newline at end of file diff --git a/docs/examples.md b/docs/examples.md deleted file mode 100644 index f57f1d405..000000000 --- a/docs/examples.md +++ /dev/null @@ -1 +0,0 @@ -TODO: Add examples \ No newline at end of file diff --git a/docs/examples/index.md b/docs/examples/index.md new file mode 100644 index 000000000..9af1e3600 --- /dev/null +++ b/docs/examples/index.md @@ -0,0 +1,4 @@ +# Examples + +TODO: Add examples +For now, please look into our GitHub examples folder. \ No newline at end of file diff --git a/docs/getting-started.md b/docs/getting-started.md index fef6a5e67..b086c5fed 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -22,122 +22,21 @@ For all features including interactive network visualizations and time series ag pip install "flixOpt[full] @ git+https://github.com/flixOpt/flixOpt.git" ``` -### Development Installation - -For development purposes, clone the repository and install in editable mode: - -```bash -git clone https://github.com/flixOpt/flixOpt.git -cd flixOpt -pip install -e ".[full]" -``` - ## Basic Workflow Working with flixOpt follows a general pattern: -1. **Create a FlowSystem** with a time series -2. **Define Buses** as connection points in your system -3. **Create Flows** to represent energy/material streams -4. **Add Components** like converters, storage, sources/sinks -5. **Define Effects** (costs, emissions, etc.) -6. **Run Calculations** to optimize your system -7. 
**Analyze Results** using built-in visualization tools - -## Simple Example - -Here's a minimal example of a simple system with a heat demand and a boiler: - -```python -import flixOpt as fo -import numpy as np - -# Create time steps - hourly for one day -time_series = fo.create_datetime_array('2023-01-01', steps=24, freq='1h') -system = fo.FlowSystem(time_series) - -# Create buses as connection points -heat_bus = fo.Bus("Heat") -fuel_bus = fo.Bus("Fuel") - -# Create a demand profile (sine wave + base load) -heat_demand_profile = 100 * np.sin(np.linspace(0, 2*np.pi, 24))**2 + 50 - -# Create flows connecting to buses -heat_demand = fo.Flow( - label="heat_demand", - bus=heat_bus, - fixed_relative_profile=heat_demand_profile # Fixed demand profile -) - -fuel_supply = fo.Flow( - label="fuel_supply", - bus=fuel_bus -) - -heat_output = fo.Flow( - label="heat_output", - bus=heat_bus -) - -# Create a boiler component -boiler = fo.linear_converters.Boiler( - label="Boiler", - eta=0.9, # 90% efficiency - Q_fu=fuel_supply, - Q_th=heat_output -) - -# Create a sink for the heat demand -heat_sink = fo.Sink( - label="Heat Demand", - sink=heat_demand -) - -# Add effects (costs) -fuel_cost = fo.Effect( - label="costs", - unit="€", - description="Operational costs", - is_objective=True # This effect will be minimized -) - -# Add elements to the system -system.add_effects(fuel_cost) -system.add_components(boiler, heat_sink) - -# Run optimization -calculation = fo.FullCalculation("Simple_Example", system) -solver = fo.HighsSolver() # Using the default solver - -# Optimize the system -calculation.do_modeling() -calculation.solve(solver, save_results=True) - -# Print results summary -print(f"Objective value: {calculation.system_model.result_of_objective}") -``` - -## Visualization - -flixOpt includes tools to visualize your results. 
Here's a simple example to plot flow rates: - -```python -import flixOpt.results as results - -# Load results from a previous calculation -result = results.CalculationResults("Simple_Example", "results") - -# Plot heat flows -result.plot_operation("Heat", mode="area", show=True) -``` +1. **Create a FlowSystem** with corresponding timesteps +2. **Define Effects** (costs, emissions, etc.) +3. **Define Buses** as connection points in your system +4. **Add Components** like converters, storage, sources/sinks with their Flows +5. **Run Calculations** to optimize your system +6. **Analyze Results** using built-in or external visualization tools ## Next Steps Now that you've installed flixOpt and understand the basic workflow, you can: -- Learn about the [core concepts](concepts/overview.md) -- Explore more complex [examples](examples/basic.md) -- Check the [API reference](api/flow-system.md) for detailed documentation - -For more in-depth guidance, continue to the [Concepts](concepts/overview.md) section. 
+- Learn about the [core concepts of flixOpt](concepts-and-math/index.md) +- Explore some [examples](examples/index.md) +- Check the [API reference](api-reference/index.md) for detailed documentation diff --git a/scripts/gen_ref_pages.py b/scripts/gen_ref_pages.py index 37e4e7aac..002086ae5 100644 --- a/scripts/gen_ref_pages.py +++ b/scripts/gen_ref_pages.py @@ -12,7 +12,7 @@ nav = mkdocs_gen_files.Nav() src = root / "flixOpt" -api_dir = "auto_api" +api_dir = "api-reference" for path in sorted(src.rglob("*.py")): module_path = path.relative_to(src).with_suffix("") From 4285fbdab7735a6202d70047c3ce8766786cda4c Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 16 Mar 2025 19:50:01 +0100 Subject: [PATCH 47/87] Improve the docs --- docs/concepts-and-math/index.md | 2 +- docs/getting-started.md | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/concepts-and-math/index.md b/docs/concepts-and-math/index.md index f8c3dbac1..bd38c9fd9 100644 --- a/docs/concepts-and-math/index.md +++ b/docs/concepts-and-math/index.md @@ -15,7 +15,7 @@ The [`FlowSystem`][flixOpt.flow_system.FlowSystem] is the central organizing uni Every flixOpt model starts with creating a FlowSystem. ### Timesteps -Time steps are defined as a sequence of discrete time steps $\text{t}_i \in \mathcal{T} \text{for} \quad i \in \{1, 2, \dots, \text{n}\}$ (left-aligned in its timespan). +Time steps are defined as a sequence of discrete time steps $\text{t}_i \in \mathcal{T} \quad \text{for} \quad i \in \{1, 2, \dots, \text{n}\}$ (left-aligned in its timespan). 
From this sequence, the corresponding time intervals $\Delta \text{t}_i \in \Delta \mathcal{T}$ are derived as $$\Delta \text{t}_i = \text{t}_{i+1} - \text{t}_i \quad \text{for} \quad i \in \{1, 2, \dots, \text{n}-1\}$$ diff --git a/docs/getting-started.md b/docs/getting-started.md index b086c5fed..cbc28f58f 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -26,12 +26,12 @@ pip install "flixOpt[full] @ git+https://github.com/flixOpt/flixOpt.git" Working with flixOpt follows a general pattern: -1. **Create a FlowSystem** with corresponding timesteps -2. **Define Effects** (costs, emissions, etc.) -3. **Define Buses** as connection points in your system -4. **Add Components** like converters, storage, sources/sinks with their Flows -5. **Run Calculations** to optimize your system -6. **Analyze Results** using built-in or external visualization tools +1. **Create a [`FlowSystem`][flixOpt.flow_system.FlowSystem]** with a time series +2. **Define [`Effects`][flixOpt.effects.Effect]** (costs, emissions, etc.) +3. **Define [`Buses`][flixOpt.elements.Bus]** as connection points in your system +4. **Add [`Components`][flixOpt.components]** like converters, storage, sources/sinks with their Flows +5. **Run [`Calculations`][flixOpt.calculation]** to optimize your system +6. 
**Analyze [`Results`][flixOpt.results]** using built-in or external visualization tools ## Next Steps From 2abaf4337bf7cee00772488adcaf875cd58aab4d Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 16 Mar 2025 20:03:52 +0100 Subject: [PATCH 48/87] Add examples to the docs --- docs/examples/index.md | 30 ++++++++++++++++++++++++++++-- mkdocs.yml | 3 +++ pyproject.toml | 1 + 3 files changed, 32 insertions(+), 2 deletions(-) diff --git a/docs/examples/index.md b/docs/examples/index.md index 9af1e3600..c7a3d057b 100644 --- a/docs/examples/index.md +++ b/docs/examples/index.md @@ -1,4 +1,30 @@ # Examples -TODO: Add examples -For now, please look into out Github examples folder. \ No newline at end of file +## Minimal Example + +```python +{! ../examples/00_Minmal/minimal_example.py !} +``` + +## Simple example + +```python +{! ../examples/01_Simple/simple_example.py !} +``` + +## Complex example +This saves the results of a calculation to file and reloads them to analyze the results +### Build the Model +```python +{! ../examples/02_Complex/complex_example.py !} +``` +### Load the Results from file +```python +{! ../examples/02_Complex/complex_example_results.py !} +``` + +## Calculation Mode comparison +**Note:** This example relies on time series data. You can find it in the `examples` folder of the flixOpt repository. +```python +{! ../examples/03_Calculation_types/example_calculation_types.py !} +``` diff --git a/mkdocs.yml b/mkdocs.yml index c671302de..44e5b08c8 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -75,11 +75,14 @@ markdown_extensions: - pymdownx.emoji: emoji_index: !!python/name:material.extensions.emoji.twemoji emoji_generator: !!python/name:material.extensions.emoji.to_svg + - pymdownx.snippets: + base_path: .. 
plugins: - search # Enables the search functionality in the documentation - table-reader # Allows including tables from external files + - include-markdown - gen-files: scripts: - scripts/gen_ref_pages.py diff --git a/pyproject.toml b/pyproject.toml index c3bb26805..d9d827c2e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -65,6 +65,7 @@ docs = [ "mkdocs-section-index", "mkdocs-table-reader-plugin", "mkdocs-gen-files", + "mkdocs-include-markdown-plugin" ] [project.urls] From a93f15596bdc1277d44120deaebd8663cc9d1d1d Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 16 Mar 2025 20:20:59 +0100 Subject: [PATCH 49/87] Bugfixes in docs --- docs/SUMMARY.md | 4 ++-- docs/contribute.md | 2 +- docs/index.md | 7 ------- flixOpt/elements.py | 4 ++-- flixOpt/structure.py | 2 +- 5 files changed, 6 insertions(+), 13 deletions(-) diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index f782c277a..c4a9946e9 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -1,7 +1,7 @@ - [Home](index.md) - [Getting Started](getting-started.md) -- [Concepts & Math](concepts-and-math/) -- [Examples](examples/) +- [Concepts & Math](concepts-and-math/index.md) +- [Examples](examples/index.md) - [API-Reference](api-reference/) - [Display Math example](latex-example.md) - [Contribute](contribute.md) \ No newline at end of file diff --git a/docs/contribute.md b/docs/contribute.md index 96182ffdc..1ee9a71a2 100644 --- a/docs/contribute.md +++ b/docs/contribute.md @@ -43,7 +43,7 @@ At some point, `next/minor` or `next/major` will get merged into `main` using a ## Releases As stated, we follow **Semantic Versioning**. -Right after one of the 3 [release branches](#Branches) is merged into main, a **Tag** should be added to the merge commit and pushed to the main branch. The tag has the form `v1.2.3`. +Right after one of the 3 [release branches](#branches) is merged into main, a **Tag** should be added to the merge commit and pushed to the main branch. 
The tag has the form `v1.2.3`. With this tag, a release with **Release Notes** must be created. *This is our current best practice* diff --git a/docs/index.md b/docs/index.md index 639e48ae9..1e89912eb 100644 --- a/docs/index.md +++ b/docs/index.md @@ -82,13 +82,6 @@ system.add_components(heat_pump) flixOpt transforms your energy system model into a mathematical optimization problem, solves it using state-of-the-art solvers, and returns the optimal operation strategy and investment decisions. -## 📚 Documentation - -- [Getting Started](getting-started.md) - Installation and first steps -- [Concepts](concepts/overview.md) - Core concepts and architecture -- [Examples](examples/basic.md) - Usage examples -- [API Reference](api/flow-system.md) - Full API documentation - ## 🛠️ Compatible Solvers flixOpt works with various solvers: diff --git a/flixOpt/elements.py b/flixOpt/elements.py index 5f94de868..71de7f711 100644 --- a/flixOpt/elements.py +++ b/flixOpt/elements.py @@ -70,7 +70,7 @@ def transform_data(self, flow_system: 'FlowSystem') -> None: if self.on_off_parameters is not None: self.on_off_parameters.transform_data(flow_system, self.label_full) - def infos(self, use_numpy=True, use_element_label=False) -> Dict: + def infos(self, use_numpy=True, use_element_label: bool = False) -> Dict: infos = super().infos(use_numpy, use_element_label) infos['inputs'] = [flow.infos(use_numpy, use_element_label) for flow in self.inputs] infos['outputs'] = [flow.infos(use_numpy, use_element_label) for flow in self.outputs] @@ -289,7 +289,7 @@ def transform_data(self, flow_system: 'FlowSystem'): if isinstance(self.size, InvestParameters): self.size.transform_data(flow_system) - def infos(self, use_numpy=True, use_element_label=False) -> Dict: + def infos(self, use_numpy: bool = True, use_element_label: bool = False) -> Dict: infos = super().infos(use_numpy, use_element_label) infos['is_input_in_component'] = self.is_input_in_component return infos diff --git a/flixOpt/structure.py 
b/flixOpt/structure.py index 7d51090a8..26a336738 100644 --- a/flixOpt/structure.py +++ b/flixOpt/structure.py @@ -92,7 +92,7 @@ def transform_data(self, flow_system: 'FlowSystem'): """ Transforms the data of the interface to match the FlowSystem's dimensions""" raise NotImplementedError('Every Interface needs a transform_data() method') - def infos(self, use_numpy=True, use_element_label=False) -> Dict: + def infos(self, use_numpy: bool =True, use_element_label: bool = False) -> Dict: """ Generate a dictionary representation of the object's constructor arguments. Excludes default values and empty dictionaries and lists. From a96fc2f9cf587315717f1b0aa10e1481288b4a37 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 16 Mar 2025 20:31:19 +0100 Subject: [PATCH 50/87] Improve --- docs/contribute.md | 2 +- scripts/gen_ref_pages.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/contribute.md b/docs/contribute.md index 1ee9a71a2..d4535c0c0 100644 --- a/docs/contribute.md +++ b/docs/contribute.md @@ -34,7 +34,7 @@ Following the **Semantic Versioning** guidelines, we introduced: - `next/minor`: This is where all pull requests for the next minor release (1.x.0) go. - `next/major`: This is where all pull requests for the next major release (x.0.0) go. -- Everything else remains in `feature/...`-branches. +Everything else remains in `feature/...`-branches. ## Pull requests Every feature or bugfix should be merged into one of the 3 [release branches](#branches), using **Squash and merge** or a regular **single commit**. 
diff --git a/scripts/gen_ref_pages.py b/scripts/gen_ref_pages.py index 002086ae5..8eda70037 100644 --- a/scripts/gen_ref_pages.py +++ b/scripts/gen_ref_pages.py @@ -47,7 +47,9 @@ # Create an index file for the API reference with mkdocs_gen_files.open(f"{api_dir}/index.md", "w") as index_file: index_file.write("# API Reference\n\n") - index_file.write("This section contains the documentation for all modules and classes in flixOpt.\n") + index_file.write( + f"This section contains the documentation for all modules and classes in flixOpt.\n" + f"For more information on how to use the classes and functions, see the [Concepts & Math](../concepts-and-math/index.md) section.\n") with mkdocs_gen_files.open(f"{api_dir}/SUMMARY.md", "w") as nav_file: nav_file.writelines(nav.build_literate_nav()) \ No newline at end of file From 7d420f3e0416dcb0071e6b92db4db02b16dbc65e Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 16 Mar 2025 21:37:42 +0100 Subject: [PATCH 51/87] Add mathematics to docs --- docs/concepts-and-math/index.md | 76 ++++++++++++++++++++++++--------- 1 file changed, 56 insertions(+), 20 deletions(-) diff --git a/docs/concepts-and-math/index.md b/docs/concepts-and-math/index.md index bd38c9fd9..16be7e1e6 100644 --- a/docs/concepts-and-math/index.md +++ b/docs/concepts-and-math/index.md @@ -2,17 +2,27 @@ flixOpt is built around a set of core concepts that work together to represent and optimize energy and material flow systems. This page provides a high-level overview of these concepts and how they interact. 
+## Mathematical Notation & Naming Conventions + +flixOpt uses the following naming conventions: + +- All optimization variables are denoted by italic letters (e.g., $x$, $y$, $z$) +- All parameters and constants are denoted by non italic small letters (e.g., $\text{a}$, $\text{b}$, $\text{c}$) +- All Sets are denoted by greek capital letters (e.g., $\mathcal{F}$, $\mathcal{E}$) +- All units of a set are denoted by greek small letters (e.g., $\mathcal{f}$, $\mathcal{e}$) +- The letter $i$ is used to denote an index (e.g., $i=1,\dots,\text n$) +- All time steps are denoted by the letter $\text{t}$ (e.g., $\text{t}_0$, $\text{t}_1$, $\text{t}_i$) + ## Core Concepts ### FlowSystem -The [`FlowSystem`][flixOpt.flow_system.FlowSystem] is the central organizing unit in flixOpt. It: - -- Defines the time series for the simulation -- Contains all components, buses, and flows -- Manages the effects (objectives and constraints) +The [`FlowSystem`][flixOpt.flow_system.FlowSystem] is the central organizing unit in flixOpt. +Every flixOpt model starts with creating a FlowSystem. It: -Every flixOpt model starts with creating a FlowSystem. +- Defines the timesteps for the optimization +- Contains and connects [components](#components), [buses](#buses), and [flows](#flows) +- Manages the [effects](#effects) (objectives and constraints) ### Timesteps Time steps are defined as a sequence of discrete time steps $\text{t}_i \in \mathcal{T} \quad \text{for} \quad i \in \{1, 2, \dots, \text{n}\}$ (left-aligned in its timespan). @@ -20,17 +30,53 @@ From this sequence, the corresponding time intervals $\Delta \text{t}_i \in \Del $$\Delta \text{t}_i = \text{t}_{i+1} - \text{t}_i \quad \text{for} \quad i \in \{1, 2, \dots, \text{n}-1\}$$ -Non-equidistant time steps are supported. The final time interval $\Delta \text{t}_\text n$ defaults to $\Delta \text{t}_\text n = \Delta \text{t}_{\text n-1}$, but is of course customizable. - +Non-equidistant time steps are also supported. 
### Buses -[`Bus`][flixOpt.elements.Bus] objects represent nodes or connection points in your system. They: +[`Bus`][flixOpt.elements.Bus] objects represent nodes or connection points in a FlowSystem. They: - Balance incoming and outgoing flows - Can represent physical networks like heat, electricity, or gas -- Handle infeasable balances gently by allowing the balance to be closed in return for a big Penalty (optional) +- Handle infeasible balances gently by allowing the balance to be closed in return for a big Penalty (optional) + +#### Mathematical Notation + +The balance equation for a bus is: + +$$ \label{eq:bus_balance} + \sum_{f_\text{in} \in \mathcal{F}_\text{in}} p_{f_\text{in}}(\text{t}_i) = + \sum_{f_\text{out} \in \mathcal{F}_\text{out}} p_{f_\text{out}}(\text{t}_i) +$$ + +Optionally, a Bus can have an `excess_penalty_per_flow_hour` parameter, which allows penalizing the balance for missing or excess flow-rates. +This is useful as it handles a possible infeasibility gently. + +This changes the balance to + +$$ \label{eq:bus_balance-excess} + \sum_{f_\text{in} \in \mathcal{F}_\text{in}} p_{f_\text{in}}(\text{t}_i) + \phi_\text{in}(\text{t}_i) = + \sum_{f_\text{out} \in \mathcal{F}_\text{out}} p_{f_\text{out}}(\text{t}_i) + \phi_\text{out}(\text{t}_i) +$$ + +The penalty term is defined as + +$$ \label{eq:bus_penalty} + s_{b \rightarrow \Phi}(\text{t}_i) = + \text a_{b \rightarrow \Phi}(\text{t}_i) \cdot \Delta \text{t}_i + \cdot [ \phi_\text{in}(\text{t}_i) + \phi_\text{out}(\text{t}_i) ] +$$ + +With: + +- $\mathcal{F}_\text{in}$ and $\mathcal{F}_\text{out}$ being the set of all incoming and outgoing flows +- $p_{f_\text{in}}(\text{t}_i)$ and $p_{f_\text{out}}(\text{t}_i)$ being the flow-rate at time $\text{t}_i$ for flow $f_\text{in}$ and $f_\text{out}$, respectively +- $\phi_\text{in}(\text{t}_i)$ and $\phi_\text{out}(\text{t}_i)$ being the missing or excess flow-rate at time $\text{t}_i$, respectively +- $\text{t}_i$ being the time step +- $s_{b
\rightarrow \Phi}(\text{t}_i)$ being the penalty term +- $\text a_{b \rightarrow \Phi}(\text{t}_i)$ being the penalty coefficient (`excess_penalty_per_flow_hour`) + ### Flows @@ -89,16 +135,6 @@ Any model created with flixOpt can be extended or modified using the great [lino This allows to adjust your model to very specific requirements without loosing the convenience of flixOpt. -## Mathematical Notation & Naming Conventions - -flixOpt uses the following naming conventions: - -- All optimization variables are denoted by italic letters (e.g., $x$, $y$, $z$) -- All parameters and constants are denoted by non italic small letters (e.g., $\text{a}$, $\text{b}$, $\text{c}$) -- The letter $i$ is used to denote an index (e.g., $i=1,\dots,\text n$) -- All time steps are denoted by the letter $\text{t}$ (e.g., $\text{t}_0$, $\text{t}_1$, $\text{t}_i$) - - ## Architechture (outdated) ![Architecture](../images/architecture_flixOpt.png) From 8b01e9189da97c6ef0bb07a5a7a5e30d30fcec34 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 17 Mar 2025 09:12:37 +0100 Subject: [PATCH 52/87] Improve example docs adn remove math from docstrings --- docs/SUMMARY.md | 5 +- .../Mathematical Description.md | 108 ++++++++++++++++++ docs/concepts-and-math/index.md | 60 ++-------- docs/examples/index.md | 30 ----- docs/latex-example.md | 106 ----------------- flixOpt/elements.py | 28 +---- 6 files changed, 118 insertions(+), 219 deletions(-) create mode 100644 docs/concepts-and-math/Mathematical Description.md delete mode 100644 docs/examples/index.md delete mode 100644 docs/latex-example.md diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index c4a9946e9..c270941af 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -1,7 +1,6 @@ - [Home](index.md) - [Getting Started](getting-started.md) -- [Concepts & Math](concepts-and-math/index.md) -- [Examples](examples/index.md) +- [Concepts & Math](concepts-and-math/) +- [Examples](examples/) - 
[API-Reference](api-reference/) -- [Display Math example](latex-example.md) - [Contribute](contribute.md) \ No newline at end of file diff --git a/docs/concepts-and-math/Mathematical Description.md b/docs/concepts-and-math/Mathematical Description.md new file mode 100644 index 000000000..34549a2b9 --- /dev/null +++ b/docs/concepts-and-math/Mathematical Description.md @@ -0,0 +1,108 @@ + +# Mathematical Notation + +## Naming Conventions + +flixOpt uses the following naming conventions: + +- All optimization variables are denoted by italic letters (e.g., $x$, $y$, $z$) +- All parameters and constants are denoted by non-italic small letters (e.g., $\text{a}$, $\text{b}$, $\text{c}$) +- All sets are denoted by calligraphic capital letters (e.g., $\mathcal{F}$, $\mathcal{E}$) +- All elements of a set are denoted by calligraphic small letters (e.g., $\mathcal{f}$, $\mathcal{e}$) +- The letter $i$ is used to denote an index (e.g., $i=1,\dots,\text n$) +- All time steps are denoted by the letter $\text{t}$ (e.g., $\text{t}_0$, $\text{t}_1$, $\text{t}_i$) + +## Buses + +The balance equation for a bus is: + +$$ \label{eq:bus_balance} + \sum_{f_\text{in} \in \mathcal{F}_\text{in}} p_{f_\text{in}}(\text{t}_i) = + \sum_{f_\text{out} \in \mathcal{F}_\text{out}} p_{f_\text{out}}(\text{t}_i) +$$ + +Optionally, a Bus can have an `excess_penalty_per_flow_hour` parameter, which allows penalizing the balance for missing or excess flow-rates. +This is useful as it handles a possible infeasibility gently.
+ +This changes the balance to + +$$ \label{eq:bus_balance-excess} + \sum_{f_\text{in} \in \mathcal{F}_\text{in}} p_{f_\text{in}}(\text{t}_i) + \phi_\text{in}(\text{t}_i) = + \sum_{f_\text{out} \in \mathcal{F}_\text{out}} p_{f_\text{out}}(\text{t}_i) + \phi_\text{out}(\text{t}_i) +$$ + +The penalty term is defined as + +$$ \label{eq:bus_penalty} + s_{b \rightarrow \Phi}(\text{t}_i) = + \text a_{b \rightarrow \Phi}(\text{t}_i) \cdot \Delta \text{t}_i + \cdot [ \phi_\text{in}(\text{t}_i) + \phi_\text{out}(\text{t}_i) ] +$$ + +With: + +- $\mathcal{F}_\text{in}$ and $\mathcal{F}_\text{out}$ being the set of all incoming and outgoing flows +- $p_{f_\text{in}}(\text{t}_i)$ and $p_{f_\text{out}}(\text{t}_i)$ being the flow-rate at time $\text{t}_i$ for flow $f_\text{in}$ and $f_\text{out}$, respectively +- $\phi_\text{in}(\text{t}_i)$ and $\phi_\text{out}(\text{t}_i)$ being the missing or excess flow-rate at time $\text{t}_i$, respectively +- $\text{t}_i$ being the time step +- $s_{b \rightarrow \Phi}(\text{t}_i)$ being the penalty term +- $\text a_{b \rightarrow \Phi}(\text{t}_i)$ being the penalty coefficient (`excess_penalty_per_flow_hour`) + +## Flows + +The flow-rate is the main optimization variable of the Flow. It's limited by the size of the Flow and relative bounds \eqref{eq:flow_rate}. 
+ +$$ \label{eq:flow_rate} + \text P \cdot \text p^{\text{L}}_{\text{rel}}(\text{t}_{i}) + \leq p(\text{t}_{i}) \leq + \text P \cdot \text p^{\text{U}}_{\text{rel}}(\text{t}_{i}) +$$ + +With: + +- $\text P$ being the size of the Flow +- $p(\text{t}_{i})$ being the flow-rate at time $\text{t}_{i}$ +- $\text p^{\text{L}}_{\text{rel}}(\text{t}_{i})$ being the relative lower bound (typically 0) +- $\text p^{\text{U}}_{\text{rel}}(\text{t}_{i})$ being the relative upper bound (typically 1) + +With $\text p^{\text{L}}_{\text{rel}}(\text{t}_{i}) = 0$ and $\text p^{\text{U}}_{\text{rel}}(\text{t}_{i}) = 1$, +equation \eqref{eq:flow_rate} simplifies to + +$$ + 0 \leq p(\text{t}_{i}) \leq \text P +$$ + + +This mathematical Formulation can be extended or changed when using [OnOffParameters](#omoffparameters) +to define the On/Off state of the Flow, or [InvestParameters](#investments), +which changes the size of the Flow from a constant to an optimization variable. + +## LinearConverters +[`LinearConverters`][flixOpt.components.LinearConverter] define a ratio between incoming and outgoing [Flows](#flows). 
+ +$$ \label{eq:Linear-Transformer-Ratio} + \sum_{f_{\text{in}} \in \mathcal F_{in}} \text a_{f_{\text{in}}}(\text{t}_i) \cdot p_{f_\text{in}}(\text{t}_i) = \sum_{f_{\text{out}} \in \mathcal F_{out}} \text b_{f_\text{out}}(\text{t}_i) \cdot p_{f_\text{out}}(\text{t}_i) +$$ + +With: + +- $\mathcal F_{in}$ and $\mathcal F_{out}$ being the set of all incoming and outgoing flows +- $p_{f_\text{in}}(\text{t}_i)$ and $p_{f_\text{out}}(\text{t}_i)$ being the flow-rate at time $\text{t}_i$ for flow $f_\text{in}$ and $f_\text{out}$, respectively +- $\text a_{f_\text{in}}(\text{t}_i)$ and $\text b_{f_\text{out}}(\text{t}_i)$ being the ratio of the flow-rate at time $\text{t}_i$ for flow $f_\text{in}$ and $f_\text{out}$, respectively + +With one incoming **Flow** and one outgoing **Flow**, this can be simplified to: + +$$ \label{eq:Linear-Transformer-Ratio-simple} + \text a(\text{t}_i) \cdot p_{f_\text{in}}(\text{t}_i) = p_{f_\text{out}}(\text{t}_i) +$$ + +where $\text a$ can be interpreted as the conversion efficiency of the **LinearConverter**. +#### Piecewise Conversion factors +The conversion efficiency can be defined as a piecewise function. + +## Effects +## Features +### InvestParameters +### OnOffParameters + +## Calculation Modes \ No newline at end of file diff --git a/docs/concepts-and-math/index.md b/docs/concepts-and-math/index.md index 16be7e1e6..af7619ad0 100644 --- a/docs/concepts-and-math/index.md +++ b/docs/concepts-and-math/index.md @@ -2,17 +2,6 @@ flixOpt is built around a set of core concepts that work together to represent and optimize energy and material flow systems. This page provides a high-level overview of these concepts and how they interact. 
-## Mathematical Notation & Naming Conventions - -flixOpt uses the following naming conventions: - -- All optimization variables are denoted by italic letters (e.g., $x$, $y$, $z$) -- All parameters and constants are denoted by non italic small letters (e.g., $\text{a}$, $\text{b}$, $\text{c}$) -- All Sets are denoted by greek capital letters (e.g., $\mathcal{F}$, $\mathcal{E}$) -- All units of a set are denoted by greek small letters (e.g., $\mathcal{f}$, $\mathcal{e}$) -- The letter $i$ is used to denote an index (e.g., $i=1,\dots,\text n$) -- All time steps are denoted by the letter $\text{t}$ (e.g., $\text{t}_0$, $\text{t}_1$, $\text{t}_i$) - ## Core Concepts ### FlowSystem @@ -37,54 +26,18 @@ Non-equidistant time steps are also supported. [`Bus`][flixOpt.elements.Bus] objects represent nodes or connection points in a FlowSystem. They: -- Balance incoming and outgoing flows +- Balance incoming and outgoing flows \eqref{eq:bus_balance} - Can represent physical networks like heat, electricity, or gas -- Handle infeasible balances gently by allowing the balance to be closed in return for a big Penalty (optional) - -#### Mathematical Notation - -The balance equation for a bus is: - -$$ \label{eq:bus_balance} - \sum_{f_\text{in} \in \mathcal{F}_\text{in}} p_{f_\text{in}}(\text{t}_i) = - \sum_{f_\text{out} \in \mathcal{F}_\text{out}} p_{f_\text{out}}(\text{t}_i) -$$ - -Optionally, a Bus can have a `excess_penalty_per_flow_hour` parameter, which allows to penalize the balance for missing or excess flow-rates. -This is usefull as it handles a possible ifeasiblity gently. 
- -This changes the balance to - -$$ \label{eq:bus_balance-excess} - \sum_{f_\text{in} \in \mathcal{F}_\text{in}} p_{f_ \text{in}}(\text{t}_i) + \phi_\text{in}(\text{t}_i) = - \sum_{f_\text{out} \in \mathcal{F}_\text{out}} p_{f_\text{out}}(\text{t}_i) + \phi_\text{out}(\text{t}_i) -$$ - -The penalty term is defined as - -$$ \label{eq:bus_penalty} - s_{b \rightarrow \Phi}(\text{t}_i) = - \text a_{b \rightarrow \Phi}(\text{t}_i) \cdot \Delta \text{t}_i - \cdot [ \phi_\text{in}(\text{t}_i) + \phi_\text{out}(\text{t}_i) ] -$$ - -With: - -- $\mathcal{F}_\text{in}$ and $\mathcal{F}_\text{out}$ being the set of all incoming and outgoing flows -- $p_{f_\text{in}}(\text{t}_i)$ and $p_{f_\text{out}}(\text{t}_i)$ being the flow-rate at time $\text{t}_i$ for flow $f_\text{in}$ and $f_\text{out}$, respectively -- $\phi_\text{in}(\text{t}_i)$ and $\phi_\text{out}(\text{t}_i)$ being the missing or excess flow-rate at time $\text{t}_i$, respectively -- $\text{t}_i$ being the time step -- $s_{b \rightarrow \Phi}(\text{t}_i)$ being the penalty term -- $\text a_{b \rightarrow \Phi}(\text{t}_i)$ being the penalty coefficient (`excess_penalty_per_flow_hour`) - +- Handle infeasible balances gently by allowing the balance to be closed in return for a big Penalty (optional) \eqref{eq:bus_balance-excess} ### Flows -[`Flow`][flixOpt.elements.Flow] objects represent the movement of energy or material between components and buses. They: +[`Flow`][flixOpt.elements.Flow] objects represent the movement of energy or material between a [Bus](#buses) and a [Component](#components) in a predefined direction. -- Have a size (fixed or part of an investment decision) +- Have a `flow_rate`, which is the main optimization variable of a Flow +- Have a `size` which defines how much energy or material can be moved (fixed or part of an investment decision) +- Have constraints to limit the flow-rate (min/max, total flow hours, on/off etc.) 
- Can have fixed profiles (for demands or renewable generation) -- Can have constraints (min/max, total flow hours, etc.) - Can have [Effects](#effects) associated by their use (operation, investment, on/off, ...) ### Components @@ -135,6 +88,7 @@ Any model created with flixOpt can be extended or modified using the great [lino This allows to adjust your model to very specific requirements without loosing the convenience of flixOpt. + ## Architechture (outdated) ![Architecture](../images/architecture_flixOpt.png) diff --git a/docs/examples/index.md b/docs/examples/index.md deleted file mode 100644 index c7a3d057b..000000000 --- a/docs/examples/index.md +++ /dev/null @@ -1,30 +0,0 @@ -# Examples - -## Minimal Example - -```python -{! ../examples/00_Minmal/minimal_example.py !} -``` - -## Simple example - -```python -{! ../examples/01_Simple/simple_example.py !} -``` - -## Complex example -This saves the results of a calculation to file and reloads them to analyze the results -### Build the Model -```python -{! ../examples/02_Complex/complex_example.py !} -``` -### Load the Results from file -```python -{! ../examples/02_Complex/complex_example_results.py !} -``` - -## Calculation Mode comparison -**Note:** This example relies on time series data. You can find it in the `examples` folder of the flixOpt repository. -```python -{! ../examples/03_Calculation_types/example_calculation_types.py !} -``` diff --git a/docs/latex-example.md b/docs/latex-example.md deleted file mode 100644 index 2591de954..000000000 --- a/docs/latex-example.md +++ /dev/null @@ -1,106 +0,0 @@ -# Effects and Mathematical Formulations - -This page demonstrates how to use LaTeX in flixOpt documentation and explains the Effects system. - -## Effects in flixOpt - -Effects in flixOpt represent impacts or metrics related to your energy system, such as costs, emissions, resource consumption, etc. One effect is designated as the optimization objective (typically costs), while others can have constraints. 
- -## Mathematical Formulations - -### Storage Model - -The state of charge of a storage evolves according to: - -$$SOC(t+1) = SOC(t) \cdot (1 - \lambda \cdot \Delta t) + \eta_{charge} \cdot P_{in}(t) \cdot \Delta t - \frac{P_{out}(t)}{\eta_{discharge}} \cdot \Delta t$$ - -Where: - -- $SOC(t)$ is the state of charge at time $t$ -- $\lambda$ is the self-discharge rate -- $\eta_{charge}$ is the charging efficiency -- $\eta_{discharge}$ is the discharging efficiency -- $P_{in}(t)$ is the charging power -- $P_{out}(t)$ is the discharging power -- $\Delta t$ is the time step - -### Linear Converter Efficiency - -For a linear converter, the relationship between input and output is: - -$$P_{out}(t) = \eta \cdot P_{in}(t)$$ - -Where: -- $P_{out}(t)$ is the output power -- $P_{in}(t)$ is the input power -- $\eta$ is the efficiency - -### Heat Pump COP - -For a heat pump, the relationship is: - -$$Q_{th}(t) = COP \cdot P_{el}(t)$$ - -Where: -- $Q_{th}(t)$ is the heat output -- $P_{el}(t)$ is the electrical input -- $COP$ is the coefficient of performance - -### Objective Function - -The objective function for cost minimization is: - -$$\min \left( \sum_{t=1}^{T} \sum_{c \in C} c_{op}(t) \cdot P_c(t) \cdot \Delta t + \sum_{c \in C} c_{inv} \cdot CAP_c \right)$$ - -Where: -- $c_{op}(t)$ is the operating cost at time $t$ -- $P_c(t)$ is the power of component $c$ at time $t$ -- $c_{inv}$ is the investment cost -- $CAP_c$ is the capacity of component $c$ - -## Effects API Documentation - -Effects are created using the `Effect` class: - -```python -import flixOpt as fo - -# Create a cost effect (optimization objective) -cost_effect = fo.Effect( - label="costs", - unit="€", - description="Total costs", - is_objective=True # This effect will be minimized -) - -# Create a CO2 emission effect with constraints -co2_effect = fo.Effect( - label="co2_emissions", - unit="kg_CO2", - description="CO2 emissions", - maximum_total=1000 # Maximum total emissions allowed -) - -# Add effects to 
the system -system.add_effects(cost_effect, co2_effect) -``` - -## Inline Formulas - -You can also use inline formulas like $E = mc^2$ or reference variables like $\eta_{boiler}$ within your text. - -## Multiple Equations Example - -The efficiency of a CHP unit must satisfy: - -$$\eta_{el} + \eta_{th} \leq \eta_{max}$$ - -The total flow through a bus must be balanced: - -$$\sum_{i \in I} F_{i,in}(t) = \sum_{j \in J} F_{j,out}(t)$$ - -For components with on/off decisions, the flow must satisfy: - -$$F_{min} \cdot \delta(t) \leq F(t) \leq F_{max} \cdot \delta(t)$$ - -Where $\delta(t)$ is a binary variable indicating if the component is on. diff --git a/flixOpt/elements.py b/flixOpt/elements.py index 71de7f711..63f451b2c 100644 --- a/flixOpt/elements.py +++ b/flixOpt/elements.py @@ -160,33 +160,7 @@ def __init__(self): class Flow(Element): r""" A **Flow** moves energy (or material) between a [Bus][flixOpt.elements.Bus] and a [Component][flixOpt.elements.Component] in a predefined direction. - The flow-rate $p(\text{t}_{i})$ is the main optimization variable of the **Flow**. - The size $\text P$ of the **Flow** combined with a relative upper bound $\text p_{\text{rel}}^{\text{U}}(\text{t}_{i})$ - and lower bound $\text p^{\text{L}}_{\text{rel}}(\text{t}_{i})$ limits the flow-rate per time step $p(\text{t}_{i})$. - - $$ \label{eq:flow_rate} - \text P \cdot \text p^{\text{L}}_{\text{rel}}(\text{t}_{i}) - \leq p(\text{t}_{i}) \leq - \text P \cdot \text p^{\text{U}}_{\text{rel}}(\text{t}_{i}) \tag{1} - $$ - - With $\text p^{\text{L}}_{\text{rel}}(\text{t}_{i}) = 0$ and $\text p^{\text{U}}_{\text{rel}}(\text{t}_{i}) = 1$, - equation \eqref{eq:flow_rate} simplifies to - - $$ - 0 \leq p(\text{t}_{i}) \leq \text P - $$ - - With $\text p^{\text{L}}_{\text{rel}}(\text{t}_{i}) = \text p^{\text{U}}_{\text{rel}}(\text{t}_{i})$, - the flow-rate $p(\text{t}_{i})$ is fixed. 
- - $$ - p(\text{t}_{i}) = \text p^{\text{L}}_{\text{rel}}(\text{t}_{i}) \cdot \text P - $$ - - This mathematical Formulation can be extended or changed when using [`OnOffParameters`][flixOpt.interface.OnOffParameters] - to define the On/Off state of the Flow, or [`InvestParameters`][flixOpt.interface.InvestParameters], - which changes the size of the Flow to be optimized. + The flow-rate is the main optimization variable of the **Flow**. """ def __init__( From 780d1ffb0ec9773a7cac319ed68c0bfae5e892c8 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 17 Mar 2025 09:13:46 +0100 Subject: [PATCH 53/87] Improve example docs adn remove math from docstrings --- docs/examples/00-Minimal Example.md | 5 +++++ docs/examples/01-Basic Example.md | 5 +++++ docs/examples/02-Complex Example.md | 10 ++++++++++ docs/examples/03-Calculation Modes.md | 5 +++++ 4 files changed, 25 insertions(+) create mode 100644 docs/examples/00-Minimal Example.md create mode 100644 docs/examples/01-Basic Example.md create mode 100644 docs/examples/02-Complex Example.md create mode 100644 docs/examples/03-Calculation Modes.md diff --git a/docs/examples/00-Minimal Example.md b/docs/examples/00-Minimal Example.md new file mode 100644 index 000000000..c61283951 --- /dev/null +++ b/docs/examples/00-Minimal Example.md @@ -0,0 +1,5 @@ +# Minimal Example + +```python +{! ../examples/00_Minmal/minimal_example.py !} +``` \ No newline at end of file diff --git a/docs/examples/01-Basic Example.md b/docs/examples/01-Basic Example.md new file mode 100644 index 000000000..600f2516a --- /dev/null +++ b/docs/examples/01-Basic Example.md @@ -0,0 +1,5 @@ +# Simple example + +```python +{! 
../examples/01_Simple/simple_example.py !} +``` \ No newline at end of file diff --git a/docs/examples/02-Complex Example.md b/docs/examples/02-Complex Example.md new file mode 100644 index 000000000..d5373c083 --- /dev/null +++ b/docs/examples/02-Complex Example.md @@ -0,0 +1,10 @@ +# Complex example +This saves the results of a calculation to file and reloads them to analyze the results +## Build the Model +```python +{! ../examples/02_Complex/complex_example.py !} +``` +## Load the Results from file +```python +{! ../examples/02_Complex/complex_example_results.py !} +``` \ No newline at end of file diff --git a/docs/examples/03-Calculation Modes.md b/docs/examples/03-Calculation Modes.md new file mode 100644 index 000000000..e94f815a8 --- /dev/null +++ b/docs/examples/03-Calculation Modes.md @@ -0,0 +1,5 @@ +# Calculation Mode comparison +**Note:** This example relies on time series data. You can find it in the `examples` folder of the flixOpt repository. +```python +{! ../examples/03_Calculation_types/example_calculation_types.py !} +``` From 914ef3789aa1e3fda06d206f3a32a14893f94b3f Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 17 Mar 2025 10:17:51 +0100 Subject: [PATCH 54/87] Add streamlit evaluation --- flixOpt/results.py | 4 + flixOpt/results_explorer.py | 606 ++++++++++++++++++++++++++++++++++++ 2 files changed, 610 insertions(+) create mode 100644 flixOpt/results_explorer.py diff --git a/flixOpt/results.py b/flixOpt/results.py index 326d1c693..4f15b84ef 100644 --- a/flixOpt/results.py +++ b/flixOpt/results.py @@ -13,6 +13,7 @@ from . import plotting from .core import TimeSeriesCollection from .io import _results_structure +from .results_explorer import explore_results if TYPE_CHECKING: from .calculation import Calculation, SegmentedCalculation @@ -41,6 +42,9 @@ class CalculationResults: hours_per_timestep: xr.DataArray The duration of each timestep in hours. 
""" + + explore_results = explore_results + @classmethod def from_file(cls, folder: Union[str, pathlib.Path], name: str): """ Create CalculationResults directly from file""" diff --git a/flixOpt/results_explorer.py b/flixOpt/results_explorer.py new file mode 100644 index 000000000..7d58e5a1e --- /dev/null +++ b/flixOpt/results_explorer.py @@ -0,0 +1,606 @@ +import streamlit as st +import pandas as pd +import plotly.graph_objects as go +from pathlib import Path +import tempfile +import webbrowser +import subprocess +import os +import atexit +import shutil +import sys +from typing import Union, Optional, Literal, Dict, List + +def explore_results(self, port: int = 8501): + """ + Launch a Streamlit app to explore the calculation results. + + This method creates a temporary Streamlit script that directly references + the current CalculationResults object and launches it in a browser. + + Args: + port: Port to use for the Streamlit server + + Returns: + subprocess.Popen: The running Streamlit process + """ + # Create a temporary file for the Streamlit app + with tempfile.NamedTemporaryFile(suffix='.py', delete=False) as f: + app_path = f.name + app_code = f""" +import streamlit as st +import pandas as pd +import numpy as np +import plotly.graph_objects as go +import plotly.express as px +import sys +from pathlib import Path + +# Add the current directory to path so we can import the calculation results +sys.path.append(r"{os.getcwd()}") + +# Set page config +st.set_page_config( + page_title="FlixOpt Results Explorer", + page_icon="📊", + layout="wide", + initial_sidebar_state="expanded" +) + +# Cache the calculation loading +@st.cache_resource +def get_calculation_results(): + # Import the relevant modules + try: + # Load the model from saved files (more reliable) + from flixOpt.results import CalculationResults + return CalculationResults.from_file(r"{self.folder}", "{self.name}") + except Exception as e: + st.error(f"Error loading calculation results: {{e}}") + return 
None + +# Load the calculation results +results = get_calculation_results() + +if results is None: + st.error("Failed to load calculation results.") + st.stop() + +# Create sidebar for navigation +st.sidebar.title("FlixOpt Results Explorer") +pages = ["Overview", "Components", "Buses", "Effects", "Variables", "Heatmaps"] +selected_page = st.sidebar.radio("Navigation", pages) + +# Overview page +if selected_page == "Overview": + st.title("Calculation Overview") + + # Model information + st.header("Model Information") + col1, col2 = st.columns(2) + + with col1: + st.write(f"**Name:** {{results.name}}") + st.write(f"**Time Steps:** {{len(results.timesteps_extra)}}") + if len(results.timesteps_extra) > 0: + st.write(f"**Time Range:** {{results.timesteps_extra[0]}} to {{results.timesteps_extra[-1]}}") + + with col2: + st.write(f"**Components:** {{len(results.components)}}") + st.write(f"**Buses:** {{len(results.buses)}}") + st.write(f"**Effects:** {{len(results.effects)}}") + + # Additional info + if hasattr(results, 'infos') and results.infos: + st.subheader("Additional Information") + st.json(results.infos) + + # Network visualization (simplified) + st.header("Network Structure") + + # Create a list of components and buses for network diagram + nodes = [] + edges = [] + + # Add components as nodes + for comp_name in results.components: + nodes.append({{"id": comp_name, "type": "component"}}) + + # Add buses as nodes + for bus_name in results.buses: + nodes.append({{"id": bus_name, "type": "bus"}}) + + # Show component connections + st.subheader("Component Connections") + connections_data = [] + + for comp_name, comp in results.components.items(): + for bus_name in comp.inputs + comp.outputs: + connections_data.append({{"Component": comp_name, "Bus": bus_name, + "Type": "Input" if bus_name in comp.inputs else "Output"}}) + + if connections_data: + st.dataframe(pd.DataFrame(connections_data)) + +# Components page +elif selected_page == "Components": + 
st.title("Components") + + # Component selector + component_names = list(results.components.keys()) + component_name = st.selectbox("Select a component:", component_names) + + if component_name: + component = results.components[component_name] + + st.header(f"Component: {{component_name}}") + + # Component tabs + tabs = st.tabs(["Flow Rates", "Charge State (if storage)", "Variables"]) + + # Flow Rates tab + with tabs[0]: + try: + flow_rates = component.flow_rates(with_last_timestep=True).to_dataframe() + + if not flow_rates.empty: + st.subheader("Flow Rates") + fig = go.Figure() + + for column in flow_rates.columns: + fig.add_trace(go.Scatter( + x=flow_rates.index, + y=flow_rates[column], + mode='lines', + name=column + )) + + fig.update_layout( + title=f"Flow Rates for {{component_name}}", + xaxis_title="Time", + yaxis_title="Flow Rate", + height=500 + ) + + st.plotly_chart(fig, use_container_width=True) + + # Also show as dataframe if requested + if st.checkbox("Show flow rates as table"): + st.dataframe(flow_rates) + else: + st.info("No flow rates data available for this component.") + except Exception as e: + st.error(f"Error displaying flow rates: {{e}}") + + # Charge State tab + with tabs[1]: + if component.is_storage: + try: + st.subheader("Charge State") + + # Get charge state data + charge_state = component.charge_state.solution.to_dataframe() + + fig = go.Figure() + fig.add_trace(go.Scatter( + x=charge_state.index, + y=charge_state.values.flatten(), + mode='lines', + name='Charge State' + )) + + fig.update_layout( + title=f"Charge State for {{component_name}}", + xaxis_title="Time", + yaxis_title="Charge", + height=500 + ) + + st.plotly_chart(fig, use_container_width=True) + + # Show statistics + st.subheader("Charge State Statistics") + charge_vals = charge_state.values.flatten() + + col1, col2, col3, col4 = st.columns(4) + col1.metric("Minimum", f"{{charge_vals.min():.2f}}") + col2.metric("Maximum", f"{{charge_vals.max():.2f}}") + col3.metric("Average", 
f"{{charge_vals.mean():.2f}}") + col4.metric("Final", f"{{charge_vals[-1]:.2f}}") + + # Also show as dataframe if requested + if st.checkbox("Show charge state as table"): + st.dataframe(charge_state) + except Exception as e: + st.error(f"Error displaying charge state: {{e}}") + else: + st.info(f"Component {{component_name}} is not a storage component.") + + # Variables tab + with tabs[2]: + st.subheader("Component Variables") + + for var_name in component._variables: + try: + var = component.variables[var_name] + + # Create an expander for each variable + with st.expander(f"Variable: {{var_name}}"): + var_solution = var.solution + + # Check if this is a time-based variable + if 'time' in var_solution.dims: + # Plot time series + df = var_solution.to_dataframe() + + fig = go.Figure() + fig.add_trace(go.Scatter( + x=df.index, + y=df[var_name], + mode='lines', + name=var_name + )) + + fig.update_layout( + title=f"{{var_name}} Time Series", + xaxis_title="Time", + yaxis_title="Value", + height=300 + ) + + st.plotly_chart(fig, use_container_width=True) + else: + # Show scalar value + st.write(f"Value: {{var_solution.values}}") + except Exception as e: + st.error(f"Error displaying variable {{var_name}}: {{e}}") + +# Buses page +elif selected_page == "Buses": + st.title("Buses") + + # Bus selector + bus_names = list(results.buses.keys()) + bus_name = st.selectbox("Select a bus:", bus_names) + + if bus_name: + bus = results.buses[bus_name] + + st.header(f"Bus: {{bus_name}}") + + # Show flow rates + try: + flow_rates = bus.flow_rates(with_last_timestep=True).to_dataframe() + + if not flow_rates.empty: + st.subheader("Flow Rates") + fig = go.Figure() + + for column in flow_rates.columns: + fig.add_trace(go.Scatter( + x=flow_rates.index, + y=flow_rates[column], + mode='lines', + name=column + )) + + fig.update_layout( + title=f"Flow Rates for {{bus_name}}", + xaxis_title="Time", + yaxis_title="Flow Rate", + height=500 + ) + + st.plotly_chart(fig, use_container_width=True) + 
+ # Calculate and show balance + st.subheader("Flow Balance") + + inputs = [col for col in flow_rates.columns if col in bus.inputs] + outputs = [col for col in flow_rates.columns if col in bus.outputs] + + balance_df = pd.DataFrame(index=flow_rates.index) + + if inputs: + balance_df['Total Input'] = flow_rates[inputs].sum(axis=1) + else: + balance_df['Total Input'] = 0 + + if outputs: + balance_df['Total Output'] = flow_rates[outputs].sum(axis=1) + else: + balance_df['Total Output'] = 0 + + balance_df['Net Flow'] = balance_df['Total Input'] + balance_df['Total Output'] + + fig = go.Figure() + for column in balance_df.columns: + fig.add_trace(go.Scatter( + x=balance_df.index, + y=balance_df[column], + mode='lines', + name=column + )) + + fig.update_layout( + title=f"Flow Balance for {{bus_name}}", + xaxis_title="Time", + yaxis_title="Flow Rate", + height=400 + ) + + st.plotly_chart(fig, use_container_width=True) + + # Also show as dataframe if requested + if st.checkbox("Show flow data as table"): + st.dataframe(flow_rates) + else: + st.info("No flow rates data available for this bus.") + except Exception as e: + st.error(f"Error displaying flow rates: {{e}}") + + # Show inputs and outputs + col1, col2 = st.columns(2) + + with col1: + st.subheader("Inputs") + for input_name in bus.inputs: + st.write(f"- {{input_name}}") + + with col2: + st.subheader("Outputs") + for output_name in bus.outputs: + st.write(f"- {{output_name}}") + +# Effects page +elif selected_page == "Effects": + st.title("Effects") + + # Effect selector + effect_names = list(results.effects.keys()) + + if effect_names: + effect_name = st.selectbox("Select an effect:", effect_names) + + if effect_name: + effect = results.effects[effect_name] + + st.header(f"Effect: {{effect_name}}") + + # List variables + st.subheader("Variables") + + for var_name in effect._variables: + try: + var = effect.variables[var_name] + + # Create an expander for each variable + with st.expander(f"Variable: {{var_name}}"): + 
var_solution = var.solution + + # Check if this is a time-based variable + if 'time' in var_solution.dims: + # Plot time series + df = var_solution.to_dataframe() + + fig = go.Figure() + fig.add_trace(go.Scatter( + x=df.index, + y=df[var_name], + mode='lines', + name=var_name + )) + + fig.update_layout( + title=f"{{var_name}} Time Series", + xaxis_title="Time", + yaxis_title="Value", + height=300 + ) + + st.plotly_chart(fig, use_container_width=True) + else: + # Show scalar value + st.write(f"Value: {{var_solution.values}}") + except Exception as e: + st.error(f"Error displaying variable {{var_name}}: {{e}}") + else: + st.info("No effects available in this calculation.") + +# Variables page +elif selected_page == "Variables": + st.title("Model Variables") + + # Add a filter option + variable_filter = st.text_input("Filter variables by name:") + + # Get all variables and apply filter + all_variables = list(results.model.variables) + + if variable_filter: + filtered_variables = [v for v in all_variables if variable_filter.lower() in v.lower()] + else: + filtered_variables = all_variables + + st.write(f"Showing {{len(filtered_variables)}} of {{len(all_variables)}} variables") + + # Variable selection + variable_name = st.selectbox("Select a variable:", filtered_variables) + + if variable_name: + try: + variable = results.model.variables[variable_name] + var_solution = variable.solution + + st.header(f"Variable: {{variable_name}}") + + # Basic info + st.subheader("Information") + st.write(f"**Dimensions:** {{', '.join(var_solution.dims)}}") + st.write(f"**Shape:** {{var_solution.shape}}") + + # Visualization based on dimensionality + if 'time' in var_solution.dims: + st.subheader("Time Series") + + df = var_solution.to_dataframe() + + # Simple case: just time dimension + if len(df.columns) == 1: + fig = go.Figure() + fig.add_trace(go.Scatter( + x=df.index, + y=df[variable_name], + mode='lines', + name=variable_name + )) + + fig.update_layout( + 
title=f"{{variable_name}} Time Series", + xaxis_title="Time", + yaxis_title="Value", + height=500 + ) + + st.plotly_chart(fig, use_container_width=True) + + # Also show as dataframe if requested + if st.checkbox("Show data as table"): + st.dataframe(df) + else: + # Multi-dimensional + st.write("This variable has multiple dimensions. Choose visualization type:") + + viz_type = st.radio( + "Visualization type:", + ["Line chart (all dimensions)", "Heatmap", "Raw data table"] + ) + + if viz_type == "Line chart (all dimensions)": + fig = go.Figure() + + # Limited to first 20 dimensions to avoid overloading + columns_to_plot = list(df.columns)[:20] + + if len(df.columns) > 20: + st.warning(f"Variable has {{len(df.columns)}} dimensions. Showing only first 20.") + + for column in columns_to_plot: + fig.add_trace(go.Scatter( + x=df.index, + y=df[column], + mode='lines', + name=str(column) + )) + + fig.update_layout( + title=f"{{variable_name}} Time Series (Multiple Dimensions)", + xaxis_title="Time", + yaxis_title="Value", + height=600 + ) + + st.plotly_chart(fig, use_container_width=True) + + elif viz_type == "Heatmap": + # Try to use the built-in heatmap function + try: + fig = results.plot_heatmap( + variable=variable_name, + show=False, + save=False + ) + st.plotly_chart(fig, use_container_width=True) + except Exception as e: + st.error(f"Error creating heatmap: {{e}}") + + elif viz_type == "Raw data table": + st.dataframe(df) + else: + # Non-time series data + st.subheader("Values") + st.write(var_solution.values) + except Exception as e: + st.error(f"Error displaying variable {{variable_name}}: {{e}}") + +# Heatmaps page +elif selected_page == "Heatmaps": + st.title("Heatmap Generator") + + # Get time-based variables + time_vars = [var_name for var_name, var in results.model.variables.items() + if 'time' in var.solution.dims] + + # Variable selection + variable_name = st.selectbox("Select a variable:", time_vars) + + if variable_name: + # Configure heatmap settings + 
st.subheader("Heatmap Settings") + + col1, col2, col3 = st.columns(3) + + with col1: + timeframes = st.selectbox( + "Timeframe grouping:", + ["YS", "MS", "W", "D", "h", "15min", "min"], + index=2 # Default to "W" + ) + + with col2: + timesteps = st.selectbox( + "Timesteps per frame:", + ["W", "D", "h", "15min", "min"], + index=2 # Default to "h" + ) + + with col3: + color_map = st.selectbox( + "Color map:", + ["portland", "viridis", "plasma", "inferno", "magma", "cividis", "RdBu", "Blues", "YlOrRd"], + index=0 + ) + + # Generate button + if st.button("Generate Heatmap"): + try: + st.subheader(f"Heatmap for {{variable_name}}") + + fig = results.plot_heatmap( + variable=variable_name, + heatmap_timeframes=timeframes, + heatmap_timesteps_per_frame=timesteps, + color_map=color_map, + show=False, + save=False + ) + + st.plotly_chart(fig, use_container_width=True) + except Exception as e: + st.error(f"Error generating heatmap: {{e}}") +""" + + f.write(app_code.encode('utf-8')) + + # Construct the command to run Streamlit + cmd = [sys.executable, "-m", "streamlit", "run", app_path, "--server.port", str(port)] + + # Launch the Streamlit app + process = subprocess.Popen(cmd) + + # Open browser + webbrowser.open(f'http://localhost:{port}') + + # Set up cleanup of temporary file when the process exits + def cleanup_temp_file(): + try: + os.unlink(app_path) + print(f"Cleaned up temporary app file: {app_path}") + except Exception as e: + print(f"Error cleaning up temporary app file: {e}") + + atexit.register(cleanup_temp_file) + + print(f"Streamlit app launched on port {port}. 
Press Ctrl+C to stop the app.") + + return process From e8adffe42b110b05f2d41c5a2435233e6955c016 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 17 Mar 2025 10:32:28 +0100 Subject: [PATCH 55/87] Update the file structure --- flixOpt/explorer_app.py | 652 ++++++++++++++++++++++++++++++++++++ flixOpt/results_explorer.py | 637 +++-------------------------------- 2 files changed, 699 insertions(+), 590 deletions(-) create mode 100644 flixOpt/explorer_app.py diff --git a/flixOpt/explorer_app.py b/flixOpt/explorer_app.py new file mode 100644 index 000000000..1feffc082 --- /dev/null +++ b/flixOpt/explorer_app.py @@ -0,0 +1,652 @@ +# FlixOpt Results Explorer App + +import argparse +import os +import sys +from pathlib import Path + +import numpy as np +import pandas as pd +import plotly.express as px +import plotly.graph_objects as go +import streamlit as st + +# Parse command line arguments +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='FlixOpt Results Explorer') + parser.add_argument('folder', type=str, help='Results folder path') + parser.add_argument('name', type=str, help='Calculation name') + args = parser.parse_args() + + results_folder = args.folder + results_name = args.name +else: + # Default values when imported as module + results_folder = "." 
+ results_name = "results" + +# Set page config +st.set_page_config( + page_title="FlixOpt Results Explorer", + page_icon="📊", + layout="wide", + initial_sidebar_state="expanded" +) + +# Helper function to capture plotly figures +def get_plotly_fig(plot_func, *args, **kwargs): + """Capture a plotly figure from a plotting function""" + # Add default parameters to ensure the function returns the figure without showing it + kwargs['show'] = False + kwargs['save'] = False + + # Call the plotting function + return plot_func(*args, **kwargs) + +# Cache the calculation loading +@st.cache_resource +def get_calculation_results(folder, name): + # Import the relevant modules + try: + # Try different import approaches + try: + # First try standard import + try: + from flixopt.results import CalculationResults + except ImportError: + from flixOpt.results import CalculationResults + except ImportError: + # Add potential module paths + for path in [os.getcwd(), os.path.dirname(os.path.abspath(__file__))]: + if path not in sys.path: + sys.path.append(path) + + # Try again with modified path + try: + from flixopt.results import CalculationResults + except ImportError: + from flixOpt.results import CalculationResults + + # Load from file + return CalculationResults.from_file(folder, name) + except Exception as e: + st.error(f"Error loading calculation results: {e}") + return None + +# Load the calculation results +results = get_calculation_results(results_folder, results_name) + +if results is None: + st.error("Failed to load calculation results.") + st.stop() + +# Create sidebar for navigation +st.sidebar.title("FlixOpt Results Explorer") +pages = ["Overview", "Components", "Buses", "Effects", "Variables", "Heatmaps"] +selected_page = st.sidebar.radio("Navigation", pages) + +# Overview page +if selected_page == "Overview": + st.title("Calculation Overview") + + # Model information + st.header("Model Information") + col1, col2 = st.columns(2) + + with col1: + st.write(f"**Name:** 
{results.name}") + st.write(f"**Folder:** {results.folder}") + st.write(f"**Time Steps:** {len(results.timesteps_extra)}") + if len(results.timesteps_extra) > 0: + st.write(f"**Time Range:** {results.timesteps_extra[0]} to {results.timesteps_extra[-1]}") + + with col2: + st.write(f"**Components:** {len(results.components)}") + st.write(f"**Buses:** {len(results.buses)}") + st.write(f"**Effects:** {len(results.effects)}") + st.write(f"**Storage Components:** {len(results.storages)}") + + # Additional info + if hasattr(results, 'infos') and results.infos: + st.subheader("Additional Information") + st.json(results.infos) + + # Network info + if hasattr(results, 'network_infos') and results.network_infos: + st.subheader("Network Information") + st.json(results.network_infos) + + # Network visualization + st.header("Network Structure") + + # Show component connections + st.subheader("Component Connections") + connections_data = [] + + for comp_name, comp in results.components.items(): + for bus_name in comp.inputs + comp.outputs: + connections_data.append({ + "Component": comp_name, + "Bus": bus_name, + "Type": "Input" if bus_name in comp.inputs else "Output" + }) + + if connections_data: + st.dataframe(pd.DataFrame(connections_data)) + +# Components page +elif selected_page == "Components": + st.title("Components") + + # Component selector + component_names = list(results.components.keys()) + + # Allow grouping by storage/non-storage + show_storage_first = st.checkbox("Show storage components first", value=True) + + if show_storage_first: + storage_components = [comp.label for comp in results.storages] + non_storage_components = [name for name in component_names if name not in storage_components] + sorted_components = storage_components + non_storage_components + else: + sorted_components = sorted(component_names) + + component_name = st.selectbox("Select a component:", sorted_components) + + if component_name: + component = results.components[component_name] + + 
st.header(f"Component: {component_name}") + if component.is_storage: + st.info("This is a storage component") + + # Component tabs + tabs = st.tabs(["Flow Rates", "Charge State (if storage)", "Variables & Constraints"]) + + # Flow Rates tab + with tabs[0]: + try: + st.subheader("Flow Rates") + + # Use built-in plotting method + fig = get_plotly_fig(component.plot_flow_rates) + st.plotly_chart(fig, use_container_width=True) + + # Also show as dataframe if requested + if st.checkbox("Show flow rates as table"): + flow_rates = component.flow_rates(with_last_timestep=True).to_dataframe() + st.dataframe(flow_rates) + except Exception as e: + st.error(f"Error displaying flow rates: {e}") + + # Charge State tab + with tabs[1]: + if component.is_storage: + try: + st.subheader("Charge State") + + # Use built-in charge state plotting method + fig = get_plotly_fig(component.plot_charge_state) + st.plotly_chart(fig, use_container_width=True) + + # Show statistics + st.subheader("Charge State Statistics") + charge_state = component.charge_state.solution.to_dataframe() + charge_vals = charge_state.values.flatten() + + col1, col2, col3, col4 = st.columns(4) + col1.metric("Minimum", f"{charge_vals.min():.2f}") + col2.metric("Maximum", f"{charge_vals.max():.2f}") + col3.metric("Average", f"{charge_vals.mean():.2f}") + col4.metric("Final", f"{charge_vals[-1]:.2f}") + + # Also show as dataframe if requested + if st.checkbox("Show charge state as table"): + st.dataframe(charge_state) + except Exception as e: + st.error(f"Error displaying charge state: {e}") + else: + st.info(f"Component {component_name} is not a storage component.") + + # Variables tab + with tabs[2]: + col1, col2 = st.columns(2) + + with col1: + st.subheader("Variables") + for var_name in component._variables: + with st.expander(f"Variable: {var_name}"): + try: + var = component.variables[var_name] + var_solution = var.solution + + # Check if this is a time-based variable + if 'time' in var_solution.dims: + # Plot 
time series + df = var_solution.to_dataframe() + + fig = go.Figure() + fig.add_trace(go.Scatter( + x=df.index, + y=df[var_name], + mode='lines', + name=var_name + )) + + fig.update_layout( + title=f"{var_name} Time Series", + xaxis_title="Time", + yaxis_title="Value", + height=300 + ) + + st.plotly_chart(fig, use_container_width=True) + else: + # Show scalar value + st.write(f"Value: {var_solution.values}") + except Exception as e: + st.error(f"Error displaying variable {var_name}: {e}") + + with col2: + st.subheader("Constraints") + for constraint_name in component._constraints: + with st.expander(f"Constraint: {constraint_name}"): + try: + constraint = component.constraints[constraint_name] + st.write(f"Constraint type: {constraint.sense}") + + # If constraint has a time dimension, try to plot it + if hasattr(constraint, 'dual'): + dual = constraint.dual + if hasattr(dual, 'dims') and 'time' in dual.dims: + df = dual.to_dataframe() + + fig = go.Figure() + fig.add_trace(go.Scatter( + x=df.index, + y=df[constraint_name], + mode='lines', + name='Dual Value' + )) + + fig.update_layout( + title=f"Dual Values for {constraint_name}", + xaxis_title="Time", + yaxis_title="Value", + height=300 + ) + + st.plotly_chart(fig, use_container_width=True) + else: + st.write(f"Dual value: {dual}") + except Exception as e: + st.error(f"Error displaying constraint {constraint_name}: {e}") + +# Buses page +elif selected_page == "Buses": + st.title("Buses") + + # Bus selector + bus_names = list(results.buses.keys()) + bus_name = st.selectbox("Select a bus:", sorted(bus_names)) + + if bus_name: + bus = results.buses[bus_name] + + st.header(f"Bus: {bus_name}") + + # Show flow rates + try: + st.subheader("Flow Rates") + + # Use built-in plotting method + fig = get_plotly_fig(bus.plot_flow_rates) + st.plotly_chart(fig, use_container_width=True) + + # Calculate and show balance + st.subheader("Flow Balance") + + flow_rates = bus.flow_rates(with_last_timestep=True).to_dataframe() + inputs = 
[col for col in flow_rates.columns if col in bus.inputs] + outputs = [col for col in flow_rates.columns if col in bus.outputs] + + balance_df = pd.DataFrame(index=flow_rates.index) + + if inputs: + balance_df['Total Input'] = flow_rates[inputs].sum(axis=1) + else: + balance_df['Total Input'] = 0 + + if outputs: + balance_df['Total Output'] = flow_rates[outputs].sum(axis=1) + else: + balance_df['Total Output'] = 0 + + balance_df['Net Flow'] = balance_df['Total Input'] + balance_df['Total Output'] + + fig = go.Figure() + for column in balance_df.columns: + fig.add_trace(go.Scatter( + x=balance_df.index, + y=balance_df[column], + mode='lines', + name=column + )) + + fig.update_layout( + title=f"Flow Balance for {bus_name}", + xaxis_title="Time", + yaxis_title="Flow Rate", + height=400 + ) + + st.plotly_chart(fig, use_container_width=True) + + # Also show as dataframe if requested + if st.checkbox("Show flow data as table"): + st.dataframe(flow_rates) + except Exception as e: + st.error(f"Error displaying flow rates: {e}") + + # Show inputs and outputs + col1, col2 = st.columns(2) + + with col1: + st.subheader("Inputs") + for input_name in bus.inputs: + st.write(f"- {input_name}") + + with col2: + st.subheader("Outputs") + for output_name in bus.outputs: + st.write(f"- {output_name}") + +# Effects page +elif selected_page == "Effects": + st.title("Effects") + + # Effect selector + effect_names = list(results.effects.keys()) + + if effect_names: + effect_name = st.selectbox("Select an effect:", sorted(effect_names)) + + if effect_name: + effect = results.effects[effect_name] + + st.header(f"Effect: {effect_name}") + + # List variables + st.subheader("Variables") + + for var_name in effect._variables: + try: + var = effect.variables[var_name] + + # Create an expander for each variable + with st.expander(f"Variable: {var_name}"): + var_solution = var.solution + + # Check if this is a time-based variable + if 'time' in var_solution.dims: + # Plot time series + df = 
var_solution.to_dataframe() + + fig = go.Figure() + fig.add_trace(go.Scatter( + x=df.index, + y=df[var_name], + mode='lines', + name=var_name + )) + + fig.update_layout( + title=f"{var_name} Time Series", + xaxis_title="Time", + yaxis_title="Value", + height=300 + ) + + st.plotly_chart(fig, use_container_width=True) + else: + # Show scalar value + st.write(f"Value: {var_solution.values}") + except Exception as e: + st.error(f"Error displaying variable {var_name}: {e}") + + # List shares + connected_elements = set() + for var_name in effect._variables: + if '->' in var_name: + elem = var_name.split('->')[0] + connected_elements.add(elem) + + if connected_elements: + st.subheader("Element Shares") + + for elem in sorted(connected_elements): + with st.expander(f"Shares from {elem}"): + try: + shares = effect.get_shares_from(elem) + + # Plot shares if time-based + time_shares = [s for s in shares if 'time' in shares[s].solution.dims] + + if time_shares: + df = pd.DataFrame() + for share_name in time_shares: + share_df = shares[share_name].solution.to_dataframe() + df[share_name] = share_df[share_name] + + fig = go.Figure() + for col in df.columns: + fig.add_trace(go.Scatter( + x=df.index, + y=df[col], + mode='lines', + name=col + )) + + fig.update_layout( + title=f"Shares from {elem}", + xaxis_title="Time", + yaxis_title="Share", + height=400 + ) + + st.plotly_chart(fig, use_container_width=True) + else: + # Display as simple table + share_data = [] + for share_name in shares: + share_data.append({ + "Share": share_name, + "Value": float(shares[share_name].solution.values.flatten()[0]) + }) + + if share_data: + st.table(pd.DataFrame(share_data)) + except Exception as e: + st.error(f"Error displaying shares from {elem}: {e}") + else: + st.info("No effects available in this calculation.") + +# Variables page +elif selected_page == "Variables": + st.title("Model Variables") + + # Add a filter option + variable_filter = st.text_input("Filter variables by name:") + + # Get 
all variables and apply filter + all_variables = list(results.model.variables) + + if variable_filter: + filtered_variables = [v for v in all_variables if variable_filter.lower() in v.lower()] + else: + filtered_variables = all_variables + + st.write(f"Showing {len(filtered_variables)} of {len(all_variables)} variables") + + # Variable selection + variable_name = st.selectbox("Select a variable:", filtered_variables) + + if variable_name: + try: + variable = results.model.variables[variable_name] + var_solution = variable.solution + + st.header(f"Variable: {variable_name}") + + # Basic info + st.subheader("Information") + st.write(f"**Dimensions:** {', '.join(var_solution.dims)}") + st.write(f"**Shape:** {var_solution.shape}") + + # Visualization based on dimensionality + if 'time' in var_solution.dims: + st.subheader("Time Series") + + df = var_solution.to_dataframe() + + # Simple case: just time dimension + if len(df.columns) == 1: + fig = go.Figure() + fig.add_trace(go.Scatter( + x=df.index, + y=df[variable_name], + mode='lines', + name=variable_name + )) + + fig.update_layout( + title=f"{variable_name} Time Series", + xaxis_title="Time", + yaxis_title="Value", + height=500 + ) + + st.plotly_chart(fig, use_container_width=True) + + # Also show as dataframe if requested + if st.checkbox("Show data as table"): + st.dataframe(df) + else: + # Multi-dimensional + st.write("This variable has multiple dimensions. Choose visualization type:") + + viz_type = st.radio( + "Visualization type:", + ["Line chart (all dimensions)", "Heatmap", "Raw data table"] + ) + + if viz_type == "Line chart (all dimensions)": + fig = go.Figure() + + # Limited to first 20 dimensions to avoid overloading + columns_to_plot = list(df.columns)[:20] + + if len(df.columns) > 20: + st.warning(f"Variable has {len(df.columns)} dimensions. 
Showing only first 20.") + + for column in columns_to_plot: + fig.add_trace(go.Scatter( + x=df.index, + y=df[column], + mode='lines', + name=str(column) + )) + + fig.update_layout( + title=f"{variable_name} Time Series (Multiple Dimensions)", + xaxis_title="Time", + yaxis_title="Value", + height=600 + ) + + st.plotly_chart(fig, use_container_width=True) + + elif viz_type == "Heatmap": + # Use the built-in heatmap function + try: + fig = get_plotly_fig(results.plot_heatmap, variable=variable_name) + st.plotly_chart(fig, use_container_width=True) + except Exception as e: + st.error(f"Error creating heatmap with built-in function: {e}") + + # Fallback to basic heatmap + try: + st.write("Using basic heatmap visualization instead:") + fig = px.imshow( + df.pivot_table(columns='time').T, + color_continuous_scale="Blues", + title=f"Heatmap for {variable_name}" + ) + st.plotly_chart(fig, use_container_width=True) + except Exception as e2: + st.error(f"Error creating basic heatmap: {e2}") + + elif viz_type == "Raw data table": + st.dataframe(df) + else: + # Non-time series data + st.subheader("Values") + st.write(var_solution.values) + except Exception as e: + st.error(f"Error displaying variable {variable_name}: {e}") + +# Heatmaps page +elif selected_page == "Heatmaps": + st.title("Heatmap Generator") + + # Get time-based variables + time_vars = [var_name for var_name, var in results.model.variables.items() + if 'time' in var.solution.dims] + + # Variable selection + variable_name = st.selectbox("Select a variable:", time_vars) + + if variable_name: + # Configure heatmap settings + st.subheader("Heatmap Settings") + + col1, col2, col3 = st.columns(3) + + with col1: + timeframes = st.selectbox( + "Timeframe grouping:", + ["YS", "MS", "W", "D", "h", "15min", "min"], + index=2 # Default to "W" + ) + + with col2: + timesteps = st.selectbox( + "Timesteps per frame:", + ["W", "D", "h", "15min", "min"], + index=2 # Default to "h" + ) + + with col3: + color_map = st.selectbox( + 
"Color map:", + ["portland", "viridis", "plasma", "inferno", "magma", "cividis", "RdBu", "Blues", "YlOrRd"], + index=0 + ) + + # Generate button + if st.button("Generate Heatmap"): + try: + st.subheader(f"Heatmap for {variable_name}") + + # Use the built-in heatmap function + fig = get_plotly_fig( + results.plot_heatmap, + variable=variable_name, + heatmap_timeframes=timeframes, + heatmap_timesteps_per_frame=timesteps, + color_map=color_map + ) + + st.plotly_chart(fig, use_container_width=True) + except Exception as e: + st.error(f"Error generating heatmap: {e}") diff --git a/flixOpt/results_explorer.py b/flixOpt/results_explorer.py index 7d58e5a1e..359759885 100644 --- a/flixOpt/results_explorer.py +++ b/flixOpt/results_explorer.py @@ -1,606 +1,63 @@ -import streamlit as st -import pandas as pd -import plotly.graph_objects as go -from pathlib import Path -import tempfile -import webbrowser -import subprocess +""" +FlixOpt Results Explorer + +A module for launching a Streamlit app to explore flixOpt calculation results. +""" + import os -import atexit -import shutil +import subprocess import sys -from typing import Union, Optional, Literal, Dict, List +import webbrowser +from pathlib import Path -def explore_results(self, port: int = 8501): + +def explore_results(self, port=8501): """ Launch a Streamlit app to explore the calculation results. - - This method creates a temporary Streamlit script that directly references - the current CalculationResults object and launches it in a browser. 
- + Args: port: Port to use for the Streamlit server - + Returns: subprocess.Popen: The running Streamlit process """ - # Create a temporary file for the Streamlit app - with tempfile.NamedTemporaryFile(suffix='.py', delete=False) as f: - app_path = f.name - app_code = f""" -import streamlit as st -import pandas as pd -import numpy as np -import plotly.graph_objects as go -import plotly.express as px -import sys -from pathlib import Path - -# Add the current directory to path so we can import the calculation results -sys.path.append(r"{os.getcwd()}") + import subprocess + import sys + import webbrowser + import os + from pathlib import Path + + # Find explorer app path + current_dir = os.path.dirname(os.path.abspath(__file__)) + explorer_script = os.path.join(current_dir, 'explorer_app.py') + + # If the explorer app doesn't exist, inform the user + if not os.path.exists(explorer_script): + raise FileNotFoundError( + f'Explorer app not found at {explorer_script}. ' + 'Please ensure the explorer_app.py file is in the flixOpt package directory.' 
+ ) + + # Run the Streamlit app - the port argument needs to be separate from the script arguments + cmd = [ + sys.executable, + '-m', + 'streamlit', + 'run', + explorer_script, + '--server.port', + str(port), + '--', # This separator is important + str(self.folder), + self.name, + ] -# Set page config -st.set_page_config( - page_title="FlixOpt Results Explorer", - page_icon="📊", - layout="wide", - initial_sidebar_state="expanded" -) - -# Cache the calculation loading -@st.cache_resource -def get_calculation_results(): - # Import the relevant modules - try: - # Load the model from saved files (more reliable) - from flixOpt.results import CalculationResults - return CalculationResults.from_file(r"{self.folder}", "{self.name}") - except Exception as e: - st.error(f"Error loading calculation results: {{e}}") - return None - -# Load the calculation results -results = get_calculation_results() - -if results is None: - st.error("Failed to load calculation results.") - st.stop() - -# Create sidebar for navigation -st.sidebar.title("FlixOpt Results Explorer") -pages = ["Overview", "Components", "Buses", "Effects", "Variables", "Heatmaps"] -selected_page = st.sidebar.radio("Navigation", pages) - -# Overview page -if selected_page == "Overview": - st.title("Calculation Overview") - - # Model information - st.header("Model Information") - col1, col2 = st.columns(2) - - with col1: - st.write(f"**Name:** {{results.name}}") - st.write(f"**Time Steps:** {{len(results.timesteps_extra)}}") - if len(results.timesteps_extra) > 0: - st.write(f"**Time Range:** {{results.timesteps_extra[0]}} to {{results.timesteps_extra[-1]}}") - - with col2: - st.write(f"**Components:** {{len(results.components)}}") - st.write(f"**Buses:** {{len(results.buses)}}") - st.write(f"**Effects:** {{len(results.effects)}}") - - # Additional info - if hasattr(results, 'infos') and results.infos: - st.subheader("Additional Information") - st.json(results.infos) - - # Network visualization (simplified) - 
st.header("Network Structure") - - # Create a list of components and buses for network diagram - nodes = [] - edges = [] - - # Add components as nodes - for comp_name in results.components: - nodes.append({{"id": comp_name, "type": "component"}}) - - # Add buses as nodes - for bus_name in results.buses: - nodes.append({{"id": bus_name, "type": "bus"}}) - - # Show component connections - st.subheader("Component Connections") - connections_data = [] - - for comp_name, comp in results.components.items(): - for bus_name in comp.inputs + comp.outputs: - connections_data.append({{"Component": comp_name, "Bus": bus_name, - "Type": "Input" if bus_name in comp.inputs else "Output"}}) - - if connections_data: - st.dataframe(pd.DataFrame(connections_data)) - -# Components page -elif selected_page == "Components": - st.title("Components") - - # Component selector - component_names = list(results.components.keys()) - component_name = st.selectbox("Select a component:", component_names) - - if component_name: - component = results.components[component_name] - - st.header(f"Component: {{component_name}}") - - # Component tabs - tabs = st.tabs(["Flow Rates", "Charge State (if storage)", "Variables"]) - - # Flow Rates tab - with tabs[0]: - try: - flow_rates = component.flow_rates(with_last_timestep=True).to_dataframe() - - if not flow_rates.empty: - st.subheader("Flow Rates") - fig = go.Figure() - - for column in flow_rates.columns: - fig.add_trace(go.Scatter( - x=flow_rates.index, - y=flow_rates[column], - mode='lines', - name=column - )) - - fig.update_layout( - title=f"Flow Rates for {{component_name}}", - xaxis_title="Time", - yaxis_title="Flow Rate", - height=500 - ) - - st.plotly_chart(fig, use_container_width=True) - - # Also show as dataframe if requested - if st.checkbox("Show flow rates as table"): - st.dataframe(flow_rates) - else: - st.info("No flow rates data available for this component.") - except Exception as e: - st.error(f"Error displaying flow rates: {{e}}") - - 
# Charge State tab - with tabs[1]: - if component.is_storage: - try: - st.subheader("Charge State") - - # Get charge state data - charge_state = component.charge_state.solution.to_dataframe() - - fig = go.Figure() - fig.add_trace(go.Scatter( - x=charge_state.index, - y=charge_state.values.flatten(), - mode='lines', - name='Charge State' - )) - - fig.update_layout( - title=f"Charge State for {{component_name}}", - xaxis_title="Time", - yaxis_title="Charge", - height=500 - ) - - st.plotly_chart(fig, use_container_width=True) - - # Show statistics - st.subheader("Charge State Statistics") - charge_vals = charge_state.values.flatten() - - col1, col2, col3, col4 = st.columns(4) - col1.metric("Minimum", f"{{charge_vals.min():.2f}}") - col2.metric("Maximum", f"{{charge_vals.max():.2f}}") - col3.metric("Average", f"{{charge_vals.mean():.2f}}") - col4.metric("Final", f"{{charge_vals[-1]:.2f}}") - - # Also show as dataframe if requested - if st.checkbox("Show charge state as table"): - st.dataframe(charge_state) - except Exception as e: - st.error(f"Error displaying charge state: {{e}}") - else: - st.info(f"Component {{component_name}} is not a storage component.") - - # Variables tab - with tabs[2]: - st.subheader("Component Variables") - - for var_name in component._variables: - try: - var = component.variables[var_name] - - # Create an expander for each variable - with st.expander(f"Variable: {{var_name}}"): - var_solution = var.solution - - # Check if this is a time-based variable - if 'time' in var_solution.dims: - # Plot time series - df = var_solution.to_dataframe() - - fig = go.Figure() - fig.add_trace(go.Scatter( - x=df.index, - y=df[var_name], - mode='lines', - name=var_name - )) - - fig.update_layout( - title=f"{{var_name}} Time Series", - xaxis_title="Time", - yaxis_title="Value", - height=300 - ) - - st.plotly_chart(fig, use_container_width=True) - else: - # Show scalar value - st.write(f"Value: {{var_solution.values}}") - except Exception as e: - 
st.error(f"Error displaying variable {{var_name}}: {{e}}") - -# Buses page -elif selected_page == "Buses": - st.title("Buses") - - # Bus selector - bus_names = list(results.buses.keys()) - bus_name = st.selectbox("Select a bus:", bus_names) - - if bus_name: - bus = results.buses[bus_name] - - st.header(f"Bus: {{bus_name}}") - - # Show flow rates - try: - flow_rates = bus.flow_rates(with_last_timestep=True).to_dataframe() - - if not flow_rates.empty: - st.subheader("Flow Rates") - fig = go.Figure() - - for column in flow_rates.columns: - fig.add_trace(go.Scatter( - x=flow_rates.index, - y=flow_rates[column], - mode='lines', - name=column - )) - - fig.update_layout( - title=f"Flow Rates for {{bus_name}}", - xaxis_title="Time", - yaxis_title="Flow Rate", - height=500 - ) - - st.plotly_chart(fig, use_container_width=True) - - # Calculate and show balance - st.subheader("Flow Balance") - - inputs = [col for col in flow_rates.columns if col in bus.inputs] - outputs = [col for col in flow_rates.columns if col in bus.outputs] - - balance_df = pd.DataFrame(index=flow_rates.index) - - if inputs: - balance_df['Total Input'] = flow_rates[inputs].sum(axis=1) - else: - balance_df['Total Input'] = 0 - - if outputs: - balance_df['Total Output'] = flow_rates[outputs].sum(axis=1) - else: - balance_df['Total Output'] = 0 - - balance_df['Net Flow'] = balance_df['Total Input'] + balance_df['Total Output'] - - fig = go.Figure() - for column in balance_df.columns: - fig.add_trace(go.Scatter( - x=balance_df.index, - y=balance_df[column], - mode='lines', - name=column - )) - - fig.update_layout( - title=f"Flow Balance for {{bus_name}}", - xaxis_title="Time", - yaxis_title="Flow Rate", - height=400 - ) - - st.plotly_chart(fig, use_container_width=True) - - # Also show as dataframe if requested - if st.checkbox("Show flow data as table"): - st.dataframe(flow_rates) - else: - st.info("No flow rates data available for this bus.") - except Exception as e: - st.error(f"Error displaying flow 
rates: {{e}}") - - # Show inputs and outputs - col1, col2 = st.columns(2) - - with col1: - st.subheader("Inputs") - for input_name in bus.inputs: - st.write(f"- {{input_name}}") - - with col2: - st.subheader("Outputs") - for output_name in bus.outputs: - st.write(f"- {{output_name}}") - -# Effects page -elif selected_page == "Effects": - st.title("Effects") - - # Effect selector - effect_names = list(results.effects.keys()) - - if effect_names: - effect_name = st.selectbox("Select an effect:", effect_names) - - if effect_name: - effect = results.effects[effect_name] - - st.header(f"Effect: {{effect_name}}") - - # List variables - st.subheader("Variables") - - for var_name in effect._variables: - try: - var = effect.variables[var_name] - - # Create an expander for each variable - with st.expander(f"Variable: {{var_name}}"): - var_solution = var.solution - - # Check if this is a time-based variable - if 'time' in var_solution.dims: - # Plot time series - df = var_solution.to_dataframe() - - fig = go.Figure() - fig.add_trace(go.Scatter( - x=df.index, - y=df[var_name], - mode='lines', - name=var_name - )) - - fig.update_layout( - title=f"{{var_name}} Time Series", - xaxis_title="Time", - yaxis_title="Value", - height=300 - ) - - st.plotly_chart(fig, use_container_width=True) - else: - # Show scalar value - st.write(f"Value: {{var_solution.values}}") - except Exception as e: - st.error(f"Error displaying variable {{var_name}}: {{e}}") - else: - st.info("No effects available in this calculation.") - -# Variables page -elif selected_page == "Variables": - st.title("Model Variables") - - # Add a filter option - variable_filter = st.text_input("Filter variables by name:") - - # Get all variables and apply filter - all_variables = list(results.model.variables) - - if variable_filter: - filtered_variables = [v for v in all_variables if variable_filter.lower() in v.lower()] - else: - filtered_variables = all_variables - - st.write(f"Showing {{len(filtered_variables)}} of 
{{len(all_variables)}} variables") - - # Variable selection - variable_name = st.selectbox("Select a variable:", filtered_variables) - - if variable_name: - try: - variable = results.model.variables[variable_name] - var_solution = variable.solution - - st.header(f"Variable: {{variable_name}}") - - # Basic info - st.subheader("Information") - st.write(f"**Dimensions:** {{', '.join(var_solution.dims)}}") - st.write(f"**Shape:** {{var_solution.shape}}") - - # Visualization based on dimensionality - if 'time' in var_solution.dims: - st.subheader("Time Series") - - df = var_solution.to_dataframe() - - # Simple case: just time dimension - if len(df.columns) == 1: - fig = go.Figure() - fig.add_trace(go.Scatter( - x=df.index, - y=df[variable_name], - mode='lines', - name=variable_name - )) - - fig.update_layout( - title=f"{{variable_name}} Time Series", - xaxis_title="Time", - yaxis_title="Value", - height=500 - ) - - st.plotly_chart(fig, use_container_width=True) - - # Also show as dataframe if requested - if st.checkbox("Show data as table"): - st.dataframe(df) - else: - # Multi-dimensional - st.write("This variable has multiple dimensions. Choose visualization type:") - - viz_type = st.radio( - "Visualization type:", - ["Line chart (all dimensions)", "Heatmap", "Raw data table"] - ) - - if viz_type == "Line chart (all dimensions)": - fig = go.Figure() - - # Limited to first 20 dimensions to avoid overloading - columns_to_plot = list(df.columns)[:20] - - if len(df.columns) > 20: - st.warning(f"Variable has {{len(df.columns)}} dimensions. 
Showing only first 20.") - - for column in columns_to_plot: - fig.add_trace(go.Scatter( - x=df.index, - y=df[column], - mode='lines', - name=str(column) - )) - - fig.update_layout( - title=f"{{variable_name}} Time Series (Multiple Dimensions)", - xaxis_title="Time", - yaxis_title="Value", - height=600 - ) - - st.plotly_chart(fig, use_container_width=True) - - elif viz_type == "Heatmap": - # Try to use the built-in heatmap function - try: - fig = results.plot_heatmap( - variable=variable_name, - show=False, - save=False - ) - st.plotly_chart(fig, use_container_width=True) - except Exception as e: - st.error(f"Error creating heatmap: {{e}}") - - elif viz_type == "Raw data table": - st.dataframe(df) - else: - # Non-time series data - st.subheader("Values") - st.write(var_solution.values) - except Exception as e: - st.error(f"Error displaying variable {{variable_name}}: {{e}}") - -# Heatmaps page -elif selected_page == "Heatmaps": - st.title("Heatmap Generator") - - # Get time-based variables - time_vars = [var_name for var_name, var in results.model.variables.items() - if 'time' in var.solution.dims] - - # Variable selection - variable_name = st.selectbox("Select a variable:", time_vars) - - if variable_name: - # Configure heatmap settings - st.subheader("Heatmap Settings") - - col1, col2, col3 = st.columns(3) - - with col1: - timeframes = st.selectbox( - "Timeframe grouping:", - ["YS", "MS", "W", "D", "h", "15min", "min"], - index=2 # Default to "W" - ) - - with col2: - timesteps = st.selectbox( - "Timesteps per frame:", - ["W", "D", "h", "15min", "min"], - index=2 # Default to "h" - ) - - with col3: - color_map = st.selectbox( - "Color map:", - ["portland", "viridis", "plasma", "inferno", "magma", "cividis", "RdBu", "Blues", "YlOrRd"], - index=0 - ) - - # Generate button - if st.button("Generate Heatmap"): - try: - st.subheader(f"Heatmap for {{variable_name}}") - - fig = results.plot_heatmap( - variable=variable_name, - heatmap_timeframes=timeframes, - 
heatmap_timesteps_per_frame=timesteps, - color_map=color_map, - show=False, - save=False - ) - - st.plotly_chart(fig, use_container_width=True) - except Exception as e: - st.error(f"Error generating heatmap: {{e}}") -""" - - f.write(app_code.encode('utf-8')) - - # Construct the command to run Streamlit - cmd = [sys.executable, "-m", "streamlit", "run", app_path, "--server.port", str(port)] - # Launch the Streamlit app process = subprocess.Popen(cmd) - + # Open browser webbrowser.open(f'http://localhost:{port}') - - # Set up cleanup of temporary file when the process exits - def cleanup_temp_file(): - try: - os.unlink(app_path) - print(f"Cleaned up temporary app file: {app_path}") - except Exception as e: - print(f"Error cleaning up temporary app file: {e}") - - atexit.register(cleanup_temp_file) - - print(f"Streamlit app launched on port {port}. Press Ctrl+C to stop the app.") - + + print(f'Streamlit app launched on port {port}. Press Ctrl+C to stop the app.') + return process From 44496872b5c9bc4298139e243d0e4c4b94015347 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 17 Mar 2025 11:03:55 +0100 Subject: [PATCH 56/87] Update streamlit app --- flixOpt/explorer_app.py | 294 ++++++++-------------------------------- 1 file changed, 60 insertions(+), 234 deletions(-) diff --git a/flixOpt/explorer_app.py b/flixOpt/explorer_app.py index 1feffc082..f2afd68d3 100644 --- a/flixOpt/explorer_app.py +++ b/flixOpt/explorer_app.py @@ -11,6 +11,8 @@ import plotly.graph_objects as go import streamlit as st +from flixOpt import plotting + # Parse command line arguments if __name__ == "__main__": parser = argparse.ArgumentParser(description='FlixOpt Results Explorer') @@ -161,126 +163,52 @@ def get_calculation_results(folder, name): st.info("This is a storage component") # Component tabs - tabs = st.tabs(["Flow Rates", "Charge State (if storage)", "Variables & Constraints"]) + tabs = st.tabs(["Node Balance", "Variables & Constraints"]) - 
# Flow Rates tab + # Node Balance tab with tabs[0]: try: - st.subheader("Flow Rates") + st.subheader("Node Balance") # Use built-in plotting method - fig = get_plotly_fig(component.plot_flow_rates) + if component.is_storage: + fig = get_plotly_fig(component.plot_charge_state) + else: + fig = get_plotly_fig(component.plot_flow_rates) + st.plotly_chart(fig, use_container_width=True) # Also show as dataframe if requested - if st.checkbox("Show flow rates as table"): - flow_rates = component.flow_rates(with_last_timestep=True).to_dataframe() + if st.checkbox("Show Data Table"): + if component.is_storage: + flow_rates = component.charge_state_and_flow_rates().to_dataframe() + else: + flow_rates = component.flow_rates().to_dataframe() st.dataframe(flow_rates) except Exception as e: - st.error(f"Error displaying flow rates: {e}") - - # Charge State tab - with tabs[1]: - if component.is_storage: - try: - st.subheader("Charge State") - - # Use built-in charge state plotting method - fig = get_plotly_fig(component.plot_charge_state) - st.plotly_chart(fig, use_container_width=True) - - # Show statistics - st.subheader("Charge State Statistics") - charge_state = component.charge_state.solution.to_dataframe() - charge_vals = charge_state.values.flatten() - - col1, col2, col3, col4 = st.columns(4) - col1.metric("Minimum", f"{charge_vals.min():.2f}") - col2.metric("Maximum", f"{charge_vals.max():.2f}") - col3.metric("Average", f"{charge_vals.mean():.2f}") - col4.metric("Final", f"{charge_vals[-1]:.2f}") - - # Also show as dataframe if requested - if st.checkbox("Show charge state as table"): - st.dataframe(charge_state) - except Exception as e: - st.error(f"Error displaying charge state: {e}") - else: - st.info(f"Component {component_name} is not a storage component.") + st.error(f"Error displaying the ndoe balance: {e}") # Variables tab - with tabs[2]: - col1, col2 = st.columns(2) - - with col1: - st.subheader("Variables") - for var_name in component._variables: - with 
st.expander(f"Variable: {var_name}"): - try: - var = component.variables[var_name] - var_solution = var.solution - - # Check if this is a time-based variable - if 'time' in var_solution.dims: - # Plot time series - df = var_solution.to_dataframe() - - fig = go.Figure() - fig.add_trace(go.Scatter( - x=df.index, - y=df[var_name], - mode='lines', - name=var_name - )) - - fig.update_layout( - title=f"{var_name} Time Series", - xaxis_title="Time", - yaxis_title="Value", - height=300 - ) - - st.plotly_chart(fig, use_container_width=True) - else: - # Show scalar value - st.write(f"Value: {var_solution.values}") - except Exception as e: - st.error(f"Error displaying variable {var_name}: {e}") - - with col2: - st.subheader("Constraints") - for constraint_name in component._constraints: - with st.expander(f"Constraint: {constraint_name}"): - try: - constraint = component.constraints[constraint_name] - st.write(f"Constraint type: {constraint.sense}") - - # If constraint has a time dimension, try to plot it - if hasattr(constraint, 'dual'): - dual = constraint.dual - if hasattr(dual, 'dims') and 'time' in dual.dims: - df = dual.to_dataframe() - - fig = go.Figure() - fig.add_trace(go.Scatter( - x=df.index, - y=df[constraint_name], - mode='lines', - name='Dual Value' - )) + with tabs[1]: + st.subheader("Variables") + for var_name in component._variables: + with st.expander(f"Variable: {var_name}"): + try: + var = component.variables[var_name] + var_solution = var.solution - fig.update_layout( - title=f"Dual Values for {constraint_name}", - xaxis_title="Time", - yaxis_title="Value", - height=300 - ) + # Check if this is a time-based variable + if 'time' in var_solution.dims: + # Plot time series + fig = get_plotly_fig(plotting.with_plotly, data=var_solution.to_dataframe(), mode='area', title=f'Variable: {var_name}') + fig.update_layout(height=300) - st.plotly_chart(fig, use_container_width=True) - else: - st.write(f"Dual value: {dual}") - except Exception as e: - st.error(f"Error 
displaying constraint {constraint_name}: {e}") + st.plotly_chart(fig, use_container_width=True) + else: + # Show scalar value + st.write(f"Value: {var_solution.values}") + except Exception as e: + st.error(f"Error displaying variable {var_name}: {e}") # Buses page elif selected_page == "Buses": @@ -295,58 +223,20 @@ def get_calculation_results(folder, name): st.header(f"Bus: {bus_name}") - # Show flow rates + # Show Node Balance try: - st.subheader("Flow Rates") + st.subheader("Node Balance") # Use built-in plotting method fig = get_plotly_fig(bus.plot_flow_rates) - st.plotly_chart(fig, use_container_width=True) - - # Calculate and show balance - st.subheader("Flow Balance") - - flow_rates = bus.flow_rates(with_last_timestep=True).to_dataframe() - inputs = [col for col in flow_rates.columns if col in bus.inputs] - outputs = [col for col in flow_rates.columns if col in bus.outputs] - - balance_df = pd.DataFrame(index=flow_rates.index) - - if inputs: - balance_df['Total Input'] = flow_rates[inputs].sum(axis=1) - else: - balance_df['Total Input'] = 0 - - if outputs: - balance_df['Total Output'] = flow_rates[outputs].sum(axis=1) - else: - balance_df['Total Output'] = 0 - - balance_df['Net Flow'] = balance_df['Total Input'] + balance_df['Total Output'] - - fig = go.Figure() - for column in balance_df.columns: - fig.add_trace(go.Scatter( - x=balance_df.index, - y=balance_df[column], - mode='lines', - name=column - )) - - fig.update_layout( - title=f"Flow Balance for {bus_name}", - xaxis_title="Time", - yaxis_title="Flow Rate", - height=400 - ) - + df = bus.flow_rates().to_dataframe() st.plotly_chart(fig, use_container_width=True) # Also show as dataframe if requested - if st.checkbox("Show flow data as table"): - st.dataframe(flow_rates) + if st.checkbox("Show Data Table"): + st.dataframe(df) except Exception as e: - st.error(f"Error displaying flow rates: {e}") + st.error(f"Error displaying the node balance: {e}") # Show inputs and outputs col1, col2 = st.columns(2) @@ 
-498,100 +388,36 @@ def get_calculation_results(folder, name): st.header(f"Variable: {variable_name}") - # Basic info - st.subheader("Information") - st.write(f"**Dimensions:** {', '.join(var_solution.dims)}") - st.write(f"**Shape:** {var_solution.shape}") - # Visualization based on dimensionality if 'time' in var_solution.dims: st.subheader("Time Series") - - df = var_solution.to_dataframe() - - # Simple case: just time dimension - if len(df.columns) == 1: - fig = go.Figure() - fig.add_trace(go.Scatter( - x=df.index, - y=df[variable_name], - mode='lines', - name=variable_name - )) - - fig.update_layout( - title=f"{variable_name} Time Series", - xaxis_title="Time", - yaxis_title="Value", - height=500 + try: + fig = get_plotly_fig( + plotting.with_plotly, + data=var_solution.to_dataframe(), + mode='area', + title=f'Variable: {variable_name}', ) + fig.update_layout(height=300) st.plotly_chart(fig, use_container_width=True) + except Exception as e: + st.error(f'Error displaying variable {variable_name}: {e}') - # Also show as dataframe if requested - if st.checkbox("Show data as table"): - st.dataframe(df) - else: - # Multi-dimensional - st.write("This variable has multiple dimensions. Choose visualization type:") - - viz_type = st.radio( - "Visualization type:", - ["Line chart (all dimensions)", "Heatmap", "Raw data table"] - ) - - if viz_type == "Line chart (all dimensions)": - fig = go.Figure() - - # Limited to first 20 dimensions to avoid overloading - columns_to_plot = list(df.columns)[:20] - - if len(df.columns) > 20: - st.warning(f"Variable has {len(df.columns)} dimensions. 
Showing only first 20.") - - for column in columns_to_plot: - fig.add_trace(go.Scatter( - x=df.index, - y=df[column], - mode='lines', - name=str(column) - )) - - fig.update_layout( - title=f"{variable_name} Time Series (Multiple Dimensions)", - xaxis_title="Time", - yaxis_title="Value", - height=600 - ) - - st.plotly_chart(fig, use_container_width=True) - - elif viz_type == "Heatmap": - # Use the built-in heatmap function - try: - fig = get_plotly_fig(results.plot_heatmap, variable=variable_name) - st.plotly_chart(fig, use_container_width=True) - except Exception as e: - st.error(f"Error creating heatmap with built-in function: {e}") - - # Fallback to basic heatmap - try: - st.write("Using basic heatmap visualization instead:") - fig = px.imshow( - df.pivot_table(columns='time').T, - color_continuous_scale="Blues", - title=f"Heatmap for {variable_name}" - ) - st.plotly_chart(fig, use_container_width=True) - except Exception as e2: - st.error(f"Error creating basic heatmap: {e2}") + df = var_solution.to_dataframe() - elif viz_type == "Raw data table": - st.dataframe(df) + if st.checkbox("Show data as table"): + st.dataframe(df) else: # Non-time series data st.subheader("Values") - st.write(var_solution.values) + st.write(var_solution.values.item()) + + # Basic info + st.subheader("Stats") + st.write(f"**Dimensions:** {', '.join(var_solution.dims)}") + st.write(f"**Shape:** {var_solution.shape}") + except Exception as e: st.error(f"Error displaying variable {variable_name}: {e}") @@ -616,7 +442,7 @@ def get_calculation_results(folder, name): timeframes = st.selectbox( "Timeframe grouping:", ["YS", "MS", "W", "D", "h", "15min", "min"], - index=2 # Default to "W" + index=3 # Default to "W" ) with col2: From a90f0ab153dd83d583460cf875608346338c5063 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 17 Mar 2025 11:23:37 +0100 Subject: [PATCH 57/87] Update streamlit app --- flixOpt/explorer_app.py | 260 
+++++++++++++++++++++++++++++++--------- 1 file changed, 206 insertions(+), 54 deletions(-) diff --git a/flixOpt/explorer_app.py b/flixOpt/explorer_app.py index f2afd68d3..616fa9091 100644 --- a/flixOpt/explorer_app.py +++ b/flixOpt/explorer_app.py @@ -163,7 +163,7 @@ def get_calculation_results(folder, name): st.info("This is a storage component") # Component tabs - tabs = st.tabs(["Node Balance", "Variables & Constraints"]) + tabs = st.tabs(["Node Balance", "All Variables"]) # Node Balance tab with tabs[0]: @@ -186,29 +186,92 @@ def get_calculation_results(folder, name): flow_rates = component.flow_rates().to_dataframe() st.dataframe(flow_rates) except Exception as e: - st.error(f"Error displaying the ndoe balance: {e}") + st.error(f"Error displaying the node balance: {e}") # Variables tab with tabs[1]: - st.subheader("Variables") - for var_name in component._variables: - with st.expander(f"Variable: {var_name}"): - try: - var = component.variables[var_name] - var_solution = var.solution + st.title("Model Variables") - # Check if this is a time-based variable - if 'time' in var_solution.dims: - # Plot time series + # Add a filter option + variable_filter = st.text_input("Filter variables by name:") + + # Get all variables and apply filter + all_variables = list(component.variables) + + if variable_filter: + filtered_variables = [v for v in all_variables if variable_filter.lower() in v.lower()] + else: + filtered_variables = all_variables + + # Heatmap options in a single row + show_heatmap_col, heatmap_col1, heatmap_col2, heatmap_col3 = st.columns(4) + with show_heatmap_col: + show_heatmap = st.checkbox('Show as heatmap', value=False) + with heatmap_col1: + timeframes = st.selectbox( + 'Timeframes', + ['YS', 'MS', 'W', 'D', 'h', '15min', 'min'], + index=3, # Default to "D" + ) + with heatmap_col2: + timesteps = st.selectbox( + 'Timesteps', + ['W', 'D', 'h', '15min', 'min'], + index=2, # Default to "h" + ) + with heatmap_col3: + color_map = st.selectbox( + 
'Colormap', + ['portland', 'viridis', 'plasma', 'inferno', 'magma', 'cividis', 'RdBu', 'Blues', 'YlOrRd'], + index=0, + ) + + st.write(f"Showing {len(filtered_variables)} of {len(all_variables)} variables") + + # Display all filtered variables directly + for var_name in filtered_variables: + try: + var = component.variables[var_name] + var_solution = var.solution + + # Check if this is a time-based variable + if 'time' in var_solution.dims: + if show_heatmap: + try: + # Create heatmap using var_solution + heatmap_data = plotting.heat_map_data_from_df( + var_solution.to_dataframe(var_name), + timeframes, + timesteps, + 'ffill' + ) + + fig = plotting.heat_map_plotly( + heatmap_data, + title=var_name, + color_map=color_map, + xlabel=f'timeframe [{timeframes}]', + ylabel=f'timesteps [{timesteps}]' + ) + + st.plotly_chart(fig, use_container_width=True) + except Exception as e: + st.error(f"Error creating heatmap: {e}") + else: + # Regular time series plot fig = get_plotly_fig(plotting.with_plotly, data=var_solution.to_dataframe(), mode='area', title=f'Variable: {var_name}') fig.update_layout(height=300) - st.plotly_chart(fig, use_container_width=True) - else: - # Show scalar value - st.write(f"Value: {var_solution.values}") - except Exception as e: - st.error(f"Error displaying variable {var_name}: {e}") + + show_datatable = st.checkbox(f'Show data table', key=f'datatable_{var_name}', value=False) + if show_datatable: + st.dataframe(var_solution.to_dataframe()) + + else: + # Show scalar value + st.write(f"{var_name}: {var_solution.values}") + except Exception as e: + st.error(f"Error displaying variable {var_name}: {e}") # Buses page elif selected_page == "Buses": @@ -279,25 +342,74 @@ def get_calculation_results(folder, name): # Check if this is a time-based variable if 'time' in var_solution.dims: - # Plot time series - df = var_solution.to_dataframe() - - fig = go.Figure() - fig.add_trace(go.Scatter( - x=df.index, - y=df[var_name], - mode='lines', - name=var_name - 
)) - - fig.update_layout( - title=f"{var_name} Time Series", - xaxis_title="Time", - yaxis_title="Value", - height=300 - ) + # Plot as heatmap toggle + show_heatmap = st.checkbox("Show as heatmap", key=f"heatmap_{var_name}", value=False) + + if show_heatmap: + # Heatmap options in a single row + heatmap_col1, heatmap_col2, heatmap_col3 = st.columns(3) + with heatmap_col1: + timeframes = st.selectbox( + "Timeframes", + ["YS", "MS", "W", "D", "h", "15min", "min"], + index=3, # Default to "D" + key=f"timeframes_{var_name}" + ) + with heatmap_col2: + timesteps = st.selectbox( + "Timesteps", + ["W", "D", "h", "15min", "min"], + index=2, # Default to "h" + key=f"timesteps_{var_name}" + ) + with heatmap_col3: + color_map = st.selectbox( + "Colormap", + ["portland", "viridis", "plasma", "inferno", "magma", "cividis", "RdBu", "Blues", "YlOrRd"], + index=0, + key=f"colormap_{var_name}" + ) + + try: + # Create heatmap using var_solution + heatmap_data = plotting.heat_map_data_from_df( + var_solution.to_dataframe(var_name), + timeframes, + timesteps, + 'ffill' + ) + + fig = plotting.heat_map_plotly( + heatmap_data, + title=var_name, + color_map=color_map, + xlabel=f'timeframe [{timeframes}]', + ylabel=f'timesteps [{timesteps}]' + ) + + st.plotly_chart(fig, use_container_width=True) + except Exception as e: + st.error(f"Error creating heatmap: {e}") + else: + # Regular time series plot + df = var_solution.to_dataframe() - st.plotly_chart(fig, use_container_width=True) + fig = go.Figure() + fig.add_trace(go.Scatter( + x=df.index, + y=df[var_name], + mode='lines', + name=var_name + )) + + fig.update_layout( + title=f"{var_name} Time Series", + xaxis_title="Time", + yaxis_title="Value", + height=300 + ) + + st.plotly_chart(fig, use_container_width=True) else: # Show scalar value st.write(f"Value: {var_solution.values}") @@ -390,19 +502,58 @@ def get_calculation_results(folder, name): # Visualization based on dimensionality if 'time' in var_solution.dims: - st.subheader("Time 
Series") - try: - fig = get_plotly_fig( - plotting.with_plotly, - data=var_solution.to_dataframe(), - mode='area', - title=f'Variable: {variable_name}', - ) - fig.update_layout(height=300) - - st.plotly_chart(fig, use_container_width=True) - except Exception as e: - st.error(f'Error displaying variable {variable_name}: {e}') + # Plot as heatmap toggle + show_heatmap = st.checkbox("Show as heatmap", value=False) + + if show_heatmap: + # Heatmap options in a single row + heatmap_cols = st.columns(3) + with heatmap_cols[0]: + timeframes = st.selectbox( + "Timeframes", + ["YS", "MS", "W", "D", "h", "15min", "min"], + index=3 # Default to "D" + ) + with heatmap_cols[1]: + timesteps = st.selectbox( + "Timesteps", + ["W", "D", "h", "15min", "min"], + index=2 # Default to "h" + ) + with heatmap_cols[2]: + color_map = st.selectbox( + "Colormap", + ["portland", "viridis", "plasma", "inferno", "magma", "cividis", "RdBu", "Blues", "YlOrRd"], + index=0 + ) + + try: + # Create heatmap using results.plot_heatmap + fig = get_plotly_fig( + results.plot_heatmap, + variable=variable_name, + heatmap_timeframes=timeframes, + heatmap_timesteps_per_frame=timesteps, + color_map=color_map + ) + + st.plotly_chart(fig, use_container_width=True) + except Exception as e: + st.error(f"Error creating heatmap: {e}") + else: + # Regular time series plot + try: + fig = get_plotly_fig( + plotting.with_plotly, + data=var_solution.to_dataframe(), + mode='area', + title=f'Variable: {variable_name}', + ) + fig.update_layout(height=300) + + st.plotly_chart(fig, use_container_width=True) + except Exception as e: + st.error(f'Error displaying variable {variable_name}: {e}') df = var_solution.to_dataframe() @@ -433,26 +584,27 @@ def get_calculation_results(folder, name): variable_name = st.selectbox("Select a variable:", time_vars) if variable_name: - # Configure heatmap settings + # Configure heatmap settings in one row st.subheader("Heatmap Settings") - col1, col2, col3 = st.columns(3) + # All options in 
a single row + cols = st.columns(3) - with col1: + with cols[0]: timeframes = st.selectbox( "Timeframe grouping:", ["YS", "MS", "W", "D", "h", "15min", "min"], - index=3 # Default to "W" + index=3 # Default to "D" ) - with col2: + with cols[1]: timesteps = st.selectbox( "Timesteps per frame:", ["W", "D", "h", "15min", "min"], index=2 # Default to "h" ) - with col3: + with cols[2]: color_map = st.selectbox( "Color map:", ["portland", "viridis", "plasma", "inferno", "magma", "cividis", "RdBu", "Blues", "YlOrRd"], From 11d393f44b007f1f025df59058354559bb16201b Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 17 Mar 2025 11:28:53 +0100 Subject: [PATCH 58/87] Pack logic for displaying vars into function --- flixOpt/explorer_app.py | 551 +++++++++++++++------------------------- 1 file changed, 208 insertions(+), 343 deletions(-) diff --git a/flixOpt/explorer_app.py b/flixOpt/explorer_app.py index 616fa9091..a11c03f42 100644 --- a/flixOpt/explorer_app.py +++ b/flixOpt/explorer_app.py @@ -45,6 +45,165 @@ def get_plotly_fig(plot_func, *args, **kwargs): # Call the plotting function return plot_func(*args, **kwargs) +# Reusable function to display variables +def display_variables(variables_dict, prefix=""): + """ + Display variables from a dictionary with options for visualization + + Args: + variables_dict: Dictionary of variables + prefix: Prefix for widget keys to avoid collisions + """ + # Add a filter option + variable_filter = st.text_input("Filter variables by name:", key=f"{prefix}_filter") + + # Get all variables and apply filter + all_variables = list(variables_dict) + + if variable_filter: + filtered_variables = [v for v in all_variables if variable_filter.lower() in v.lower()] + else: + filtered_variables = all_variables + + # Heatmap options in a single row + show_heatmap_col, heatmap_col1, heatmap_col2, heatmap_col3 = st.columns(4) + with show_heatmap_col: + show_heatmap = st.checkbox('Show as heatmap', value=False, 
key=f"{prefix}_heatmap") + with heatmap_col1: + timeframes = st.selectbox( + 'Timeframes', + ['YS', 'MS', 'W', 'D', 'h', '15min', 'min'], + index=3, # Default to "D" + key=f"{prefix}_timeframes" + ) + with heatmap_col2: + timesteps = st.selectbox( + 'Timesteps', + ['W', 'D', 'h', '15min', 'min'], + index=2, # Default to "h" + key=f"{prefix}_timesteps" + ) + with heatmap_col3: + color_map = st.selectbox( + 'Colormap', + ['portland', 'viridis', 'plasma', 'inferno', 'magma', 'cividis', 'RdBu', 'Blues', 'YlOrRd'], + index=0, + key=f"{prefix}_colormap" + ) + + st.write(f"Showing {len(filtered_variables)} of {len(all_variables)} variables") + + # Display all filtered variables directly + for var_name in filtered_variables: + try: + var = variables_dict[var_name] + var_solution = var.solution + + # Add a divider for each variable + st.markdown(f"### {var_name}") + + # Check if this is a time-based variable + if 'time' in var_solution.dims: + if show_heatmap: + try: + # Create heatmap using var_solution + heatmap_data = plotting.heat_map_data_from_df( + var_solution.to_dataframe(var_name), + timeframes, + timesteps, + 'ffill' + ) + + fig = plotting.heat_map_plotly( + heatmap_data, + title=var_name, + color_map=color_map, + xlabel=f'timeframe [{timeframes}]', + ylabel=f'timesteps [{timesteps}]' + ) + + st.plotly_chart(fig, use_container_width=True) + except Exception as e: + st.error(f"Error creating heatmap: {e}") + else: + # Regular time series plot + fig = get_plotly_fig(plotting.with_plotly, data=var_solution.to_dataframe(), mode='area', title=f'Variable: {var_name}') + fig.update_layout(height=300) + st.plotly_chart(fig, use_container_width=True) + + show_datatable = st.checkbox('Show data table', key=f'{prefix}_datatable_{var_name}', value=False) + if show_datatable: + st.dataframe(var_solution.to_dataframe()) + + else: + # Show scalar value + st.write(f"Value: {var_solution.values}") + except Exception as e: + st.error(f"Error displaying variable {var_name}: {e}") + 
+# Reusable function to display effect shares +def display_effect_shares(effect): + """ + Display shares for an effect + + Args: + effect: The effect to display shares for + """ + # List shares + connected_elements = set() + for var_name in effect._variables: + if '->' in var_name: + elem = var_name.split('->')[0] + connected_elements.add(elem) + + if connected_elements: + st.subheader("Element Shares") + + for elem in sorted(connected_elements): + st.markdown(f"### Shares from {elem}") + try: + shares = effect.get_shares_from(elem) + + # Plot shares if time-based + time_shares = [s for s in shares if 'time' in shares[s].solution.dims] + + if time_shares: + df = pd.DataFrame() + for share_name in time_shares: + share_df = shares[share_name].solution.to_dataframe() + df[share_name] = share_df[share_name] + + fig = go.Figure() + for col in df.columns: + fig.add_trace(go.Scatter( + x=df.index, + y=df[col], + mode='lines', + name=col + )) + + fig.update_layout( + title=f"Shares from {elem}", + xaxis_title="Time", + yaxis_title="Share", + height=400 + ) + + st.plotly_chart(fig, use_container_width=True) + else: + # Display as simple table + share_data = [] + for share_name in shares: + share_data.append({ + "Share": share_name, + "Value": float(shares[share_name].solution.values.flatten()[0]) + }) + + if share_data: + st.table(pd.DataFrame(share_data)) + except Exception as e: + st.error(f"Error displaying shares from {elem}: {e}") + # Cache the calculation loading @st.cache_resource def get_calculation_results(folder, name): @@ -190,88 +349,8 @@ def get_calculation_results(folder, name): # Variables tab with tabs[1]: - st.title("Model Variables") - - # Add a filter option - variable_filter = st.text_input("Filter variables by name:") - - # Get all variables and apply filter - all_variables = list(component.variables) - - if variable_filter: - filtered_variables = [v for v in all_variables if variable_filter.lower() in v.lower()] - else: - filtered_variables = 
all_variables - - # Heatmap options in a single row - show_heatmap_col, heatmap_col1, heatmap_col2, heatmap_col3 = st.columns(4) - with show_heatmap_col: - show_heatmap = st.checkbox('Show as heatmap', value=False) - with heatmap_col1: - timeframes = st.selectbox( - 'Timeframes', - ['YS', 'MS', 'W', 'D', 'h', '15min', 'min'], - index=3, # Default to "D" - ) - with heatmap_col2: - timesteps = st.selectbox( - 'Timesteps', - ['W', 'D', 'h', '15min', 'min'], - index=2, # Default to "h" - ) - with heatmap_col3: - color_map = st.selectbox( - 'Colormap', - ['portland', 'viridis', 'plasma', 'inferno', 'magma', 'cividis', 'RdBu', 'Blues', 'YlOrRd'], - index=0, - ) - - st.write(f"Showing {len(filtered_variables)} of {len(all_variables)} variables") - - # Display all filtered variables directly - for var_name in filtered_variables: - try: - var = component.variables[var_name] - var_solution = var.solution - - # Check if this is a time-based variable - if 'time' in var_solution.dims: - if show_heatmap: - try: - # Create heatmap using var_solution - heatmap_data = plotting.heat_map_data_from_df( - var_solution.to_dataframe(var_name), - timeframes, - timesteps, - 'ffill' - ) - - fig = plotting.heat_map_plotly( - heatmap_data, - title=var_name, - color_map=color_map, - xlabel=f'timeframe [{timeframes}]', - ylabel=f'timesteps [{timesteps}]' - ) - - st.plotly_chart(fig, use_container_width=True) - except Exception as e: - st.error(f"Error creating heatmap: {e}") - else: - # Regular time series plot - fig = get_plotly_fig(plotting.with_plotly, data=var_solution.to_dataframe(), mode='area', title=f'Variable: {var_name}') - fig.update_layout(height=300) - st.plotly_chart(fig, use_container_width=True) - - show_datatable = st.checkbox(f'Show data table', key=f'datatable_{var_name}', value=False) - if show_datatable: - st.dataframe(var_solution.to_dataframe()) - - else: - # Show scalar value - st.write(f"{var_name}: {var_solution.values}") - except Exception as e: - st.error(f"Error 
displaying variable {var_name}: {e}") + # Use the reusable function + display_variables(component.variables, prefix=f"comp_{component_name}") # Buses page elif selected_page == "Buses": @@ -286,33 +365,40 @@ def get_calculation_results(folder, name): st.header(f"Bus: {bus_name}") - # Show Node Balance - try: - st.subheader("Node Balance") + # Bus tabs + tabs = st.tabs(["Node Balance", "All Variables"]) - # Use built-in plotting method - fig = get_plotly_fig(bus.plot_flow_rates) - df = bus.flow_rates().to_dataframe() - st.plotly_chart(fig, use_container_width=True) + # Node Balance tab + with tabs[0]: + try: + st.subheader("Node Balance") - # Also show as dataframe if requested - if st.checkbox("Show Data Table"): - st.dataframe(df) - except Exception as e: - st.error(f"Error displaying the node balance: {e}") + # Use built-in plotting method + fig = get_plotly_fig(bus.plot_flow_rates) + st.plotly_chart(fig, use_container_width=True) - # Show inputs and outputs - col1, col2 = st.columns(2) + # Also show as dataframe if requested + if st.checkbox("Show Data Table"): + df = bus.flow_rates().to_dataframe() + st.dataframe(df) - with col1: - st.subheader("Inputs") - for input_name in bus.inputs: - st.write(f"- {input_name}") + # Show inputs and outputs + col1, col2 = st.columns(2) + with col1: + st.subheader("Inputs") + for input_name in bus.inputs: + st.write(f"- {input_name}") + with col2: + st.subheader("Outputs") + for output_name in bus.outputs: + st.write(f"- {output_name}") + except Exception as e: + st.error(f"Error displaying the node balance: {e}") - with col2: - st.subheader("Outputs") - for output_name in bus.outputs: - st.write(f"- {output_name}") + # Variables tab + with tabs[1]: + # Use the reusable function + display_variables(bus.variables, prefix=f"bus_{bus_name}") # Effects page elif selected_page == "Effects": @@ -329,147 +415,18 @@ def get_calculation_results(folder, name): st.header(f"Effect: {effect_name}") - # List variables - 
st.subheader("Variables") - - for var_name in effect._variables: - try: - var = effect.variables[var_name] - - # Create an expander for each variable - with st.expander(f"Variable: {var_name}"): - var_solution = var.solution - - # Check if this is a time-based variable - if 'time' in var_solution.dims: - # Plot as heatmap toggle - show_heatmap = st.checkbox("Show as heatmap", key=f"heatmap_{var_name}", value=False) - - if show_heatmap: - # Heatmap options in a single row - heatmap_col1, heatmap_col2, heatmap_col3 = st.columns(3) - with heatmap_col1: - timeframes = st.selectbox( - "Timeframes", - ["YS", "MS", "W", "D", "h", "15min", "min"], - index=3, # Default to "D" - key=f"timeframes_{var_name}" - ) - with heatmap_col2: - timesteps = st.selectbox( - "Timesteps", - ["W", "D", "h", "15min", "min"], - index=2, # Default to "h" - key=f"timesteps_{var_name}" - ) - with heatmap_col3: - color_map = st.selectbox( - "Colormap", - ["portland", "viridis", "plasma", "inferno", "magma", "cividis", "RdBu", "Blues", "YlOrRd"], - index=0, - key=f"colormap_{var_name}" - ) - - try: - # Create heatmap using var_solution - heatmap_data = plotting.heat_map_data_from_df( - var_solution.to_dataframe(var_name), - timeframes, - timesteps, - 'ffill' - ) - - fig = plotting.heat_map_plotly( - heatmap_data, - title=var_name, - color_map=color_map, - xlabel=f'timeframe [{timeframes}]', - ylabel=f'timesteps [{timesteps}]' - ) - - st.plotly_chart(fig, use_container_width=True) - except Exception as e: - st.error(f"Error creating heatmap: {e}") - else: - # Regular time series plot - df = var_solution.to_dataframe() - - fig = go.Figure() - fig.add_trace(go.Scatter( - x=df.index, - y=df[var_name], - mode='lines', - name=var_name - )) - - fig.update_layout( - title=f"{var_name} Time Series", - xaxis_title="Time", - yaxis_title="Value", - height=300 - ) - - st.plotly_chart(fig, use_container_width=True) - else: - # Show scalar value - st.write(f"Value: {var_solution.values}") - except Exception as 
e: - st.error(f"Error displaying variable {var_name}: {e}") - - # List shares - connected_elements = set() - for var_name in effect._variables: - if '->' in var_name: - elem = var_name.split('->')[0] - connected_elements.add(elem) - - if connected_elements: - st.subheader("Element Shares") - - for elem in sorted(connected_elements): - with st.expander(f"Shares from {elem}"): - try: - shares = effect.get_shares_from(elem) - - # Plot shares if time-based - time_shares = [s for s in shares if 'time' in shares[s].solution.dims] - - if time_shares: - df = pd.DataFrame() - for share_name in time_shares: - share_df = shares[share_name].solution.to_dataframe() - df[share_name] = share_df[share_name] - - fig = go.Figure() - for col in df.columns: - fig.add_trace(go.Scatter( - x=df.index, - y=df[col], - mode='lines', - name=col - )) - - fig.update_layout( - title=f"Shares from {elem}", - xaxis_title="Time", - yaxis_title="Share", - height=400 - ) - - st.plotly_chart(fig, use_container_width=True) - else: - # Display as simple table - share_data = [] - for share_name in shares: - share_data.append({ - "Share": share_name, - "Value": float(shares[share_name].solution.values.flatten()[0]) - }) - - if share_data: - st.table(pd.DataFrame(share_data)) - except Exception as e: - st.error(f"Error displaying shares from {elem}: {e}") + # Effect tabs + tabs = st.tabs(["Variables", "Element Shares"]) + + # Variables tab + with tabs[0]: + # Use the reusable function + display_variables(effect.variables, prefix=f"effect_{effect_name}") + + # Shares tab + with tabs[1]: + # Use the reusable function + display_effect_shares(effect) else: st.info("No effects available in this calculation.") @@ -477,100 +434,8 @@ def get_calculation_results(folder, name): elif selected_page == "Variables": st.title("Model Variables") - # Add a filter option - variable_filter = st.text_input("Filter variables by name:") - - # Get all variables and apply filter - all_variables = list(results.model.variables) - 
- if variable_filter: - filtered_variables = [v for v in all_variables if variable_filter.lower() in v.lower()] - else: - filtered_variables = all_variables - - st.write(f"Showing {len(filtered_variables)} of {len(all_variables)} variables") - - # Variable selection - variable_name = st.selectbox("Select a variable:", filtered_variables) - - if variable_name: - try: - variable = results.model.variables[variable_name] - var_solution = variable.solution - - st.header(f"Variable: {variable_name}") - - # Visualization based on dimensionality - if 'time' in var_solution.dims: - # Plot as heatmap toggle - show_heatmap = st.checkbox("Show as heatmap", value=False) - - if show_heatmap: - # Heatmap options in a single row - heatmap_cols = st.columns(3) - with heatmap_cols[0]: - timeframes = st.selectbox( - "Timeframes", - ["YS", "MS", "W", "D", "h", "15min", "min"], - index=3 # Default to "D" - ) - with heatmap_cols[1]: - timesteps = st.selectbox( - "Timesteps", - ["W", "D", "h", "15min", "min"], - index=2 # Default to "h" - ) - with heatmap_cols[2]: - color_map = st.selectbox( - "Colormap", - ["portland", "viridis", "plasma", "inferno", "magma", "cividis", "RdBu", "Blues", "YlOrRd"], - index=0 - ) - - try: - # Create heatmap using results.plot_heatmap - fig = get_plotly_fig( - results.plot_heatmap, - variable=variable_name, - heatmap_timeframes=timeframes, - heatmap_timesteps_per_frame=timesteps, - color_map=color_map - ) - - st.plotly_chart(fig, use_container_width=True) - except Exception as e: - st.error(f"Error creating heatmap: {e}") - else: - # Regular time series plot - try: - fig = get_plotly_fig( - plotting.with_plotly, - data=var_solution.to_dataframe(), - mode='area', - title=f'Variable: {variable_name}', - ) - fig.update_layout(height=300) - - st.plotly_chart(fig, use_container_width=True) - except Exception as e: - st.error(f'Error displaying variable {variable_name}: {e}') - - df = var_solution.to_dataframe() - - if st.checkbox("Show data as table"): - 
st.dataframe(df) - else: - # Non-time series data - st.subheader("Values") - st.write(var_solution.values.item()) - - # Basic info - st.subheader("Stats") - st.write(f"**Dimensions:** {', '.join(var_solution.dims)}") - st.write(f"**Shape:** {var_solution.shape}") - - except Exception as e: - st.error(f"Error displaying variable {variable_name}: {e}") + # Use the reusable function for model variables + display_variables(results.model.variables, prefix="model") # Heatmaps page elif selected_page == "Heatmaps": @@ -588,23 +453,23 @@ def get_calculation_results(folder, name): st.subheader("Heatmap Settings") # All options in a single row - cols = st.columns(3) + col1, col2, col3 = st.columns(3) - with cols[0]: + with col1: timeframes = st.selectbox( "Timeframe grouping:", ["YS", "MS", "W", "D", "h", "15min", "min"], index=3 # Default to "D" ) - with cols[1]: + with col2: timesteps = st.selectbox( "Timesteps per frame:", ["W", "D", "h", "15min", "min"], index=2 # Default to "h" ) - with cols[2]: + with col3: color_map = st.selectbox( "Color map:", ["portland", "viridis", "plasma", "inferno", "magma", "cividis", "RdBu", "Blues", "YlOrRd"], From 18e49d63396a83e8711d8d11c7fe350a054d2e78 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 17 Mar 2025 11:43:01 +0100 Subject: [PATCH 59/87] SImplify --- flixOpt/explorer_app.py | 152 ++-------------------------------------- 1 file changed, 4 insertions(+), 148 deletions(-) diff --git a/flixOpt/explorer_app.py b/flixOpt/explorer_app.py index a11c03f42..a49235094 100644 --- a/flixOpt/explorer_app.py +++ b/flixOpt/explorer_app.py @@ -141,69 +141,6 @@ def display_variables(variables_dict, prefix=""): except Exception as e: st.error(f"Error displaying variable {var_name}: {e}") -# Reusable function to display effect shares -def display_effect_shares(effect): - """ - Display shares for an effect - - Args: - effect: The effect to display shares for - """ - # List shares - connected_elements = 
set() - for var_name in effect._variables: - if '->' in var_name: - elem = var_name.split('->')[0] - connected_elements.add(elem) - - if connected_elements: - st.subheader("Element Shares") - - for elem in sorted(connected_elements): - st.markdown(f"### Shares from {elem}") - try: - shares = effect.get_shares_from(elem) - - # Plot shares if time-based - time_shares = [s for s in shares if 'time' in shares[s].solution.dims] - - if time_shares: - df = pd.DataFrame() - for share_name in time_shares: - share_df = shares[share_name].solution.to_dataframe() - df[share_name] = share_df[share_name] - - fig = go.Figure() - for col in df.columns: - fig.add_trace(go.Scatter( - x=df.index, - y=df[col], - mode='lines', - name=col - )) - - fig.update_layout( - title=f"Shares from {elem}", - xaxis_title="Time", - yaxis_title="Share", - height=400 - ) - - st.plotly_chart(fig, use_container_width=True) - else: - # Display as simple table - share_data = [] - for share_name in shares: - share_data.append({ - "Share": share_name, - "Value": float(shares[share_name].solution.values.flatten()[0]) - }) - - if share_data: - st.table(pd.DataFrame(share_data)) - except Exception as e: - st.error(f"Error displaying shares from {elem}: {e}") - # Cache the calculation loading @st.cache_resource def get_calculation_results(folder, name): @@ -406,90 +343,9 @@ def get_calculation_results(folder, name): # Effect selector effect_names = list(results.effects.keys()) + effect_name = st.selectbox("Select an effect:", sorted(effect_names), index=0) + effect = results.effects[effect_name] - if effect_names: - effect_name = st.selectbox("Select an effect:", sorted(effect_names)) + st.header(f"Effect: {effect_name}") - if effect_name: - effect = results.effects[effect_name] - - st.header(f"Effect: {effect_name}") - - # Effect tabs - tabs = st.tabs(["Variables", "Element Shares"]) - - # Variables tab - with tabs[0]: - # Use the reusable function - display_variables(effect.variables, 
prefix=f"effect_{effect_name}") - - # Shares tab - with tabs[1]: - # Use the reusable function - display_effect_shares(effect) - else: - st.info("No effects available in this calculation.") - -# Variables page -elif selected_page == "Variables": - st.title("Model Variables") - - # Use the reusable function for model variables - display_variables(results.model.variables, prefix="model") - -# Heatmaps page -elif selected_page == "Heatmaps": - st.title("Heatmap Generator") - - # Get time-based variables - time_vars = [var_name for var_name, var in results.model.variables.items() - if 'time' in var.solution.dims] - - # Variable selection - variable_name = st.selectbox("Select a variable:", time_vars) - - if variable_name: - # Configure heatmap settings in one row - st.subheader("Heatmap Settings") - - # All options in a single row - col1, col2, col3 = st.columns(3) - - with col1: - timeframes = st.selectbox( - "Timeframe grouping:", - ["YS", "MS", "W", "D", "h", "15min", "min"], - index=3 # Default to "D" - ) - - with col2: - timesteps = st.selectbox( - "Timesteps per frame:", - ["W", "D", "h", "15min", "min"], - index=2 # Default to "h" - ) - - with col3: - color_map = st.selectbox( - "Color map:", - ["portland", "viridis", "plasma", "inferno", "magma", "cividis", "RdBu", "Blues", "YlOrRd"], - index=0 - ) - - # Generate button - if st.button("Generate Heatmap"): - try: - st.subheader(f"Heatmap for {variable_name}") - - # Use the built-in heatmap function - fig = get_plotly_fig( - results.plot_heatmap, - variable=variable_name, - heatmap_timeframes=timeframes, - heatmap_timesteps_per_frame=timesteps, - color_map=color_map - ) - - st.plotly_chart(fig, use_container_width=True) - except Exception as e: - st.error(f"Error generating heatmap: {e}") + display_variables(effect.variables, prefix=f"effect_{effect_name}") From 4b3c02dd158a7b1662e7184fd0d626a5f3276292 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 17 Mar 2025 
11:51:11 +0100 Subject: [PATCH 60/87] Make explorer more portable --- flixOpt/explorer_app.py | 409 ++++++++++++++++++++-------------------- 1 file changed, 206 insertions(+), 203 deletions(-) diff --git a/flixOpt/explorer_app.py b/flixOpt/explorer_app.py index a49235094..fb45fab18 100644 --- a/flixOpt/explorer_app.py +++ b/flixOpt/explorer_app.py @@ -13,27 +13,6 @@ from flixOpt import plotting -# Parse command line arguments -if __name__ == "__main__": - parser = argparse.ArgumentParser(description='FlixOpt Results Explorer') - parser.add_argument('folder', type=str, help='Results folder path') - parser.add_argument('name', type=str, help='Calculation name') - args = parser.parse_args() - - results_folder = args.folder - results_name = args.name -else: - # Default values when imported as module - results_folder = "." - results_name = "results" - -# Set page config -st.set_page_config( - page_title="FlixOpt Results Explorer", - page_icon="📊", - layout="wide", - initial_sidebar_state="expanded" -) # Helper function to capture plotly figures def get_plotly_fig(plot_func, *args, **kwargs): @@ -141,9 +120,202 @@ def display_variables(variables_dict, prefix=""): except Exception as e: st.error(f"Error displaying variable {var_name}: {e}") -# Cache the calculation loading -@st.cache_resource -def get_calculation_results(folder, name): +def explore_results_app(results): + """ + Main function to explore calculation results + + Args: + results: A CalculationResults object to explore + """ + # Set page config + st.set_page_config( + page_title="FlixOpt Results Explorer", + page_icon="📊", + layout="wide", + initial_sidebar_state="expanded" + ) + + # Create sidebar for navigation + st.sidebar.title("FlixOpt Results Explorer") + pages = ["Overview", "Components", "Buses", "Effects", "Variables", "Heatmaps"] + selected_page = st.sidebar.radio("Navigation", pages) + + # Overview page + if selected_page == "Overview": + st.title("Calculation Overview") + + # Model information + 
st.header("Model Information") + col1, col2 = st.columns(2) + + with col1: + st.write(f"**Name:** {results.name}") + st.write(f"**Folder:** {results.folder}") + st.write(f"**Time Steps:** {len(results.timesteps_extra)}") + if len(results.timesteps_extra) > 0: + st.write(f"**Time Range:** {results.timesteps_extra[0]} to {results.timesteps_extra[-1]}") + + with col2: + st.write(f"**Components:** {len(results.components)}") + st.write(f"**Buses:** {len(results.buses)}") + st.write(f"**Effects:** {len(results.effects)}") + st.write(f"**Storage Components:** {len(results.storages)}") + + # Additional info + if hasattr(results, 'infos') and results.infos: + st.subheader("Additional Information") + st.json(results.infos) + + # Network info + if hasattr(results, 'network_infos') and results.network_infos: + st.subheader("Network Information") + st.json(results.network_infos) + + # Network visualization + st.header("Network Structure") + + # Show component connections + st.subheader("Component Connections") + connections_data = [] + + for comp_name, comp in results.components.items(): + for bus_name in comp.inputs + comp.outputs: + connections_data.append({ + "Component": comp_name, + "Bus": bus_name, + "Type": "Input" if bus_name in comp.inputs else "Output" + }) + + if connections_data: + st.dataframe(pd.DataFrame(connections_data)) + + # Components page + elif selected_page == "Components": + st.title("Components") + + # Component selector + component_names = list(results.components.keys()) + + # Allow grouping by storage/non-storage + show_storage_first = st.checkbox("Show storage components first", value=True) + + if show_storage_first: + storage_components = [comp.label for comp in results.storages] + non_storage_components = [name for name in component_names if name not in storage_components] + sorted_components = storage_components + non_storage_components + else: + sorted_components = sorted(component_names) + + component_name = st.selectbox("Select a component:", 
sorted_components) + + if component_name: + component = results.components[component_name] + + st.header(f"Component: {component_name}") + if component.is_storage: + st.info("This is a storage component") + + # Component tabs + tabs = st.tabs(["Node Balance", "All Variables"]) + + # Node Balance tab + with tabs[0]: + try: + st.subheader("Node Balance") + + # Use built-in plotting method + if component.is_storage: + fig = get_plotly_fig(component.plot_charge_state) + else: + fig = get_plotly_fig(component.plot_flow_rates) + + st.plotly_chart(fig, use_container_width=True) + + # Also show as dataframe if requested + if st.checkbox("Show Data Table"): + if component.is_storage: + flow_rates = component.charge_state_and_flow_rates().to_dataframe() + else: + flow_rates = component.flow_rates().to_dataframe() + st.dataframe(flow_rates) + except Exception as e: + st.error(f"Error displaying the node balance: {e}") + + # Variables tab + with tabs[1]: + # Use the reusable function + display_variables(component.variables, prefix=f"comp_{component_name}") + + # Buses page + elif selected_page == "Buses": + st.title("Buses") + + # Bus selector + bus_names = list(results.buses.keys()) + bus_name = st.selectbox("Select a bus:", sorted(bus_names)) + + if bus_name: + bus = results.buses[bus_name] + + st.header(f"Bus: {bus_name}") + + # Bus tabs + tabs = st.tabs(["Node Balance", "All Variables"]) + + # Node Balance tab + with tabs[0]: + try: + st.subheader("Node Balance") + + # Use built-in plotting method + fig = get_plotly_fig(bus.plot_flow_rates) + st.plotly_chart(fig, use_container_width=True) + + # Also show as dataframe if requested + if st.checkbox("Show Data Table"): + df = bus.flow_rates().to_dataframe() + st.dataframe(df) + + # Show inputs and outputs + col1, col2 = st.columns(2) + with col1: + st.subheader("Inputs") + for input_name in bus.inputs: + st.write(f"- {input_name}") + with col2: + st.subheader("Outputs") + for output_name in bus.outputs: + st.write(f"- 
{output_name}") + except Exception as e: + st.error(f"Error displaying the node balance: {e}") + + # Variables tab + with tabs[1]: + # Use the reusable function + display_variables(bus.variables, prefix=f"bus_{bus_name}") + + # Effects page + elif selected_page == "Effects": + st.title("Effects") + + # Effect selector + effect_names = list(results.effects.keys()) + effect_name = st.selectbox("Select an effect:", sorted(effect_names), index=0) + effect = results.effects[effect_name] + + st.header(f"Effect: {effect_name}") + + display_variables(effect.variables, prefix=f"effect_{effect_name}") + + +def run_explorer_from_file(folder, name): + """ + Run the explorer by loading results from a file + + Args: + folder: Folder path containing the calculation results + name: Name of the calculation + """ # Import the relevant modules try: # Try different import approaches @@ -166,186 +338,17 @@ def get_calculation_results(folder, name): from flixOpt.results import CalculationResults # Load from file - return CalculationResults.from_file(folder, name) + results = CalculationResults.from_file(folder, name) + explore_results_app(results) except Exception as e: st.error(f"Error loading calculation results: {e}") - return None - -# Load the calculation results -results = get_calculation_results(results_folder, results_name) - -if results is None: - st.error("Failed to load calculation results.") - st.stop() - -# Create sidebar for navigation -st.sidebar.title("FlixOpt Results Explorer") -pages = ["Overview", "Components", "Buses", "Effects", "Variables", "Heatmaps"] -selected_page = st.sidebar.radio("Navigation", pages) - -# Overview page -if selected_page == "Overview": - st.title("Calculation Overview") - - # Model information - st.header("Model Information") - col1, col2 = st.columns(2) - - with col1: - st.write(f"**Name:** {results.name}") - st.write(f"**Folder:** {results.folder}") - st.write(f"**Time Steps:** {len(results.timesteps_extra)}") - if 
len(results.timesteps_extra) > 0: - st.write(f"**Time Range:** {results.timesteps_extra[0]} to {results.timesteps_extra[-1]}") - - with col2: - st.write(f"**Components:** {len(results.components)}") - st.write(f"**Buses:** {len(results.buses)}") - st.write(f"**Effects:** {len(results.effects)}") - st.write(f"**Storage Components:** {len(results.storages)}") - - # Additional info - if hasattr(results, 'infos') and results.infos: - st.subheader("Additional Information") - st.json(results.infos) - - # Network info - if hasattr(results, 'network_infos') and results.network_infos: - st.subheader("Network Information") - st.json(results.network_infos) - - # Network visualization - st.header("Network Structure") - - # Show component connections - st.subheader("Component Connections") - connections_data = [] - - for comp_name, comp in results.components.items(): - for bus_name in comp.inputs + comp.outputs: - connections_data.append({ - "Component": comp_name, - "Bus": bus_name, - "Type": "Input" if bus_name in comp.inputs else "Output" - }) - - if connections_data: - st.dataframe(pd.DataFrame(connections_data)) - -# Components page -elif selected_page == "Components": - st.title("Components") - - # Component selector - component_names = list(results.components.keys()) - - # Allow grouping by storage/non-storage - show_storage_first = st.checkbox("Show storage components first", value=True) - - if show_storage_first: - storage_components = [comp.label for comp in results.storages] - non_storage_components = [name for name in component_names if name not in storage_components] - sorted_components = storage_components + non_storage_components - else: - sorted_components = sorted(component_names) - - component_name = st.selectbox("Select a component:", sorted_components) - - if component_name: - component = results.components[component_name] - - st.header(f"Component: {component_name}") - if component.is_storage: - st.info("This is a storage component") - - # Component tabs - 
tabs = st.tabs(["Node Balance", "All Variables"]) - - # Node Balance tab - with tabs[0]: - try: - st.subheader("Node Balance") - - # Use built-in plotting method - if component.is_storage: - fig = get_plotly_fig(component.plot_charge_state) - else: - fig = get_plotly_fig(component.plot_flow_rates) + st.stop() - st.plotly_chart(fig, use_container_width=True) - - # Also show as dataframe if requested - if st.checkbox("Show Data Table"): - if component.is_storage: - flow_rates = component.charge_state_and_flow_rates().to_dataframe() - else: - flow_rates = component.flow_rates().to_dataframe() - st.dataframe(flow_rates) - except Exception as e: - st.error(f"Error displaying the node balance: {e}") - - # Variables tab - with tabs[1]: - # Use the reusable function - display_variables(component.variables, prefix=f"comp_{component_name}") - -# Buses page -elif selected_page == "Buses": - st.title("Buses") - - # Bus selector - bus_names = list(results.buses.keys()) - bus_name = st.selectbox("Select a bus:", sorted(bus_names)) - - if bus_name: - bus = results.buses[bus_name] - - st.header(f"Bus: {bus_name}") - - # Bus tabs - tabs = st.tabs(["Node Balance", "All Variables"]) +# Entry point for module execution +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='FlixOpt Results Explorer') + parser.add_argument('folder', type=str, help='Results folder path') + parser.add_argument('name', type=str, help='Calculation name') + args = parser.parse_args() - # Node Balance tab - with tabs[0]: - try: - st.subheader("Node Balance") - - # Use built-in plotting method - fig = get_plotly_fig(bus.plot_flow_rates) - st.plotly_chart(fig, use_container_width=True) - - # Also show as dataframe if requested - if st.checkbox("Show Data Table"): - df = bus.flow_rates().to_dataframe() - st.dataframe(df) - - # Show inputs and outputs - col1, col2 = st.columns(2) - with col1: - st.subheader("Inputs") - for input_name in bus.inputs: - st.write(f"- {input_name}") - with col2: - 
st.subheader("Outputs") - for output_name in bus.outputs: - st.write(f"- {output_name}") - except Exception as e: - st.error(f"Error displaying the node balance: {e}") - - # Variables tab - with tabs[1]: - # Use the reusable function - display_variables(bus.variables, prefix=f"bus_{bus_name}") - -# Effects page -elif selected_page == "Effects": - st.title("Effects") - - # Effect selector - effect_names = list(results.effects.keys()) - effect_name = st.selectbox("Select an effect:", sorted(effect_names), index=0) - effect = results.effects[effect_name] - - st.header(f"Effect: {effect_name}") - - display_variables(effect.variables, prefix=f"effect_{effect_name}") + run_explorer_from_file(args.folder, args.name) From c4fbdcb8d5cc1af56561b5ee215b8ff22ae7b85e Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 17 Mar 2025 12:03:42 +0100 Subject: [PATCH 61/87] Make explorer more portable --- flixOpt/explorer_app.py | 1 + flixOpt/results.py | 2 +- flixOpt/results_explorer.py | 2 ++ 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/flixOpt/explorer_app.py b/flixOpt/explorer_app.py index fb45fab18..721014b58 100644 --- a/flixOpt/explorer_app.py +++ b/flixOpt/explorer_app.py @@ -120,6 +120,7 @@ def display_variables(variables_dict, prefix=""): except Exception as e: st.error(f"Error displaying variable {var_name}: {e}") + def explore_results_app(results): """ Main function to explore calculation results diff --git a/flixOpt/results.py b/flixOpt/results.py index 4f15b84ef..6233fdd86 100644 --- a/flixOpt/results.py +++ b/flixOpt/results.py @@ -43,7 +43,7 @@ class CalculationResults: The duration of each timestep in hours. 
""" - explore_results = explore_results + launch_dashboard = explore_results @classmethod def from_file(cls, folder: Union[str, pathlib.Path], name: str): diff --git a/flixOpt/results_explorer.py b/flixOpt/results_explorer.py index 359759885..a880775aa 100644 --- a/flixOpt/results_explorer.py +++ b/flixOpt/results_explorer.py @@ -52,6 +52,8 @@ def explore_results(self, port=8501): self.name, ] + self.to_file() # Save results to file. This is needed to be able to launch the app from the file. # TODO + # Launch the Streamlit app process = subprocess.Popen(cmd) From be465f2c3905e130a7f4a98663968006f8f527b4 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 17 Mar 2025 13:13:41 +0100 Subject: [PATCH 62/87] Improve dashoboard --- flixOpt/explorer_app.py | 25 ++++++++++++++++++------- flixOpt/plotting.py | 2 +- 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/flixOpt/explorer_app.py b/flixOpt/explorer_app.py index 721014b58..0f358eb78 100644 --- a/flixOpt/explorer_app.py +++ b/flixOpt/explorer_app.py @@ -100,15 +100,25 @@ def display_variables(variables_dict, prefix=""): xlabel=f'timeframe [{timeframes}]', ylabel=f'timesteps [{timesteps}]' ) + fig.update_layout( + margin=dict(l=50, r=100, t=50, b=50), # Extra space for colorbar + coloraxis_colorbar=dict( + lenmode='fraction', + len=0.8, + title='Scale', + tickvals=[0, 5, 10], # Force ticks at min, mid, max + ticktext=['0 (Min)', '5', '10 (Max)'], # Custom labels + ), # Make colorbar bigger + ) - st.plotly_chart(fig, use_container_width=True) + st.plotly_chart(fig, theme=None, use_container_width=True) except Exception as e: st.error(f"Error creating heatmap: {e}") else: # Regular time series plot fig = get_plotly_fig(plotting.with_plotly, data=var_solution.to_dataframe(), mode='area', title=f'Variable: {var_name}') fig.update_layout(height=300) - st.plotly_chart(fig, use_container_width=True) + st.plotly_chart(fig, theme='streamlit', use_container_width=True) 
show_datatable = st.checkbox('Show data table', key=f'{prefix}_datatable_{var_name}', value=False) if show_datatable: @@ -180,11 +190,12 @@ def explore_results_app(results): connections_data = [] for comp_name, comp in results.components.items(): - for bus_name in comp.inputs + comp.outputs: + for flow_name in comp.inputs + comp.outputs: connections_data.append({ "Component": comp_name, - "Bus": bus_name, - "Type": "Input" if bus_name in comp.inputs else "Output" + "Flow": flow_name, + "Direction": "from" if flow_name in comp.inputs else "to", + "Bus": '?' #TODO }) if connections_data: @@ -230,7 +241,7 @@ def explore_results_app(results): else: fig = get_plotly_fig(component.plot_flow_rates) - st.plotly_chart(fig, use_container_width=True) + st.plotly_chart(fig, theme='streamlit', use_container_width=True) # Also show as dataframe if requested if st.checkbox("Show Data Table"): @@ -270,7 +281,7 @@ def explore_results_app(results): # Use built-in plotting method fig = get_plotly_fig(bus.plot_flow_rates) - st.plotly_chart(fig, use_container_width=True) + st.plotly_chart(fig, theme=None, use_container_width=True) # Also show as dataframe if requested if st.checkbox("Show Data Table"): diff --git a/flixOpt/plotting.py b/flixOpt/plotting.py index 9a90c664d..36faab34b 100644 --- a/flixOpt/plotting.py +++ b/flixOpt/plotting.py @@ -389,7 +389,7 @@ def heat_map_plotly( zmin=color_bar_min, zmax=color_bar_max, colorbar=dict( - title=dict(text='Color Bar Label', side='right'), + title=dict(text='Scale', side='right'), orientation='h', xref='container', yref='container', From 8b265e256d132c9edb18e63b9a61a68aa1090d08 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 17 Mar 2025 13:22:05 +0100 Subject: [PATCH 63/87] Improve dashoboard --- flixOpt/explorer_app.py | 48 ++++++++++++++++++++--------------------- 1 file changed, 23 insertions(+), 25 deletions(-) diff --git a/flixOpt/explorer_app.py b/flixOpt/explorer_app.py index 
0f358eb78..9a7c31cf8 100644 --- a/flixOpt/explorer_app.py +++ b/flixOpt/explorer_app.py @@ -148,7 +148,7 @@ def explore_results_app(results): # Create sidebar for navigation st.sidebar.title("FlixOpt Results Explorer") - pages = ["Overview", "Components", "Buses", "Effects", "Variables", "Heatmaps"] + pages = ["Overview", "Components", "Buses", "Effects"] selected_page = st.sidebar.radio("Navigation", pages) # Overview page @@ -172,34 +172,32 @@ def explore_results_app(results): st.write(f"**Effects:** {len(results.effects)}") st.write(f"**Storage Components:** {len(results.storages)}") - # Additional info - if hasattr(results, 'infos') and results.infos: - st.subheader("Additional Information") - st.json(results.infos) - - # Network info - if hasattr(results, 'network_infos') and results.network_infos: - st.subheader("Network Information") - st.json(results.network_infos) - # Network visualization st.header("Network Structure") + tabs = st.tabs(["Component Connections", "Details of Flow System", "Network Information"]) # Show component connections - st.subheader("Component Connections") - connections_data = [] - - for comp_name, comp in results.components.items(): - for flow_name in comp.inputs + comp.outputs: - connections_data.append({ - "Component": comp_name, - "Flow": flow_name, - "Direction": "from" if flow_name in comp.inputs else "to", - "Bus": '?' #TODO - }) - - if connections_data: - st.dataframe(pd.DataFrame(connections_data)) + with tabs[0]: + connections_data = [] + + for comp_name, comp in results.components.items(): + for flow_name in comp.inputs + comp.outputs: + connections_data.append({ + "Component": comp_name, + "Flow": flow_name, + "Direction": "from" if flow_name in comp.inputs else "to", + "Bus": '?' 
#TODO + }) + + if connections_data: + st.dataframe(pd.DataFrame(connections_data)) + + with tabs[1]: + st.json(results.infos) + + with tabs[2]: + st.json(results.network_infos) + # Components page elif selected_page == "Components": From 7a9ef36a3a26a9b47e5d0db7a92303bd81046fea Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 17 Mar 2025 13:24:12 +0100 Subject: [PATCH 64/87] Add dashboard to example --- examples/01_Simple/simple_example.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/examples/01_Simple/simple_example.py b/examples/01_Simple/simple_example.py index 2c05601bf..ab2c50da7 100644 --- a/examples/01_Simple/simple_example.py +++ b/examples/01_Simple/simple_example.py @@ -114,4 +114,6 @@ # Convert the results for the storage component to a dataframe and display df = calculation.results['Storage'].charge_state_and_flow_rates() print(df) - calculation.save_results(save_flow_system=True) + + # Launch a dashboard to explore the results + calculation.results.launch_dashboard() From 09e5e88348faa5f5c6fa88edb1f9dea3f3eae372 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 17 Mar 2025 13:25:57 +0100 Subject: [PATCH 65/87] Ruff check --- flixOpt/results_explorer.py | 5 ----- scripts/gen_ref_pages.py | 8 ++++---- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/flixOpt/results_explorer.py b/flixOpt/results_explorer.py index a880775aa..e42d32b10 100644 --- a/flixOpt/results_explorer.py +++ b/flixOpt/results_explorer.py @@ -21,11 +21,6 @@ def explore_results(self, port=8501): Returns: subprocess.Popen: The running Streamlit process """ - import subprocess - import sys - import webbrowser - import os - from pathlib import Path # Find explorer app path current_dir = os.path.dirname(os.path.abspath(__file__)) diff --git a/scripts/gen_ref_pages.py b/scripts/gen_ref_pages.py index 8eda70037..8a1b2ff1d 100644 --- a/scripts/gen_ref_pages.py +++ 
b/scripts/gen_ref_pages.py @@ -1,7 +1,7 @@ """Generate the code reference pages and navigation.""" -from pathlib import Path import sys +from pathlib import Path import mkdocs_gen_files @@ -48,8 +48,8 @@ with mkdocs_gen_files.open(f"{api_dir}/index.md", "w") as index_file: index_file.write("# API Reference\n\n") index_file.write( - f"This section contains the documentation for all modules and classes in flixOpt.\n" - f"For more information on how to use the classes and functions, see the [Concepts & Math](../concepts-and-math/index.md) section.\n") + "This section contains the documentation for all modules and classes in flixOpt.\n" + "For more information on how to use the classes and functions, see the [Concepts & Math](../concepts-and-math/index.md) section.\n") with mkdocs_gen_files.open(f"{api_dir}/SUMMARY.md", "w") as nav_file: - nav_file.writelines(nav.build_literate_nav()) \ No newline at end of file + nav_file.writelines(nav.build_literate_nav()) From 1b14f0cab951a2613aea96a497bd0503676f9c28 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 31 Mar 2025 18:36:53 +0200 Subject: [PATCH 66/87] Bugfixes --- flixopt/explorer_app.py | 6 +++--- flixopt/results_explorer.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/flixopt/explorer_app.py b/flixopt/explorer_app.py index 9a7c31cf8..24d522cef 100644 --- a/flixopt/explorer_app.py +++ b/flixopt/explorer_app.py @@ -11,7 +11,7 @@ import plotly.graph_objects as go import streamlit as st -from flixOpt import plotting +from flixopt import plotting # Helper function to capture plotly figures @@ -334,7 +334,7 @@ def run_explorer_from_file(folder, name): try: from flixopt.results import CalculationResults except ImportError: - from flixOpt.results import CalculationResults + from flixopt.results import CalculationResults except ImportError: # Add potential module paths for path in [os.getcwd(), os.path.dirname(os.path.abspath(__file__))]: @@ -345,7 +345,7 
@@ def run_explorer_from_file(folder, name): try: from flixopt.results import CalculationResults except ImportError: - from flixOpt.results import CalculationResults + from flixopt.results import CalculationResults # Load from file results = CalculationResults.from_file(folder, name) diff --git a/flixopt/results_explorer.py b/flixopt/results_explorer.py index e42d32b10..dc8cb527b 100644 --- a/flixopt/results_explorer.py +++ b/flixopt/results_explorer.py @@ -1,7 +1,7 @@ """ FlixOpt Results Explorer -A module for launching a Streamlit app to explore flixOpt calculation results. +A module for launching a Streamlit app to explore flixopt calculation results. """ import os @@ -30,7 +30,7 @@ def explore_results(self, port=8501): if not os.path.exists(explorer_script): raise FileNotFoundError( f'Explorer app not found at {explorer_script}. ' - 'Please ensure the explorer_app.py file is in the flixOpt package directory.' + 'Please ensure the explorer_app.py file is in the flixopt package directory.' 
) # Run the Streamlit app - the port argument needs to be separate from the script arguments From 374cd8fd9c30986b1f664f934afea18d87aa7daa Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 13 May 2025 20:46:59 +0200 Subject: [PATCH 67/87] Remove plotting wrapper --- flixopt/explorer_app.py | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/flixopt/explorer_app.py b/flixopt/explorer_app.py index 24d522cef..bcadba398 100644 --- a/flixopt/explorer_app.py +++ b/flixopt/explorer_app.py @@ -14,16 +14,6 @@ from flixopt import plotting -# Helper function to capture plotly figures -def get_plotly_fig(plot_func, *args, **kwargs): - """Capture a plotly figure from a plotting function""" - # Add default parameters to ensure the function returns the figure without showing it - kwargs['show'] = False - kwargs['save'] = False - - # Call the plotting function - return plot_func(*args, **kwargs) - # Reusable function to display variables def display_variables(variables_dict, prefix=""): """ @@ -116,7 +106,11 @@ def display_variables(variables_dict, prefix=""): st.error(f"Error creating heatmap: {e}") else: # Regular time series plot - fig = get_plotly_fig(plotting.with_plotly, data=var_solution.to_dataframe(), mode='area', title=f'Variable: {var_name}') + fig = plotting.with_plotly( + data=var_solution.to_dataframe(), + style='stacked_bar', + title=f'Variable: {var_name}', + ) fig.update_layout(height=300) st.plotly_chart(fig, theme='streamlit', use_container_width=True) @@ -235,9 +229,9 @@ def explore_results_app(results): # Use built-in plotting method if component.is_storage: - fig = get_plotly_fig(component.plot_charge_state) + fig = component.plot_charge_state(show=False, save=False) else: - fig = get_plotly_fig(component.plot_flow_rates) + fig = component.plot_flow_rates(show=False, save=False) st.plotly_chart(fig, theme='streamlit', use_container_width=True) @@ -278,7 +272,7 @@ def 
explore_results_app(results): st.subheader("Node Balance") # Use built-in plotting method - fig = get_plotly_fig(bus.plot_flow_rates) + fig = bus.plot_flow_rates(show=False, save=False) st.plotly_chart(fig, theme=None, use_container_width=True) # Also show as dataframe if requested From dc7aff080c39e78b105f7df3b09f4a5b1f62eb15 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 14 May 2025 10:05:12 +0200 Subject: [PATCH 68/87] Improve results_explorer.py --- flixopt/explorer_app.py | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/flixopt/explorer_app.py b/flixopt/explorer_app.py index bcadba398..1828561ba 100644 --- a/flixopt/explorer_app.py +++ b/flixopt/explorer_app.py @@ -166,31 +166,41 @@ def explore_results_app(results): st.write(f"**Effects:** {len(results.effects)}") st.write(f"**Storage Components:** {len(results.storages)}") + # Results summary + st.header('Results Summary') + tabs = st.tabs(list(results.summary.keys())) + for i, key in enumerate(results.summary.keys()): + with tabs[i]: + if isinstance(results.summary[key], dict): + st.json(results.summary[key]) + else: + st.write(results.summary[key]) + # Network visualization st.header("Network Structure") - tabs = st.tabs(["Component Connections", "Details of Flow System", "Network Information"]) + tabs = st.tabs(["Component Connections", "Nodes", "Edges"]) # Show component connections with tabs[0]: connections_data = [] - for comp_name, comp in results.components.items(): - for flow_name in comp.inputs + comp.outputs: + for comp_name, comp in results.flow_system.components.items(): + for flow_name, flow in comp.flows.items(): connections_data.append({ "Component": comp_name, "Flow": flow_name, "Direction": "from" if flow_name in comp.inputs else "to", - "Bus": '?' 
#TODO + "Bus": flow.bus, }) - if connections_data: - st.dataframe(pd.DataFrame(connections_data)) + st.dataframe(pd.DataFrame(connections_data)) + network_infos = results.flow_system.network_infos() with tabs[1]: - st.json(results.infos) + st.json(network_infos[0]) with tabs[2]: - st.json(results.network_infos) + st.json(network_infos[1]) # Components page From c141ed679b3a9be5cb04cb8c887ab51864c3b8b9 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 14 May 2025 10:27:53 +0200 Subject: [PATCH 69/87] Update to new results structure --- flixopt/explorer_app.py | 69 +++++++++++++++++++++-------------------- 1 file changed, 35 insertions(+), 34 deletions(-) diff --git a/flixopt/explorer_app.py b/flixopt/explorer_app.py index 1828561ba..2a428b142 100644 --- a/flixopt/explorer_app.py +++ b/flixopt/explorer_app.py @@ -10,29 +10,28 @@ import plotly.express as px import plotly.graph_objects as go import streamlit as st +import xarray as xr from flixopt import plotting +from flixopt.results import filter_dataset # Reusable function to display variables -def display_variables(variables_dict, prefix=""): +def display_dataset(ds: xr.Dataset, prefix=""): """ Display variables from a dictionary with options for visualization Args: - variables_dict: Dictionary of variables + ds: Dataset to display prefix: Prefix for widget keys to avoid collisions """ # Add a filter option - variable_filter = st.text_input("Filter variables by name:", key=f"{prefix}_filter") + filter_contains = st.text_input("Filter variables by name:", key=f"{prefix}_filter") - # Get all variables and apply filter - all_variables = list(variables_dict) - - if variable_filter: - filtered_variables = [v for v in all_variables if variable_filter.lower() in v.lower()] - else: - filtered_variables = all_variables + filtered_ds = filter_dataset( + ds, + contains=filter_contains if filter_contains else None, + ) # Heatmap options in a single row show_heatmap_col, 
heatmap_col1, heatmap_col2, heatmap_col3 = st.columns(4) @@ -60,32 +59,34 @@ def display_variables(variables_dict, prefix=""): key=f"{prefix}_colormap" ) - st.write(f"Showing {len(filtered_variables)} of {len(all_variables)} variables") + st.write(f"Showing {len(filtered_ds)} of {len(ds)} variables") # Display all filtered variables directly - for var_name in filtered_variables: + for name, da in filtered_ds.data_vars.items(): try: - var = variables_dict[var_name] - var_solution = var.solution # Add a divider for each variable - st.markdown(f"### {var_name}") + st.markdown(f"### {name}") # Check if this is a time-based variable - if 'time' in var_solution.dims: + if len(da.dims) > 0: + if len(da.dims) == 2: + data = da.to_pandas() + else: + data = da.to_dataframe() if show_heatmap: try: # Create heatmap using var_solution heatmap_data = plotting.heat_map_data_from_df( - var_solution.to_dataframe(var_name), + data, timeframes, timesteps, 'ffill' ) fig = plotting.heat_map_plotly( - heatmap_data, - title=var_name, + data, + title=name, color_map=color_map, xlabel=f'timeframe [{timeframes}]', ylabel=f'timesteps [{timesteps}]' @@ -107,22 +108,22 @@ def display_variables(variables_dict, prefix=""): else: # Regular time series plot fig = plotting.with_plotly( - data=var_solution.to_dataframe(), + data, style='stacked_bar', - title=f'Variable: {var_name}', + title=f'Variable: {name}', ) fig.update_layout(height=300) st.plotly_chart(fig, theme='streamlit', use_container_width=True) - show_datatable = st.checkbox('Show data table', key=f'{prefix}_datatable_{var_name}', value=False) + show_datatable = st.checkbox('Show data table', key=f'{prefix}_datatable_{name}', value=False) if show_datatable: - st.dataframe(var_solution.to_dataframe()) + st.dataframe(data) else: # Show scalar value - st.write(f"Value: {var_solution.values}") + st.write(f"Value: {da.item()}") except Exception as e: - st.error(f"Error displaying variable {var_name}: {e}") + st.error(f"Error displaying 
variable {name}: {e}") def explore_results_app(results): @@ -241,24 +242,24 @@ def explore_results_app(results): if component.is_storage: fig = component.plot_charge_state(show=False, save=False) else: - fig = component.plot_flow_rates(show=False, save=False) + fig = component.plot_node_balance(show=False, save=False) st.plotly_chart(fig, theme='streamlit', use_container_width=True) # Also show as dataframe if requested if st.checkbox("Show Data Table"): if component.is_storage: - flow_rates = component.charge_state_and_flow_rates().to_dataframe() + node_balance = component.node_balance_with_charge_state().to_dataframe() else: - flow_rates = component.flow_rates().to_dataframe() - st.dataframe(flow_rates) + node_balance = component.node_balance().to_dataframe() + st.dataframe(node_balance) except Exception as e: st.error(f"Error displaying the node balance: {e}") # Variables tab with tabs[1]: # Use the reusable function - display_variables(component.variables, prefix=f"comp_{component_name}") + display_dataset(component.solution, prefix=f"comp_{component_name}") # Buses page elif selected_page == "Buses": @@ -282,12 +283,12 @@ def explore_results_app(results): st.subheader("Node Balance") # Use built-in plotting method - fig = bus.plot_flow_rates(show=False, save=False) + fig = bus.plot_node_balance(show=False, save=False) st.plotly_chart(fig, theme=None, use_container_width=True) # Also show as dataframe if requested if st.checkbox("Show Data Table"): - df = bus.flow_rates().to_dataframe() + df = bus.node_balance().to_dataframe() st.dataframe(df) # Show inputs and outputs @@ -306,7 +307,7 @@ def explore_results_app(results): # Variables tab with tabs[1]: # Use the reusable function - display_variables(bus.variables, prefix=f"bus_{bus_name}") + display_dataset(bus.solution, prefix=f"bus_{bus_name}") # Effects page elif selected_page == "Effects": @@ -319,7 +320,7 @@ def explore_results_app(results): st.header(f"Effect: {effect_name}") - 
display_variables(effect.variables, prefix=f"effect_{effect_name}") + display_dataset(effect.solution, prefix=f"effect_{effect_name}") def run_explorer_from_file(folder, name): From 73040c22b765ebed06a39ba2059858ce2cdf88bd Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 14 May 2025 16:36:02 +0200 Subject: [PATCH 70/87] Improve streamlit app --- flixopt/explorer_app.py | 819 ++++++++++++++++++++++++++++++++++------ 1 file changed, 704 insertions(+), 115 deletions(-) diff --git a/flixopt/explorer_app.py b/flixopt/explorer_app.py index 2a428b142..9cd414514 100644 --- a/flixopt/explorer_app.py +++ b/flixopt/explorer_app.py @@ -7,123 +7,698 @@ import numpy as np import pandas as pd -import plotly.express as px -import plotly.graph_objects as go import streamlit as st import xarray as xr +import plotly.express as px +import plotly.graph_objects as go +import io +import tempfile +from typing import Dict, List, Optional, Union, Tuple, Any from flixopt import plotting -from flixopt.results import filter_dataset -# Reusable function to display variables -def display_dataset(ds: xr.Dataset, prefix=""): +def plot_heatmap( + data: pd.DataFrame, + name: str, + timeframes: str, + timesteps: str, + color_map: str, +) -> go.Figure: + fig = plotting.heat_map_plotly( + plotting.heat_map_data_from_df(data, timeframes, timesteps, 'ffill'), + title=name, + color_map=color_map, + xlabel=f'timeframe [{timeframes}]', + ylabel=f'timesteps [{timesteps}]', + ) + fig.update_layout( + margin=dict(l=50, r=100, t=50, b=50), # Extra space for colorbar + coloraxis_colorbar=dict( + lenmode='fraction', + len=0.8, + title='Scale', + tickvals=[0, 5, 10], # Force ticks at min, mid, max + ticktext=['0 (Min)', '5', '10 (Max)'], # Custom labels + ), # Make colorbar bigger + ) + return fig + + +def get_dimension_selector(dataset: xr.Dataset, dim: str, container: Any) -> Optional[Union[slice, List]]: + """Creates UI elements to select values or ranges for a 
specific dimension. + + Args: + dataset: The dataset containing the dimension. + dim: The dimension name. + container: The streamlit container to render UI elements in. + + Returns: + The selected filter for this dimension (either a slice or list of values). + Returns None if no selection is made. """ - Display variables from a dictionary with options for visualization + container.write(f'**{dim}** (size: {len(dataset[dim])})') + + if len(dataset[dim]) <= 5: + # For small dimensions, use a multiselect + values = dataset[dim].values + if isinstance(values[0], np.datetime64): + values = [pd.to_datetime(str(val)) for val in values] + selected = container.multiselect(f'Select {dim} values', options=values, default=[values[0]]) + if selected: + return selected + return None + else: + # For larger dimensions, use a range slider + min_val = float(dataset[dim].min().values) + max_val = float(dataset[dim].max().values) + + if isinstance(dataset[dim].values[0], np.datetime64): + # Handle datetime dimensions + date_min = pd.to_datetime(str(dataset[dim].min().values)) + date_max = pd.to_datetime(str(dataset[dim].max().values)) + start_date, end_date = container.date_input( + f'Select {dim} range', + value=(date_min, date_max), + min_value=date_min, + max_value=date_max, + ) + return slice(str(start_date), str(end_date)) + else: + # Handle numeric dimensions + step = (max_val - min_val) / 100 + range_val = container.slider( + f'Select {dim} range', + min_value=min_val, + max_value=max_val, + value=(min_val, min(min_val + (max_val - min_val) / 10, max_val)), + step=step, + ) + return slice(range_val[0], range_val[1]) + + +def filter_and_aggregate( + dataset: xr.Dataset, var_name: str, filters: Dict[str, Union[slice, List]], agg_dims: List[str], agg_method: str +) -> xr.DataArray: + """Filters and aggregates a variable from the dataset. Args: - ds: Dataset to display - prefix: Prefix for widget keys to avoid collisions + dataset: The dataset containing the variable. 
+ var_name: Name of the variable to process. + filters: Dictionary of dimension filters. + agg_dims: Dimensions to aggregate over. + agg_method: Aggregation method (mean, sum, etc.). + + Returns: + Filtered and aggregated data. """ - # Add a filter option - filter_contains = st.text_input("Filter variables by name:", key=f"{prefix}_filter") + # Get the variable + variable = dataset[var_name] + + # Filter the data + if filters: + filtered_data = variable.sel(**filters) + else: + filtered_data = variable + + # Apply aggregation if selected + if agg_dims and agg_method: + if agg_method == 'mean': + filtered_data = filtered_data.mean(dim=agg_dims) + elif agg_method == 'sum': + filtered_data = filtered_data.sum(dim=agg_dims) + elif agg_method == 'min': + filtered_data = filtered_data.min(dim=agg_dims) + elif agg_method == 'max': + filtered_data = filtered_data.max(dim=agg_dims) + elif agg_method == 'std': + filtered_data = filtered_data.std(dim=agg_dims) + + return filtered_data + + +def resample_time_data(data: xr.DataArray, freq: str) -> xr.DataArray: + """Resamples a DataArray along its time dimension. - filtered_ds = filter_dataset( - ds, - contains=filter_contains if filter_contains else None, - ) + Args: + data: The xarray DataArray containing a time dimension. + freq: The resampling frequency string (e.g., 'D', 'M', 'Y'). 
- # Heatmap options in a single row - show_heatmap_col, heatmap_col1, heatmap_col2, heatmap_col3 = st.columns(4) - with show_heatmap_col: - show_heatmap = st.checkbox('Show as heatmap', value=False, key=f"{prefix}_heatmap") - with heatmap_col1: - timeframes = st.selectbox( - 'Timeframes', - ['YS', 'MS', 'W', 'D', 'h', '15min', 'min'], - index=3, # Default to "D" - key=f"{prefix}_timeframes" - ) - with heatmap_col2: - timesteps = st.selectbox( - 'Timesteps', - ['W', 'D', 'h', '15min', 'min'], - index=2, # Default to "h" - key=f"{prefix}_timesteps" - ) - with heatmap_col3: - color_map = st.selectbox( - 'Colormap', - ['portland', 'viridis', 'plasma', 'inferno', 'magma', 'cividis', 'RdBu', 'Blues', 'YlOrRd'], - index=0, - key=f"{prefix}_colormap" - ) + Returns: + The resampled DataArray. + """ + # Find the time dimension name + time_dims = [dim for dim in data.dims if dim in ['time', 't'] or (isinstance(data[dim].values[0], np.datetime64))] - st.write(f"Showing {len(filtered_ds)} of {len(ds)} variables") + if not time_dims: + # No time dimension found + return data - # Display all filtered variables directly - for name, da in filtered_ds.data_vars.items(): - try: + time_dim = time_dims[0] - # Add a divider for each variable - st.markdown(f"### {name}") + try: + # Resample the data - default aggregation is mean + resampled_data = data.resample({time_dim: freq}).mean() + return resampled_data + except Exception as e: + print(f'Error resampling data: {e}') + return data - # Check if this is a time-based variable - if len(da.dims) > 0: - if len(da.dims) == 2: - data = da.to_pandas() - else: - data = da.to_dataframe() - if show_heatmap: + +def get_time_aggregation_ui(container: Any, data: xr.DataArray) -> Tuple[bool, Optional[str]]: + """Creates UI elements for time-based aggregation options. + + Args: + container: The streamlit container to render UI elements in. + data: The xarray DataArray to check for time dimensions. 
+ + Returns: + A tuple containing: + - Boolean indicating if time resampling should be applied + - The selected resampling frequency (or None if no resampling) + """ + # Find time dimensions + time_dims = [dim for dim in data.dims if dim in ['time', 't'] or (isinstance(data[dim].values[0], np.datetime64))] + + if not time_dims: + # No time dimensions + return False, None + + # Show time resampling options + container.subheader('Time Resampling') + time_dim = time_dims[0] + + # Check if the dimension has enough elements to be worth resampling + min_elements_for_resampling = 5 + if len(data[time_dim]) < min_elements_for_resampling: + container.info(f"Time dimension '{time_dim}' has too few elements for resampling.") + return False, None + + # Get the time range for display + start_time = pd.to_datetime(data[time_dim].values[0]) + end_time = pd.to_datetime(data[time_dim].values[-1]) + time_range = end_time - start_time + + # Show time range information + container.write(f'Time range: {start_time.date()} to {end_time.date()} ({time_range.days} days)') + + # Determine appropriate resampling options based on the time range + resampling_options = [] + + # Always include options for hourly/daily data + if time_range.days >= 2: + resampling_options.extend(['H', 'D']) + + # For data spanning more than a week + if time_range.days > 7: + resampling_options.extend(['W']) + + # For data spanning more than a month + if time_range.days > 30: + resampling_options.extend(['M']) + + # For data spanning more than a year + if time_range.days > 365: + resampling_options.extend(['Q', 'Y']) + + # Ensure we have at least some options + if not resampling_options: + resampling_options = ['H', 'D', 'W', 'M'] + + # Create friendly names for UI + freq_map = {'H': 'Hour', 'D': 'Day', 'W': 'Week', 'M': 'Month', 'Q': 'Quarter', 'Y': 'Year'} + + friendly_options = [freq_map.get(opt, opt) for opt in resampling_options] + + # Add "None" option for no resampling + resampling_options = ['none'] + 
resampling_options + friendly_options = ['None (original data)'] + friendly_options + + # Add "Custom" option + resampling_options.append('custom') + friendly_options.append('Custom frequency string') + + # Create the selection widget + use_resampling = container.checkbox('Enable time resampling', value=False) + + if use_resampling: + selected_freq_name = container.selectbox('Resample to:', options=friendly_options) + + # Map back to actual frequency string + selected_index = friendly_options.index(selected_freq_name) + selected_freq = resampling_options[selected_index] + + if selected_freq == 'none': + return False, None + elif selected_freq == 'custom': + # Provide information about pandas frequency strings + with container.expander('Frequency string help'): + container.write(""" + **Pandas frequency strings examples:** + - '5min': 5 minutes + - '2H': 2 hours + - '1D': 1 day + - '1W': 1 week + - '2W-MON': Biweekly on Monday + - '1M': 1 month + - '1Q': 1 quarter + - '1A' or '1Y': 1 year + - '3A': 3 years + + You can also use combinations like '1D12H' for 1 day and 12 hours. 
+ """) + + # Allow user to input a custom frequency string + custom_freq = container.text_input( + 'Enter custom frequency string:', + value='1D', # Default to daily + help="Enter a pandas frequency string like '5min', '2H', '1D', '1W', '1M'", + ) + + if custom_freq: + # Validate the frequency string + try: + # Try to create a sample resampling to validate the string + test_dates = pd.date_range(start_time, periods=3, freq='D') + test_series = pd.Series(range(3), index=test_dates) + test_series.resample(custom_freq).mean() + + # If we get here, the frequency string is valid + # Show information about what resampling will do try: - # Create heatmap using var_solution - heatmap_data = plotting.heat_map_data_from_df( - data, - timeframes, - timesteps, - 'ffill' - ) - - fig = plotting.heat_map_plotly( - data, - title=name, - color_map=color_map, - xlabel=f'timeframe [{timeframes}]', - ylabel=f'timesteps [{timesteps}]' - ) - fig.update_layout( - margin=dict(l=50, r=100, t=50, b=50), # Extra space for colorbar - coloraxis_colorbar=dict( - lenmode='fraction', - len=0.8, - title='Scale', - tickvals=[0, 5, 10], # Force ticks at min, mid, max - ticktext=['0 (Min)', '5', '10 (Max)'], # Custom labels - ), # Make colorbar bigger - ) - - st.plotly_chart(fig, theme=None, use_container_width=True) + resampled = data.resample({time_dim: custom_freq}).mean() + n_points_before = len(data[time_dim]) + n_points_after = len(resampled[time_dim]) + container.info(f'Resampling will change data points from {n_points_before} to {n_points_after}') except Exception as e: - st.error(f"Error creating heatmap: {e}") + container.warning(f'Cannot preview resampling effect: {str(e)}') + + return True, custom_freq + except Exception as e: + container.error(f'Invalid frequency string: {str(e)}') + return False, None + else: + return False, None + else: + # Show information about what resampling will do + n_points_before = len(data[time_dim]) + + # Calculate approximate number of points after 
resampling + try: + resampled = data.resample({time_dim: selected_freq}).mean() + n_points_after = len(resampled[time_dim]) + container.info(f'Resampling will change data points from {n_points_before} to {n_points_after}') + except Exception as e: + container.warning(f'Cannot preview resampling effect: {str(e)}') + + return True, selected_freq + else: + return False, None + + +def create_plotly_plot( + data: xr.DataArray, plot_type: str, var_name: str, title: Optional[str] = None, x_dim: Optional[str] = None +) -> go.Figure: + """Creates a plotly plot based on the selected data and plot type. + + Args: + data: The filtered/aggregated data array to plot. + plot_type: Type of plot to create (Line, Stacked Bar, Grouped Bar, or Heatmap). + var_name: Name of the selected variable. + title: Plot title. + x_dim: Dimension to use for x-axis in line plots. + + Returns: + Plotly figure object. + """ + # Check if we have valid data to plot + if data is None: + return go.Figure().update_layout( + title='No data to plot', + annotations=[ + dict( + text='No valid data found for plotting. 
Check your selections.', + showarrow=False, + xref='paper', + yref='paper', + x=0.5, + y=0.5, + ) + ], + ) + + # Get dimensions of the data array + dims = list(data.dims) + + # Create different plot types based on dimensions and selection + if plot_type == 'Line': + # Line plot + if len(dims) == 1: + # Simple line plot for 1D data + x_values = data[dims[0]].values + y_values = data.values + + fig = px.line(x=x_values, y=y_values, labels={'x': dims[0], 'y': var_name}, title=title) + + elif len(dims) >= 2 and x_dim is not None: + # Multiple lines for higher dimensional data + # Convert to dataframe for easy plotting + df = data.to_dataframe().reset_index() + + # Group by the x dimension + group_dims = [d for d in dims if d != x_dim] + + if len(group_dims) == 0: + # If no grouping dimensions, just plot a single line + fig = px.line(df, x=x_dim, y=var_name, title=title) + else: + # Create a plot with a line for each unique combination of group dimensions + fig = px.line( + df, + x=x_dim, + y=var_name, + color=group_dims[0] if len(group_dims) == 1 else None, # Use first group dim for color + facet_col=group_dims[1] if len(group_dims) > 1 else None, # Use second group dim for faceting + title=title, + ) + else: + # Not enough dimensions for line plot + fig = go.Figure().update_layout( + title='Cannot create Line plot', + annotations=[ + dict( + text='Need at least one dimension for Line plot', + showarrow=False, + xref='paper', + yref='paper', + x=0.5, + y=0.5, + ) + ], + ) + + elif plot_type == 'Stacked Bar': + if len(dims) >= 2: + # Convert to dataframe + df = data.to_dataframe().reset_index() + + # For stacked bar, need a category dimension and a value dimension + if x_dim is not None and x_dim in dims: + # Use the selected x dimension + x = x_dim + # Get another dimension for stacking + stack_dim = next((d for d in dims if d != x_dim), None) + + if stack_dim: + fig = px.bar(df, x=x, y=var_name, color=stack_dim, barmode='stack', title=title) else: - # Regular time 
series plot - fig = plotting.with_plotly( - data, - style='stacked_bar', - title=f'Variable: {name}', + # No dimension to stack + fig = px.bar(df, x=x, y=var_name, title=title) + else: + # Default to first dimension for x-axis + x = dims[0] + stack_dim = dims[1] if len(dims) > 1 else None + + fig = px.bar(df, x=x, y=var_name, color=stack_dim, barmode='stack', title=title) + elif len(dims) == 1: + # Single dimension bar plot + df = data.to_dataframe().reset_index() + + fig = px.bar(df, x=dims[0], y=var_name, title=title) + else: + # Not enough dimensions + fig = go.Figure().update_layout( + title='Cannot create Stacked Bar plot', + annotations=[ + dict( + text='Need at least one dimension for Stacked Bar plot', + showarrow=False, + xref='paper', + yref='paper', + x=0.5, + y=0.5, ) - fig.update_layout(height=300) - st.plotly_chart(fig, theme='streamlit', use_container_width=True) + ], + ) + + elif plot_type == 'Grouped Bar': + if len(dims) >= 2: + # Convert to dataframe + df = data.to_dataframe().reset_index() + + # For grouped bar, need a category dimension and a group dimension + if x_dim is not None and x_dim in dims: + # Use the selected x dimension + x = x_dim + # Get another dimension for grouping + group_dim = next((d for d in dims if d != x_dim), None) + + if group_dim: + fig = px.bar(df, x=x, y=var_name, color=group_dim, barmode='group', title=title) + else: + # No dimension to group + fig = px.bar(df, x=x, y=var_name, title=title) + else: + # Default to first dimension for x-axis + x = dims[0] + group_dim = dims[1] if len(dims) > 1 else None + + fig = px.bar(df, x=x, y=var_name, color=group_dim, barmode='group', title=title) + elif len(dims) == 1: + # Single dimension bar plot + df = data.to_dataframe().reset_index() + + fig = px.bar(df, x=dims[0], y=var_name, title=title) + else: + # Not enough dimensions + fig = go.Figure().update_layout( + title='Cannot create Grouped Bar plot', + annotations=[ + dict( + text='Need at least one dimension for Grouped Bar 
plot', + showarrow=False, + xref='paper', + yref='paper', + x=0.5, + y=0.5, + ) + ], + ) + + elif plot_type == 'Heatmap' and len(dims) >= 2: + # Heatmap for 2D data + if len(dims) > 2: + # If more than 2 dimensions, need to select which dimensions to use + if x_dim is not None and x_dim in dims: + # Use x_dim and find another dimension + dim1 = x_dim + dim2 = next((d for d in dims if d != x_dim), None) + + # Need to aggregate other dimensions + agg_dims = [d for d in dims if d != dim1 and d != dim2] + if agg_dims: + # Aggregate other dimensions using mean + data = data.mean(dim=agg_dims) + else: + # Use first two dimensions + dim1, dim2 = dims[:2] + + # Aggregate other dimensions if needed + if len(dims) > 2: + agg_dims = dims[2:] + data = data.mean(dim=agg_dims) + else: + dim1, dim2 = dims + + # Create heatmap + fig = px.imshow( + data.values, + x=data[dim1].values, + y=data[dim2].values, + labels=dict(x=dim1, y=dim2, color=var_name), + title=title, + color_continuous_scale='Viridis', + ) + else: + # Default empty plot with warning + fig = go.Figure().update_layout( + title='Cannot create plot', + annotations=[ + dict( + text=f'Cannot create {plot_type} plot with the current data dimensions', + showarrow=False, + xref='paper', + yref='paper', + x=0.5, + y=0.5, + ) + ], + ) + + # Common layout settings + fig.update_layout( + height=600, + width=800, + margin=dict(l=50, r=50, t=50, b=50), + legend=dict(orientation='h', yanchor='bottom', y=1.02, xanchor='right', x=1), + ) + + return fig + + +def download_data(filtered_data: xr.DataArray, var_name: str, download_format: str, container: Any) -> None: + """Creates download buttons for the filtered data. + + Args: + filtered_data: The filtered data to download. + var_name: Name of the variable. + download_format: Format to download (CSV, NetCDF, Excel). + container: Streamlit container to place the download button. 
+ """ + if download_format == 'CSV': + csv = filtered_data.to_dataframe().reset_index().to_csv(index=False) + container.download_button(label='Download CSV', data=csv, file_name=f'{var_name}_filtered.csv', mime='text/csv') + elif download_format == 'NetCDF': + # Create temp file for netCDF + with tempfile.NamedTemporaryFile(delete=False, suffix='.nc') as tmp: + filtered_data.to_netcdf(tmp.name) + with open(tmp.name, 'rb') as f: + container.download_button( + label='Download NetCDF', + data=f.read(), + file_name=f'{var_name}_filtered.nc', + mime='application/x-netcdf', + ) + elif download_format == 'Excel': + # Create in-memory Excel file + buffer = io.BytesIO() + filtered_data.to_dataframe().reset_index().to_excel(buffer, index=False) + buffer.seek(0) + + container.download_button( + label='Download Excel', + data=buffer, + file_name=f'{var_name}_filtered.xlsx', + mime='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', + ) + - show_datatable = st.checkbox('Show data table', key=f'{prefix}_datatable_{name}', value=False) - if show_datatable: - st.dataframe(data) +def xarray_explorer_component(dataset: xr.Dataset, container: Any = None) -> xr.DataArray: + """A reusable Streamlit component that creates an xarray dataset explorer. + This component allows users to interactively explore an xarray Dataset by + selecting variables, filtering dimensions, and creating visualizations. + + Args: + dataset: The xarray Dataset to explore. + container: The Streamlit container to render the explorer in. + If None, renders in the current Streamlit app context. + + Returns: + The filtered/selected data for the selected variable. 
+ """ + # If no container is provided, use the current Streamlit context + if container is None: + container = st + + # Dataset information + with container.expander('Dataset Overview'): + container.write('### Dataset Metadata') + container.write(dataset.attrs) + + container.write('### Dimensions') + container.write(pd.DataFrame({'Dimension': list(dataset.dims.keys()), 'Size': list(dataset.dims.values())})) + + container.write('### Variables') + var_info = [] + for var_name, var in dataset.variables.items(): + var_info.append( + { + 'Variable': var_name, + 'Dimensions': ', '.join(var.dims), + 'Shape': str(var.shape), + 'Type': str(var.dtype), + } + ) + container.dataframe(pd.DataFrame(var_info)) + + # Variable selection - single variable only + container.subheader('Variable Selection') + selected_var = container.selectbox('Select variable to explore', list(dataset.data_vars)) + + # Get the variable + variable = dataset[selected_var] + + # Display variable info + container.write(f'**Variable shape:** {variable.shape}') + container.write(f'**Variable dimensions:** {variable.dims}') + dims = list(variable.dims) + + # Create column layout + col1, col2 = container.columns([1, 2]) + + with col1: + container.subheader('Query Parameters') + + # Set filters for each dimension + filters = {} + for dim in dims: + dim_filter = get_dimension_selector(dataset, dim, container) + if dim_filter is not None: + filters[dim] = dim_filter + + # Aggregation options + container.subheader('Aggregation Options') + agg_dims = container.multiselect('Dimensions to aggregate', dims) + agg_method = container.selectbox('Aggregation method', ['mean', 'sum', 'min', 'max', 'std']) + + # Check if data has time dimension and add time resampling UI + use_time_resampling, resampling_freq = get_time_aggregation_ui(container, variable) + + # Plot type selection - limited to the requested types + container.subheader('Plot Settings') + plot_type = container.selectbox('Plot type', ['Line', 'Stacked Bar', 
'Grouped Bar', 'Heatmap']) + + if plot_type in ['Line', 'Stacked Bar', 'Grouped Bar']: + remaining_dims = [d for d in dims if d not in agg_dims] + if remaining_dims: + x_dim = container.selectbox('X axis dimension', remaining_dims) else: - # Show scalar value - st.write(f"Value: {da.item()}") - except Exception as e: - st.error(f"Error displaying variable {name}: {e}") + x_dim = None + else: + x_dim = None + + # Filter and aggregate the selected variable + filtered_data = filter_and_aggregate(dataset, selected_var, filters, agg_dims, agg_method) + + # Apply time resampling if requested + if use_time_resampling and resampling_freq: + container.info(f'Applying time resampling with frequency: {resampling_freq}') + filtered_data = resample_time_data(filtered_data, resampling_freq) + + # Display the visualizations + with col2: + container.subheader('Visualization') + + # Create the plot + plot_title = f'{selected_var} {plot_type} Plot' + fig = create_plotly_plot(filtered_data, plot_type, selected_var, title=plot_title, x_dim=x_dim) + + # Show the plot + container.plotly_chart(fig, use_container_width=True) + + # Data preview + with container.expander('Data Preview'): + container.dataframe(filtered_data.to_dataframe()) + + # Download options + container.subheader('Download Options') + download_format = container.selectbox('Download format', ['CSV', 'NetCDF', 'Excel']) + + if container.button('Download filtered data'): + download_data(filtered_data, selected_var, download_format, container) + + return filtered_data def explore_results_app(results): @@ -143,7 +718,7 @@ def explore_results_app(results): # Create sidebar for navigation st.sidebar.title("FlixOpt Results Explorer") - pages = ["Overview", "Components", "Buses", "Effects"] + pages = ["Overview", "Components", "Buses", "Effects", "Explorer"] selected_page = st.sidebar.radio("Navigation", pages) # Overview page @@ -238,28 +813,39 @@ def explore_results_app(results): try: st.subheader("Node Balance") + scenario = ( 
+ st.selectbox(f'Select a scenario: {results.scenarios[0]}', list(results.scenarios)) + if results.scenarios is not None + else None + ) + # Use built-in plotting method if component.is_storage: - fig = component.plot_charge_state(show=False, save=False) + fig = component.plot_charge_state(show=False, save=False, scenario=scenario) else: - fig = component.plot_node_balance(show=False, save=False) + fig = component.plot_node_balance(show=False, save=False, scenario=scenario) st.plotly_chart(fig, theme='streamlit', use_container_width=True) # Also show as dataframe if requested if st.checkbox("Show Data Table"): if component.is_storage: - node_balance = component.node_balance_with_charge_state().to_dataframe() + node_balance = component.node_balance_with_charge_state() + else: + node_balance = component.node_balance() + + if scenario: + st.dataframe(node_balance.sel(scenario=scenario).to_pandas()) else: - node_balance = component.node_balance().to_dataframe() - st.dataframe(node_balance) + st.dataframe(node_balance.to_pandas()) + except Exception as e: st.error(f"Error displaying the node balance: {e}") # Variables tab with tabs[1]: # Use the reusable function - display_dataset(component.solution, prefix=f"comp_{component_name}") + xarray_explorer_component(component.solution) # Buses page elif selected_page == "Buses": @@ -282,32 +868,31 @@ def explore_results_app(results): try: st.subheader("Node Balance") + scenario = ( + st.selectbox(f'Select a scenario: {results.scenarios[0]}', list(results.scenarios)) + if results.scenarios is not None + else None + ) + # Use built-in plotting method - fig = bus.plot_node_balance(show=False, save=False) + fig = bus.plot_node_balance(show=False, save=False, scenario=scenario) st.plotly_chart(fig, theme=None, use_container_width=True) # Also show as dataframe if requested if st.checkbox("Show Data Table"): - df = bus.node_balance().to_dataframe() + if scenario: + df = bus.node_balance().sel(scenario=scenario).to_pandas() + else: 
+ df = bus.node_balance().to_pandas() st.dataframe(df) - # Show inputs and outputs - col1, col2 = st.columns(2) - with col1: - st.subheader("Inputs") - for input_name in bus.inputs: - st.write(f"- {input_name}") - with col2: - st.subheader("Outputs") - for output_name in bus.outputs: - st.write(f"- {output_name}") except Exception as e: st.error(f"Error displaying the node balance: {e}") # Variables tab with tabs[1]: # Use the reusable function - display_dataset(bus.solution, prefix=f"bus_{bus_name}") + xarray_explorer_component(bus.solution) # Effects page elif selected_page == "Effects": @@ -320,7 +905,11 @@ def explore_results_app(results): st.header(f"Effect: {effect_name}") - display_dataset(effect.solution, prefix=f"effect_{effect_name}") + xarray_explorer_component(effect.solution) + + elif selected_page == "Explorer": + st.title("Explorer") + xarray_explorer_component(results.solution) def run_explorer_from_file(folder, name): From f54483da0c91da9f6d2406fc930dca1d09bdff40 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 14 May 2025 16:47:15 +0200 Subject: [PATCH 71/87] Improve coord selection --- flixopt/explorer_app.py | 135 +++++++++++++++++++++++++++++++++------- 1 file changed, 111 insertions(+), 24 deletions(-) diff --git a/flixopt/explorer_app.py b/flixopt/explorer_app.py index 9cd414514..7f946b867 100644 --- a/flixopt/explorer_app.py +++ b/flixopt/explorer_app.py @@ -59,42 +59,129 @@ def get_dimension_selector(dataset: xr.Dataset, dim: str, container: Any) -> Opt """ container.write(f'**{dim}** (size: {len(dataset[dim])})') - if len(dataset[dim]) <= 5: - # For small dimensions, use a multiselect - values = dataset[dim].values - if isinstance(values[0], np.datetime64): + # Get the values for this dimension + values = dataset[dim].values + + # Check if we have no data to work with + if len(values) == 0: + container.warning(f"Dimension '{dim}' is empty") + return None + + # Determine the data type of the 
dimension + first_val = values[0] + + # Case 1: Small number of values - always use multiselect regardless of type + if len(values) <= 5: + # For datetime64, convert to readable datetime objects + if isinstance(first_val, np.datetime64): values = [pd.to_datetime(str(val)) for val in values] + selected = container.multiselect(f'Select {dim} values', options=values, default=[values[0]]) if selected: return selected return None - else: - # For larger dimensions, use a range slider - min_val = float(dataset[dim].min().values) - max_val = float(dataset[dim].max().values) - - if isinstance(dataset[dim].values[0], np.datetime64): - # Handle datetime dimensions - date_min = pd.to_datetime(str(dataset[dim].min().values)) - date_max = pd.to_datetime(str(dataset[dim].max().values)) - start_date, end_date = container.date_input( - f'Select {dim} range', - value=(date_min, date_max), - min_value=date_min, - max_value=date_max, - ) - return slice(str(start_date), str(end_date)) - else: - # Handle numeric dimensions - step = (max_val - min_val) / 100 + + # Case 2: Datetime values - use date picker + elif isinstance(first_val, np.datetime64): + date_min = pd.to_datetime(str(dataset[dim].min().values)) + date_max = pd.to_datetime(str(dataset[dim].max().values)) + start_date, end_date = container.date_input( + f'Select {dim} range', + value=(date_min, min(date_min + pd.Timedelta(days=30), date_max)), + min_value=date_min, + max_value=date_max, + ) + return slice(str(start_date), str(end_date)) + + # Case 3: String values (categorical data) - use multiselect with limiting features + elif isinstance(first_val, str): + # For string values, provide a way to select multiple values + # First, get unique values and sort them + unique_values = sorted(list(set(values))) + + # If we have too many unique values, provide a selection mechanism + if len(unique_values) > 20: + container.warning(f"Dimension '{dim}' has {len(unique_values)} unique string values. 
Showing first 20.") + + # Option to show all values or search + show_all = container.checkbox(f"Show all values for '{dim}'", value=False) + + if show_all: + # Show all values but provide a text search to filter + search_term = container.text_input(f"Filter values for '{dim}'", '') + + if search_term: + # Filter values that contain the search term + filtered_values = [val for val in unique_values if search_term.lower() in str(val).lower()] + if not filtered_values: + container.warning(f"No values matching '{search_term}'") + return None + + unique_values = filtered_values + else: + # Just show the first 20 values + unique_values = unique_values[:20] + + # Display multiselect with available values + selected = container.multiselect( + f'Select {dim} values', options=unique_values, default=[unique_values[0]] if unique_values else [] + ) + + if selected: + return selected + return None + + # Case 4: Numeric values - use slider + elif np.issubdtype(type(first_val), np.number) or isinstance(first_val, (int, float)): + try: + min_val = float(dataset[dim].min().values) + max_val = float(dataset[dim].max().values) + + # Check for identical min/max values + if min_val == max_val: + container.info(f"All values in dimension '{dim}' are identical: {min_val}") + return None + + # Determine appropriate step size + range_size = max_val - min_val + if range_size < 1: + step = range_size / 100 + elif range_size < 10: + step = 0.1 + elif range_size < 100: + step = 1 + else: + step = range_size / 100 + + # Round values for better UI range_val = container.slider( f'Select {dim} range', min_value=min_val, max_value=max_val, - value=(min_val, min(min_val + (max_val - min_val) / 10, max_val)), + value=(min_val, min(min_val + range_size / 10, max_val)), step=step, ) return slice(range_val[0], range_val[1]) + except Exception as e: + container.error(f"Error creating slider for '{dim}': {e}") + return None + + # Case 5: Unknown/Unhandled type - fallback to multiselect with first 20 values + 
else: + container.warning(f"Dimension '{dim}' has an unusual data type. Using simple selection.") + + # Limit to first 20 values to avoid overwhelming the UI + display_values = list(values)[:20] + + selected = container.multiselect( + f'Select {dim} values (first 20 shown)', + options=display_values, + default=[display_values[0]] if display_values else [], + ) + + if selected: + return selected + return None def filter_and_aggregate( From b13a54be5c095fe143b3c0cc71cf9af5483c17b1 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Wed, 14 May 2025 16:55:11 +0200 Subject: [PATCH 72/87] Add keys to each st element --- flixopt/explorer_app.py | 194 +++++++++++++++++++++++++++++----------- 1 file changed, 144 insertions(+), 50 deletions(-) diff --git a/flixopt/explorer_app.py b/flixopt/explorer_app.py index 7f946b867..860bceabf 100644 --- a/flixopt/explorer_app.py +++ b/flixopt/explorer_app.py @@ -70,13 +70,19 @@ def get_dimension_selector(dataset: xr.Dataset, dim: str, container: Any) -> Opt # Determine the data type of the dimension first_val = values[0] + # Create unique keys for all widgets based on dimension name + # This prevents duplicate widget ID errors + widget_key_base = f'dim_selector_{dim}' + # Case 1: Small number of values - always use multiselect regardless of type if len(values) <= 5: # For datetime64, convert to readable datetime objects if isinstance(first_val, np.datetime64): values = [pd.to_datetime(str(val)) for val in values] - selected = container.multiselect(f'Select {dim} values', options=values, default=[values[0]]) + selected = container.multiselect( + f'Select {dim} values', options=values, default=[values[0]], key=f'{widget_key_base}_small_multiselect' + ) if selected: return selected return None @@ -90,6 +96,7 @@ def get_dimension_selector(dataset: xr.Dataset, dim: str, container: Any) -> Opt value=(date_min, min(date_min + pd.Timedelta(days=30), date_max)), min_value=date_min, max_value=date_max, + 
key=f'{widget_key_base}_date_input', ) return slice(str(start_date), str(end_date)) @@ -104,11 +111,13 @@ def get_dimension_selector(dataset: xr.Dataset, dim: str, container: Any) -> Opt container.warning(f"Dimension '{dim}' has {len(unique_values)} unique string values. Showing first 20.") # Option to show all values or search - show_all = container.checkbox(f"Show all values for '{dim}'", value=False) + show_all = container.checkbox( + f"Show all values for '{dim}'", value=False, key=f'{widget_key_base}_show_all' + ) if show_all: # Show all values but provide a text search to filter - search_term = container.text_input(f"Filter values for '{dim}'", '') + search_term = container.text_input(f"Filter values for '{dim}'", '', key=f'{widget_key_base}_search') if search_term: # Filter values that contain the search term @@ -124,7 +133,10 @@ def get_dimension_selector(dataset: xr.Dataset, dim: str, container: Any) -> Opt # Display multiselect with available values selected = container.multiselect( - f'Select {dim} values', options=unique_values, default=[unique_values[0]] if unique_values else [] + f'Select {dim} values', + options=unique_values, + default=[unique_values[0]] if unique_values else [], + key=f'{widget_key_base}_str_multiselect', ) if selected: @@ -160,6 +172,7 @@ def get_dimension_selector(dataset: xr.Dataset, dim: str, container: Any) -> Opt max_value=max_val, value=(min_val, min(min_val + range_size / 10, max_val)), step=step, + key=f'{widget_key_base}_slider', ) return slice(range_val[0], range_val[1]) except Exception as e: @@ -177,6 +190,7 @@ def get_dimension_selector(dataset: xr.Dataset, dim: str, container: Any) -> Opt f'Select {dim} values (first 20 shown)', options=display_values, default=[display_values[0]] if display_values else [], + key=f'{widget_key_base}_fallback_multiselect', ) if selected: @@ -229,7 +243,7 @@ def resample_time_data(data: xr.DataArray, freq: str) -> xr.DataArray: Args: data: The xarray DataArray containing a time 
dimension. - freq: The resampling frequency string (e.g., 'D', 'M', 'Y'). + freq: The resampling frequency string (e.g., 'D', 'M', 'Y', '5min'). Returns: The resampled DataArray. @@ -265,12 +279,20 @@ def get_time_aggregation_ui(container: Any, data: xr.DataArray) -> Tuple[bool, O - The selected resampling frequency (or None if no resampling) """ # Find time dimensions - time_dims = [dim for dim in data.dims if dim in ['time', 't'] or (isinstance(data[dim].values[0], np.datetime64))] + time_dims = [ + dim + for dim in data.dims + if dim in ['time', 't', 'date', 'datetime'] + or (len(data[dim]) > 0 and isinstance(data[dim].values[0], np.datetime64)) + ] if not time_dims: # No time dimensions return False, None + # Create unique key base for all widgets + key_base = f'time_resampling_{time_dims[0]}' + # Show time resampling options container.subheader('Time Resampling') time_dim = time_dims[0] @@ -282,31 +304,48 @@ def get_time_aggregation_ui(container: Any, data: xr.DataArray) -> Tuple[bool, O return False, None # Get the time range for display - start_time = pd.to_datetime(data[time_dim].values[0]) - end_time = pd.to_datetime(data[time_dim].values[-1]) - time_range = end_time - start_time - - # Show time range information - container.write(f'Time range: {start_time.date()} to {end_time.date()} ({time_range.days} days)') + try: + # Convert to pandas datetime for safe handling of different datetime formats + time_values = data[time_dim].values + if isinstance(time_values[0], str): + # Try to convert string dates to datetime + time_values = pd.to_datetime(time_values) + + start_time = pd.to_datetime(time_values[0]) + end_time = pd.to_datetime(time_values[-1]) + time_range = end_time - start_time + + # Show time range information + container.write(f'Time range: {start_time.date()} to {end_time.date()} ({time_range.days} days)') + except Exception as e: + container.warning(f'Error determining time range: {e}') + # Even if there's an error showing the range, we can still 
offer resampling + time_range = pd.Timedelta(days=365) # Assume a 1-year range as default # Determine appropriate resampling options based on the time range resampling_options = [] - # Always include options for hourly/daily data - if time_range.days >= 2: - resampling_options.extend(['H', 'D']) + try: + days = time_range.days + + # Always include options for hourly/daily data + if days >= 2: + resampling_options.extend(['H', 'D']) - # For data spanning more than a week - if time_range.days > 7: - resampling_options.extend(['W']) + # For data spanning more than a week + if days > 7: + resampling_options.extend(['W']) - # For data spanning more than a month - if time_range.days > 30: - resampling_options.extend(['M']) + # For data spanning more than a month + if days > 30: + resampling_options.extend(['M']) - # For data spanning more than a year - if time_range.days > 365: - resampling_options.extend(['Q', 'Y']) + # For data spanning more than a year + if days > 365: + resampling_options.extend(['Q', 'Y']) + except: + # Fallback options if we can't determine from time range + resampling_options = ['H', 'D', 'W', 'M'] # Ensure we have at least some options if not resampling_options: @@ -326,10 +365,12 @@ def get_time_aggregation_ui(container: Any, data: xr.DataArray) -> Tuple[bool, O friendly_options.append('Custom frequency string') # Create the selection widget - use_resampling = container.checkbox('Enable time resampling', value=False) + use_resampling = container.checkbox('Enable time resampling', value=False, key=f'{key_base}_enable') if use_resampling: - selected_freq_name = container.selectbox('Resample to:', options=friendly_options) + selected_freq_name = container.selectbox( + 'Resample to:', options=friendly_options, key=f'{key_base}_freq_select' + ) # Map back to actual frequency string selected_index = friendly_options.index(selected_freq_name) @@ -339,7 +380,7 @@ def get_time_aggregation_ui(container: Any, data: xr.DataArray) -> Tuple[bool, O return 
False, None elif selected_freq == 'custom': # Provide information about pandas frequency strings - with container.expander('Frequency string help'): + with container.expander('Frequency string help', key=f'{key_base}_help_expander'): container.write(""" **Pandas frequency strings examples:** - '5min': 5 minutes @@ -360,21 +401,31 @@ def get_time_aggregation_ui(container: Any, data: xr.DataArray) -> Tuple[bool, O 'Enter custom frequency string:', value='1D', # Default to daily help="Enter a pandas frequency string like '5min', '2H', '1D', '1W', '1M'", + key=f'{key_base}_custom_input', ) if custom_freq: # Validate the frequency string try: # Try to create a sample resampling to validate the string - test_dates = pd.date_range(start_time, periods=3, freq='D') + test_dates = pd.date_range('2020-01-01', periods=3, freq='D') test_series = pd.Series(range(3), index=test_dates) test_series.resample(custom_freq).mean() # If we get here, the frequency string is valid # Show information about what resampling will do try: - resampled = data.resample({time_dim: custom_freq}).mean() n_points_before = len(data[time_dim]) + + # Convert string dates to datetime if needed for resampling preview + if isinstance(data[time_dim].values[0], str): + # Create a temporary copy with datetime index for preview + temp_data = data.copy() + temp_data.coords[time_dim] = pd.to_datetime(temp_data[time_dim].values) + resampled = temp_data.resample({time_dim: custom_freq}).mean() + else: + resampled = data.resample({time_dim: custom_freq}).mean() + n_points_after = len(resampled[time_dim]) container.info(f'Resampling will change data points from {n_points_before} to {n_points_after}') except Exception as e: @@ -388,21 +439,30 @@ def get_time_aggregation_ui(container: Any, data: xr.DataArray) -> Tuple[bool, O return False, None else: # Show information about what resampling will do - n_points_before = len(data[time_dim]) - - # Calculate approximate number of points after resampling try: - resampled = 
data.resample({time_dim: selected_freq}).mean() + n_points_before = len(data[time_dim]) + + # Convert string dates to datetime if needed for resampling preview + if len(data[time_dim]) > 0 and isinstance(data[time_dim].values[0], str): + # Create a temporary copy with datetime index for preview + temp_data = data.copy() + temp_data.coords[time_dim] = pd.to_datetime(temp_data[time_dim].values) + resampled = temp_data.resample({time_dim: selected_freq}).mean() + else: + resampled = data.resample({time_dim: selected_freq}).mean() + n_points_after = len(resampled[time_dim]) - container.info(f'Resampling will change data points from {n_points_before} to {n_points_after}') + container.info( + f'Resampling will change data points from {n_points_before} to {n_points_after}', + key=f'{key_base}_info', + ) except Exception as e: - container.warning(f'Cannot preview resampling effect: {str(e)}') + container.warning(f'Cannot preview resampling effect: {str(e)}', key=f'{key_base}_warning') return True, selected_freq else: return False, None - def create_plotly_plot( data: xr.DataArray, plot_type: str, var_name: str, title: Optional[str] = None, x_dim: Optional[str] = None ) -> go.Figure: @@ -689,7 +749,7 @@ def xarray_explorer_component(dataset: xr.Dataset, container: Any = None) -> xr. container = st # Dataset information - with container.expander('Dataset Overview'): + with container.expander('Dataset Overview', key="dataset_overview_expander"): container.write('### Dataset Metadata') container.write(dataset.attrs) @@ -707,11 +767,15 @@ def xarray_explorer_component(dataset: xr.Dataset, container: Any = None) -> xr. 
'Type': str(var.dtype), } ) - container.dataframe(pd.DataFrame(var_info)) + container.dataframe(pd.DataFrame(var_info), key="var_info_dataframe") # Variable selection - single variable only container.subheader('Variable Selection') - selected_var = container.selectbox('Select variable to explore', list(dataset.data_vars)) + selected_var = container.selectbox( + 'Select variable to explore', + list(dataset.data_vars), + key="variable_selector" + ) # Get the variable variable = dataset[selected_var] @@ -736,20 +800,36 @@ def xarray_explorer_component(dataset: xr.Dataset, container: Any = None) -> xr. # Aggregation options container.subheader('Aggregation Options') - agg_dims = container.multiselect('Dimensions to aggregate', dims) - agg_method = container.selectbox('Aggregation method', ['mean', 'sum', 'min', 'max', 'std']) + agg_dims = container.multiselect( + 'Dimensions to aggregate', + dims, + key="agg_dims_selector" + ) + agg_method = container.selectbox( + 'Aggregation method', + ['mean', 'sum', 'min', 'max', 'std'], + key="agg_method_selector" + ) # Check if data has time dimension and add time resampling UI use_time_resampling, resampling_freq = get_time_aggregation_ui(container, variable) # Plot type selection - limited to the requested types container.subheader('Plot Settings') - plot_type = container.selectbox('Plot type', ['Line', 'Stacked Bar', 'Grouped Bar', 'Heatmap']) + plot_type = container.selectbox( + 'Plot type', + ['Line', 'Stacked Bar', 'Grouped Bar', 'Heatmap'], + key="plot_type_selector" + ) if plot_type in ['Line', 'Stacked Bar', 'Grouped Bar']: remaining_dims = [d for d in dims if d not in agg_dims] if remaining_dims: - x_dim = container.selectbox('X axis dimension', remaining_dims) + x_dim = container.selectbox( + 'X axis dimension', + remaining_dims, + key="x_dim_selector" + ) else: x_dim = None else: @@ -772,22 +852,25 @@ def xarray_explorer_component(dataset: xr.Dataset, container: Any = None) -> xr. 
fig = create_plotly_plot(filtered_data, plot_type, selected_var, title=plot_title, x_dim=x_dim) # Show the plot - container.plotly_chart(fig, use_container_width=True) + container.plotly_chart(fig, use_container_width=True, key="main_plot") # Data preview - with container.expander('Data Preview'): - container.dataframe(filtered_data.to_dataframe()) + with container.expander('Data Preview', key="data_preview_expander"): + container.dataframe(filtered_data.to_dataframe(), key="filtered_data_preview") # Download options container.subheader('Download Options') - download_format = container.selectbox('Download format', ['CSV', 'NetCDF', 'Excel']) + download_format = container.selectbox( + 'Download format', + ['CSV', 'NetCDF', 'Excel'], + key="download_format_selector" + ) - if container.button('Download filtered data'): + if container.button('Download filtered data', key="download_button"): download_data(filtered_data, selected_var, download_format, container) return filtered_data - def explore_results_app(results): """ Main function to explore calculation results @@ -805,7 +888,7 @@ def explore_results_app(results): # Create sidebar for navigation st.sidebar.title("FlixOpt Results Explorer") - pages = ["Overview", "Components", "Buses", "Effects", "Explorer"] + pages = ["Overview", "Components", "Buses", "Effects", "Explorer", "Effects DS"] selected_page = st.sidebar.radio("Navigation", pages) # Overview page @@ -998,6 +1081,17 @@ def explore_results_app(results): st.title("Explorer") xarray_explorer_component(results.solution) + elif selected_page == "Effects DS": + st.title('Effects Dataset') + tabs = st.tabs(["total", "invest", "operation"]) + + with tabs[0]: + xarray_explorer_component(results.effects_per_component('total')) + with tabs[1]: + xarray_explorer_component(results.effects_per_component('invest')) + with tabs[2]: + xarray_explorer_component(results.effects_per_component('operation')) + def run_explorer_from_file(folder, name): """ From 
ba7b8ccf1053ce0e905f75bf16918dc6f219b722 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 15 May 2025 12:29:36 +0200 Subject: [PATCH 73/87] Add traceback ui and add FLow rates and FLow Hours to streamlit app --- flixopt/explorer_app.py | 342 ++++++++++++++++++++++++++++++---------- 1 file changed, 257 insertions(+), 85 deletions(-) diff --git a/flixopt/explorer_app.py b/flixopt/explorer_app.py index 860bceabf..23f7466a2 100644 --- a/flixopt/explorer_app.py +++ b/flixopt/explorer_app.py @@ -4,20 +4,126 @@ import os import sys from pathlib import Path +import io +import tempfile +from typing import Dict, List, Optional, Union, Tuple, Any, Callable, cast, TypeVar +import traceback +import inspect import numpy as np +import functools import pandas as pd import streamlit as st import xarray as xr import plotly.express as px import plotly.graph_objects as go -import io -import tempfile -from typing import Dict, List, Optional, Union, Tuple, Any + +T = TypeVar('T') from flixopt import plotting +def show_traceback( + return_original_input: bool = False, include_args: bool = True, container: Optional[Any] = None +) -> Callable[[Callable[..., T]], Callable[..., T]]: + """ + A decorator that shows the full traceback in Streamlit when an exception occurs. + + Args: + return_original_input: If True and the first argument is not None, return it on error. + Useful for data processing functions to return original data. + include_args: If True, show function arguments in the error details. + container: Optional Streamlit container to display errors in. + If None, uses st directly. 
+ + Usage: + @show_traceback() + def my_function(data, param1, param2): + # Your code here + + # Or with custom options: + @show_traceback(return_original_input=True, include_args=False) + def process_data(data, options): + # Your code here + + Returns: + The decorated function + """ + + def decorator(func: Callable[..., T]) -> Callable[..., T]: + @functools.wraps(func) + def wrapper(*args, **kwargs): + # Get display container + display = container if container is not None else st + + try: + return func(*args, **kwargs) + except Exception as e: + # Show error message + display.error(f'⚠️ Error in {func.__name__}: {str(e)}') + + # Create an expander for detailed error info + with display.expander('See detailed traceback'): + # Show the full traceback + display.code(traceback.format_exc(), language='python') + + # Show function info if requested + if include_args: + display.markdown('**Function Information:**') + + # Try to get source code + try: + display.code(inspect.getsource(func), language='python') + except: + display.warning('Could not retrieve function source code.') + + # Show arguments + display.markdown('**Function Arguments:**') + + # Safely represent args + safe_args = [] + for arg in args: + try: + repr_arg = repr(arg) + if len(repr_arg) > 200: # Truncate long representations + repr_arg = repr_arg[:200] + '...' + safe_args.append(repr_arg) + except: + safe_args.append('[Representation failed]') + + # Safely represent kwargs + safe_kwargs = {} + for k, v in kwargs.items(): + try: + repr_v = repr(v) + if len(repr_v) > 200: # Truncate long representations + repr_v = repr_v[:200] + '...' 
+ safe_kwargs[k] = repr_v + except: + safe_kwargs[k] = '[Representation failed]' + + # Display args and kwargs + display.text(f'Args: {safe_args}') + display.text(f'Kwargs: {safe_kwargs}') + + # Also log to console/stderr for server logs + print(f'Exception in {func.__name__}:', file=sys.stderr) + traceback.print_exc(file=sys.stderr) + + # Determine what to return on error + if return_original_input and args and args[0] is not None: + # Return the first argument (usually the data being processed) + return args[0] + else: + # Return None as default + return None + + return cast(Callable[..., T], wrapper) + + return decorator + + +@show_traceback() def plot_heatmap( data: pd.DataFrame, name: str, @@ -45,7 +151,8 @@ def plot_heatmap( return fig -def get_dimension_selector(dataset: xr.Dataset, dim: str, container: Any) -> Optional[Union[slice, List]]: +@show_traceback() +def get_dimension_selector(dataset: xr.Dataset, dim: str, container: Any, st_key: str = '') -> Optional[Union[slice, List]]: """Creates UI elements to select values or ranges for a specific dimension. Args: @@ -72,7 +179,7 @@ def get_dimension_selector(dataset: xr.Dataset, dim: str, container: Any) -> Opt # Create unique keys for all widgets based on dimension name # This prevents duplicate widget ID errors - widget_key_base = f'dim_selector_{dim}' + widget_key_base = f'dim_selector_{dim}_{st_key}' # Case 1: Small number of values - always use multiselect regardless of type if len(values) <= 5: @@ -198,14 +305,14 @@ def get_dimension_selector(dataset: xr.Dataset, dim: str, container: Any) -> Opt return None +@show_traceback() def filter_and_aggregate( - dataset: xr.Dataset, var_name: str, filters: Dict[str, Union[slice, List]], agg_dims: List[str], agg_method: str + da: xr.DataArray, filters: Dict[str, Union[slice, List]], agg_dims: List[str], agg_method: str ) -> xr.DataArray: """Filters and aggregates a variable from the dataset. Args: - dataset: The dataset containing the variable. 
- var_name: Name of the variable to process. + da: The dataarray. filters: Dictionary of dimension filters. agg_dims: Dimensions to aggregate over. agg_method: Aggregation method (mean, sum, etc.). @@ -213,14 +320,12 @@ def filter_and_aggregate( Returns: Filtered and aggregated data. """ - # Get the variable - variable = dataset[var_name] # Filter the data if filters: - filtered_data = variable.sel(**filters) + filtered_data = da.sel(**filters) else: - filtered_data = variable + filtered_data = da # Apply aggregation if selected if agg_dims and agg_method: @@ -238,6 +343,46 @@ def filter_and_aggregate( return filtered_data +@show_traceback() +def filter_and_aggregate_dataarray( + dataarray: xr.DataArray, filters: dict, agg_dims: list, agg_method: str +) -> xr.DataArray: + """Filter and aggregate a DataArray. + + Args: + dataarray: The xarray DataArray to process. + filters: Dictionary of dimension filters. + agg_dims: List of dimensions to aggregate over. + agg_method: Aggregation method ('mean', 'sum', etc.) + + Returns: + Filtered and aggregated DataArray. + """ + # Apply filters + filtered_data = dataarray + for dim, dim_filter in filters.items(): + if isinstance(dim_filter, slice): + filtered_data = filtered_data.sel({dim: dim_filter}) + elif isinstance(dim_filter, (list, np.ndarray)): + filtered_data = filtered_data.sel({dim: dim_filter}) + + # Apply aggregation if requested + if agg_dims and len(agg_dims) > 0: + if agg_method == 'mean': + filtered_data = filtered_data.mean(dim=agg_dims) + elif agg_method == 'sum': + filtered_data = filtered_data.sum(dim=agg_dims) + elif agg_method == 'min': + filtered_data = filtered_data.min(dim=agg_dims) + elif agg_method == 'max': + filtered_data = filtered_data.max(dim=agg_dims) + elif agg_method == 'std': + filtered_data = filtered_data.std(dim=agg_dims) + + return filtered_data + + +@show_traceback() def resample_time_data(data: xr.DataArray, freq: str) -> xr.DataArray: """Resamples a DataArray along its time dimension. 
@@ -266,7 +411,8 @@ def resample_time_data(data: xr.DataArray, freq: str) -> xr.DataArray: return data -def get_time_aggregation_ui(container: Any, data: xr.DataArray) -> Tuple[bool, Optional[str]]: +@show_traceback() +def get_time_aggregation_ui(container: Any, data: xr.DataArray, st_key: str = '') -> Tuple[bool, Optional[str]]: """Creates UI elements for time-based aggregation options. Args: @@ -291,7 +437,7 @@ def get_time_aggregation_ui(container: Any, data: xr.DataArray) -> Tuple[bool, O return False, None # Create unique key base for all widgets - key_base = f'time_resampling_{time_dims[0]}' + key_base = f'time_resampling_{time_dims[0]}_{st_key}' # Show time resampling options container.subheader('Time Resampling') @@ -380,7 +526,7 @@ def get_time_aggregation_ui(container: Any, data: xr.DataArray) -> Tuple[bool, O return False, None elif selected_freq == 'custom': # Provide information about pandas frequency strings - with container.expander('Frequency string help', key=f'{key_base}_help_expander'): + with container.expander('Frequency string help'): container.write(""" **Pandas frequency strings examples:** - '5min': 5 minutes @@ -463,6 +609,8 @@ def get_time_aggregation_ui(container: Any, data: xr.DataArray) -> Tuple[bool, O else: return False, None + +@show_traceback() def create_plotly_plot( data: xr.DataArray, plot_type: str, var_name: str, title: Optional[str] = None, x_dim: Optional[str] = None ) -> go.Figure: @@ -693,6 +841,7 @@ def create_plotly_plot( return fig +@show_traceback() def download_data(filtered_data: xr.DataArray, var_name: str, download_format: str, container: Any) -> None: """Creates download buttons for the filtered data. @@ -730,55 +879,76 @@ def download_data(filtered_data: xr.DataArray, var_name: str, download_format: s ) -def xarray_explorer_component(dataset: xr.Dataset, container: Any = None) -> xr.DataArray: - """A reusable Streamlit component that creates an xarray dataset explorer. 
+@show_traceback() +def xarray_explorer_component( + data: Union[xr.Dataset, xr.DataArray], container: Any = None, st_key: Optional[str] = None +) -> Union[xr.DataArray, xr.Dataset]: + """A reusable Streamlit component that creates an xarray data explorer. - This component allows users to interactively explore an xarray Dataset by - selecting variables, filtering dimensions, and creating visualizations. + This component allows users to interactively explore an xarray Dataset or DataArray by + selecting variables (for Dataset), filtering dimensions, and creating visualizations. Args: - dataset: The xarray Dataset to explore. + data: The xarray Dataset or DataArray to explore. container: The Streamlit container to render the explorer in. If None, renders in the current Streamlit app context. + st_key: Optional key prefix for Streamlit widget keys to avoid duplicates. Returns: - The filtered/selected data for the selected variable. + The filtered/selected data. """ # If no container is provided, use the current Streamlit context if container is None: container = st - # Dataset information - with container.expander('Dataset Overview', key="dataset_overview_expander"): - container.write('### Dataset Metadata') - container.write(dataset.attrs) + # Determine if we're dealing with a Dataset or DataArray + is_dataset = isinstance(data, xr.Dataset) - container.write('### Dimensions') - container.write(pd.DataFrame({'Dimension': list(dataset.dims.keys()), 'Size': list(dataset.dims.values())})) - - container.write('### Variables') - var_info = [] - for var_name, var in dataset.variables.items(): - var_info.append( - { - 'Variable': var_name, - 'Dimensions': ', '.join(var.dims), - 'Shape': str(var.shape), - 'Type': str(var.dtype), - } - ) - container.dataframe(pd.DataFrame(var_info), key="var_info_dataframe") - - # Variable selection - single variable only - container.subheader('Variable Selection') - selected_var = container.selectbox( - 'Select variable to explore', - 
list(dataset.data_vars), - key="variable_selector" - ) + # Dataset/DataArray information + with container.expander('Data Overview'): + if is_dataset: + container.write('### Dataset Metadata') + else: + container.write('### DataArray Metadata') - # Get the variable - variable = dataset[selected_var] + container.write(data.attrs) + + container.write('### Dimensions') + #container.write(pd.DataFrame({'Dimension': list(data.indexes.keys()), 'Size': list(data.indexes.values())})) + + if is_dataset: + container.write('### Variables') + var_info = [] + for var_name, var in data.variables.items(): + var_info.append( + { + 'Variable': var_name, + 'Dimensions': ', '.join(var.dims), + 'Shape': str(var.shape), + 'Type': str(var.dtype), + } + ) + container.dataframe(pd.DataFrame(var_info), key=f'{st_key}_var_info_dataframe') + else: + # For DataArray, show its basic info + container.write('### DataArray Information') + container.write(f'**Name:** {data.name}') + container.write(f'**Type:** {data.dtype}') + + # For Dataset: Variable selection + # For DataArray: Just display the variable name + if is_dataset: + container.subheader('Variable Selection') + selected_var = container.selectbox( + 'Select variable to explore', list(data.data_vars), key=f'{st_key}_variable_selector' + ) + # Get the variable + variable = data[selected_var] + else: + # For DataArray, we already have the variable + variable = data + selected_var = variable.name if variable.name is not None else 'DataArray' + container.subheader(f'Exploring: {selected_var}') # Display variable info container.write(f'**Variable shape:** {variable.shape}') @@ -794,49 +964,41 @@ def xarray_explorer_component(dataset: xr.Dataset, container: Any = None) -> xr. 
# Set filters for each dimension filters = {} for dim in dims: - dim_filter = get_dimension_selector(dataset, dim, container) + dim_filter = get_dimension_selector(data, dim, container, st_key=st_key) if dim_filter is not None: filters[dim] = dim_filter # Aggregation options container.subheader('Aggregation Options') - agg_dims = container.multiselect( - 'Dimensions to aggregate', - dims, - key="agg_dims_selector" - ) + agg_dims = container.multiselect('Dimensions to aggregate', dims, key=f'{st_key}_agg_dims_selector') agg_method = container.selectbox( - 'Aggregation method', - ['mean', 'sum', 'min', 'max', 'std'], - key="agg_method_selector" + 'Aggregation method', ['mean', 'sum', 'min', 'max', 'std'], key=f'{st_key}_agg_method_selector' ) # Check if data has time dimension and add time resampling UI - use_time_resampling, resampling_freq = get_time_aggregation_ui(container, variable) + use_time_resampling, resampling_freq = get_time_aggregation_ui(container, variable, st_key=st_key) # Plot type selection - limited to the requested types container.subheader('Plot Settings') plot_type = container.selectbox( - 'Plot type', - ['Line', 'Stacked Bar', 'Grouped Bar', 'Heatmap'], - key="plot_type_selector" + 'Plot type', ['Line', 'Stacked Bar', 'Grouped Bar', 'Heatmap'], key=f'{st_key}_plot_type_selector' ) if plot_type in ['Line', 'Stacked Bar', 'Grouped Bar']: remaining_dims = [d for d in dims if d not in agg_dims] if remaining_dims: - x_dim = container.selectbox( - 'X axis dimension', - remaining_dims, - key="x_dim_selector" - ) + x_dim = container.selectbox('X axis dimension', remaining_dims, key=f'{st_key}_x_dim_selector') else: x_dim = None else: x_dim = None # Filter and aggregate the selected variable - filtered_data = filter_and_aggregate(dataset, selected_var, filters, agg_dims, agg_method) + if is_dataset: + filtered_data = filter_and_aggregate(data[selected_var], filters, agg_dims, agg_method) + else: + # For DataArray, we don't need to select a variable + 
filtered_data = filter_and_aggregate(data, filters, agg_dims, agg_method) # Apply time resampling if requested if use_time_resampling and resampling_freq: @@ -849,28 +1011,30 @@ def xarray_explorer_component(dataset: xr.Dataset, container: Any = None) -> xr. # Create the plot plot_title = f'{selected_var} {plot_type} Plot' - fig = create_plotly_plot(filtered_data, plot_type, selected_var, title=plot_title, x_dim=x_dim) + fig = create_plotly_plot( + filtered_data, plot_type, selected_var if is_dataset else None, title=plot_title, x_dim=x_dim + ) # Show the plot - container.plotly_chart(fig, use_container_width=True, key="main_plot") + container.plotly_chart(fig, use_container_width=True) # Data preview - with container.expander('Data Preview', key="data_preview_expander"): - container.dataframe(filtered_data.to_dataframe(), key="filtered_data_preview") + with container.expander('Data Preview'): + container.dataframe(filtered_data.to_dataframe()) # Download options container.subheader('Download Options') download_format = container.selectbox( - 'Download format', - ['CSV', 'NetCDF', 'Excel'], - key="download_format_selector" + 'Download format', ['CSV', 'NetCDF', 'Excel'], key=f'{st_key}_download_format_selector' ) - if container.button('Download filtered data', key="download_button"): + if container.button('Download filtered data', key=f'{st_key}_download_button'): download_data(filtered_data, selected_var, download_format, container) return filtered_data + +@show_traceback() def explore_results_app(results): """ Main function to explore calculation results @@ -888,7 +1052,7 @@ def explore_results_app(results): # Create sidebar for navigation st.sidebar.title("FlixOpt Results Explorer") - pages = ["Overview", "Components", "Buses", "Effects", "Explorer", "Effects DS"] + pages = ["Overview", "Components", "Buses", "Effects", "Flows DS", "Effects DS", "Explorer"] selected_page = st.sidebar.radio("Navigation", pages) # Overview page @@ -1077,20 +1241,28 @@ def 
explore_results_app(results): xarray_explorer_component(effect.solution) - elif selected_page == "Explorer": - st.title("Explorer") - xarray_explorer_component(results.solution) + elif selected_page == "Flows DS": + st.title('Flow Rates Dataset') + mode = st.selectbox("Select a mode", ['Flow Rates', 'Flow Hours']) + if mode == 'Flow Hours': + xarray_explorer_component(results.flow_hours()) + else: + xarray_explorer_component(results.flow_rates()) - elif selected_page == "Effects DS": + elif selected_page == 'Effects DS': st.title('Effects Dataset') tabs = st.tabs(["total", "invest", "operation"]) with tabs[0]: - xarray_explorer_component(results.effects_per_component('total')) + xarray_explorer_component(results.effects_per_component('total'), st_key='total') with tabs[1]: - xarray_explorer_component(results.effects_per_component('invest')) + xarray_explorer_component(results.effects_per_component('invest'), st_key='invest') with tabs[2]: - xarray_explorer_component(results.effects_per_component('operation')) + xarray_explorer_component(results.effects_per_component('operation'), st_key='operation') + + elif selected_page == "Explorer": + st.title("Explore all variable results") + xarray_explorer_component(results.solution) def run_explorer_from_file(folder, name): From 85c1062c5cdeff25bca23a2c0c131a822c05c79c Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 15 May 2025 13:00:12 +0200 Subject: [PATCH 74/87] Greatly simplify the streamlit app to use a unified xarray explorer --- flixopt/explorer_app.py | 1053 +++++++-------------------------------- 1 file changed, 183 insertions(+), 870 deletions(-) diff --git a/flixopt/explorer_app.py b/flixopt/explorer_app.py index 23f7466a2..0c0e7112c 100644 --- a/flixopt/explorer_app.py +++ b/flixopt/explorer_app.py @@ -3,7 +3,6 @@ import argparse import os import sys -from pathlib import Path import io import tempfile from typing import Dict, List, Optional, Union, Tuple, Any, 
Callable, cast, TypeVar @@ -20,8 +19,6 @@ T = TypeVar('T') -from flixopt import plotting - def show_traceback( return_original_input: bool = False, include_args: bool = True, container: Optional[Any] = None @@ -123,724 +120,6 @@ def wrapper(*args, **kwargs): return decorator -@show_traceback() -def plot_heatmap( - data: pd.DataFrame, - name: str, - timeframes: str, - timesteps: str, - color_map: str, -) -> go.Figure: - fig = plotting.heat_map_plotly( - plotting.heat_map_data_from_df(data, timeframes, timesteps, 'ffill'), - title=name, - color_map=color_map, - xlabel=f'timeframe [{timeframes}]', - ylabel=f'timesteps [{timesteps}]', - ) - fig.update_layout( - margin=dict(l=50, r=100, t=50, b=50), # Extra space for colorbar - coloraxis_colorbar=dict( - lenmode='fraction', - len=0.8, - title='Scale', - tickvals=[0, 5, 10], # Force ticks at min, mid, max - ticktext=['0 (Min)', '5', '10 (Max)'], # Custom labels - ), # Make colorbar bigger - ) - return fig - - -@show_traceback() -def get_dimension_selector(dataset: xr.Dataset, dim: str, container: Any, st_key: str = '') -> Optional[Union[slice, List]]: - """Creates UI elements to select values or ranges for a specific dimension. - - Args: - dataset: The dataset containing the dimension. - dim: The dimension name. - container: The streamlit container to render UI elements in. - - Returns: - The selected filter for this dimension (either a slice or list of values). - Returns None if no selection is made. 
- """ - container.write(f'**{dim}** (size: {len(dataset[dim])})') - - # Get the values for this dimension - values = dataset[dim].values - - # Check if we have no data to work with - if len(values) == 0: - container.warning(f"Dimension '{dim}' is empty") - return None - - # Determine the data type of the dimension - first_val = values[0] - - # Create unique keys for all widgets based on dimension name - # This prevents duplicate widget ID errors - widget_key_base = f'dim_selector_{dim}_{st_key}' - - # Case 1: Small number of values - always use multiselect regardless of type - if len(values) <= 5: - # For datetime64, convert to readable datetime objects - if isinstance(first_val, np.datetime64): - values = [pd.to_datetime(str(val)) for val in values] - - selected = container.multiselect( - f'Select {dim} values', options=values, default=[values[0]], key=f'{widget_key_base}_small_multiselect' - ) - if selected: - return selected - return None - - # Case 2: Datetime values - use date picker - elif isinstance(first_val, np.datetime64): - date_min = pd.to_datetime(str(dataset[dim].min().values)) - date_max = pd.to_datetime(str(dataset[dim].max().values)) - start_date, end_date = container.date_input( - f'Select {dim} range', - value=(date_min, min(date_min + pd.Timedelta(days=30), date_max)), - min_value=date_min, - max_value=date_max, - key=f'{widget_key_base}_date_input', - ) - return slice(str(start_date), str(end_date)) - - # Case 3: String values (categorical data) - use multiselect with limiting features - elif isinstance(first_val, str): - # For string values, provide a way to select multiple values - # First, get unique values and sort them - unique_values = sorted(list(set(values))) - - # If we have too many unique values, provide a selection mechanism - if len(unique_values) > 20: - container.warning(f"Dimension '{dim}' has {len(unique_values)} unique string values. 
Showing first 20.") - - # Option to show all values or search - show_all = container.checkbox( - f"Show all values for '{dim}'", value=False, key=f'{widget_key_base}_show_all' - ) - - if show_all: - # Show all values but provide a text search to filter - search_term = container.text_input(f"Filter values for '{dim}'", '', key=f'{widget_key_base}_search') - - if search_term: - # Filter values that contain the search term - filtered_values = [val for val in unique_values if search_term.lower() in str(val).lower()] - if not filtered_values: - container.warning(f"No values matching '{search_term}'") - return None - - unique_values = filtered_values - else: - # Just show the first 20 values - unique_values = unique_values[:20] - - # Display multiselect with available values - selected = container.multiselect( - f'Select {dim} values', - options=unique_values, - default=[unique_values[0]] if unique_values else [], - key=f'{widget_key_base}_str_multiselect', - ) - - if selected: - return selected - return None - - # Case 4: Numeric values - use slider - elif np.issubdtype(type(first_val), np.number) or isinstance(first_val, (int, float)): - try: - min_val = float(dataset[dim].min().values) - max_val = float(dataset[dim].max().values) - - # Check for identical min/max values - if min_val == max_val: - container.info(f"All values in dimension '{dim}' are identical: {min_val}") - return None - - # Determine appropriate step size - range_size = max_val - min_val - if range_size < 1: - step = range_size / 100 - elif range_size < 10: - step = 0.1 - elif range_size < 100: - step = 1 - else: - step = range_size / 100 - - # Round values for better UI - range_val = container.slider( - f'Select {dim} range', - min_value=min_val, - max_value=max_val, - value=(min_val, min(min_val + range_size / 10, max_val)), - step=step, - key=f'{widget_key_base}_slider', - ) - return slice(range_val[0], range_val[1]) - except Exception as e: - container.error(f"Error creating slider for '{dim}': 
{e}") - return None - - # Case 5: Unknown/Unhandled type - fallback to multiselect with first 20 values - else: - container.warning(f"Dimension '{dim}' has an unusual data type. Using simple selection.") - - # Limit to first 20 values to avoid overwhelming the UI - display_values = list(values)[:20] - - selected = container.multiselect( - f'Select {dim} values (first 20 shown)', - options=display_values, - default=[display_values[0]] if display_values else [], - key=f'{widget_key_base}_fallback_multiselect', - ) - - if selected: - return selected - return None - - -@show_traceback() -def filter_and_aggregate( - da: xr.DataArray, filters: Dict[str, Union[slice, List]], agg_dims: List[str], agg_method: str -) -> xr.DataArray: - """Filters and aggregates a variable from the dataset. - - Args: - da: The dataarray. - filters: Dictionary of dimension filters. - agg_dims: Dimensions to aggregate over. - agg_method: Aggregation method (mean, sum, etc.). - - Returns: - Filtered and aggregated data. - """ - - # Filter the data - if filters: - filtered_data = da.sel(**filters) - else: - filtered_data = da - - # Apply aggregation if selected - if agg_dims and agg_method: - if agg_method == 'mean': - filtered_data = filtered_data.mean(dim=agg_dims) - elif agg_method == 'sum': - filtered_data = filtered_data.sum(dim=agg_dims) - elif agg_method == 'min': - filtered_data = filtered_data.min(dim=agg_dims) - elif agg_method == 'max': - filtered_data = filtered_data.max(dim=agg_dims) - elif agg_method == 'std': - filtered_data = filtered_data.std(dim=agg_dims) - - return filtered_data - - -@show_traceback() -def filter_and_aggregate_dataarray( - dataarray: xr.DataArray, filters: dict, agg_dims: list, agg_method: str -) -> xr.DataArray: - """Filter and aggregate a DataArray. - - Args: - dataarray: The xarray DataArray to process. - filters: Dictionary of dimension filters. - agg_dims: List of dimensions to aggregate over. - agg_method: Aggregation method ('mean', 'sum', etc.) 
- - Returns: - Filtered and aggregated DataArray. - """ - # Apply filters - filtered_data = dataarray - for dim, dim_filter in filters.items(): - if isinstance(dim_filter, slice): - filtered_data = filtered_data.sel({dim: dim_filter}) - elif isinstance(dim_filter, (list, np.ndarray)): - filtered_data = filtered_data.sel({dim: dim_filter}) - - # Apply aggregation if requested - if agg_dims and len(agg_dims) > 0: - if agg_method == 'mean': - filtered_data = filtered_data.mean(dim=agg_dims) - elif agg_method == 'sum': - filtered_data = filtered_data.sum(dim=agg_dims) - elif agg_method == 'min': - filtered_data = filtered_data.min(dim=agg_dims) - elif agg_method == 'max': - filtered_data = filtered_data.max(dim=agg_dims) - elif agg_method == 'std': - filtered_data = filtered_data.std(dim=agg_dims) - - return filtered_data - - -@show_traceback() -def resample_time_data(data: xr.DataArray, freq: str) -> xr.DataArray: - """Resamples a DataArray along its time dimension. - - Args: - data: The xarray DataArray containing a time dimension. - freq: The resampling frequency string (e.g., 'D', 'M', 'Y', '5min'). - - Returns: - The resampled DataArray. - """ - # Find the time dimension name - time_dims = [dim for dim in data.dims if dim in ['time', 't'] or (isinstance(data[dim].values[0], np.datetime64))] - - if not time_dims: - # No time dimension found - return data - - time_dim = time_dims[0] - - try: - # Resample the data - default aggregation is mean - resampled_data = data.resample({time_dim: freq}).mean() - return resampled_data - except Exception as e: - print(f'Error resampling data: {e}') - return data - - -@show_traceback() -def get_time_aggregation_ui(container: Any, data: xr.DataArray, st_key: str = '') -> Tuple[bool, Optional[str]]: - """Creates UI elements for time-based aggregation options. - - Args: - container: The streamlit container to render UI elements in. - data: The xarray DataArray to check for time dimensions. 
- - Returns: - A tuple containing: - - Boolean indicating if time resampling should be applied - - The selected resampling frequency (or None if no resampling) - """ - # Find time dimensions - time_dims = [ - dim - for dim in data.dims - if dim in ['time', 't', 'date', 'datetime'] - or (len(data[dim]) > 0 and isinstance(data[dim].values[0], np.datetime64)) - ] - - if not time_dims: - # No time dimensions - return False, None - - # Create unique key base for all widgets - key_base = f'time_resampling_{time_dims[0]}_{st_key}' - - # Show time resampling options - container.subheader('Time Resampling') - time_dim = time_dims[0] - - # Check if the dimension has enough elements to be worth resampling - min_elements_for_resampling = 5 - if len(data[time_dim]) < min_elements_for_resampling: - container.info(f"Time dimension '{time_dim}' has too few elements for resampling.") - return False, None - - # Get the time range for display - try: - # Convert to pandas datetime for safe handling of different datetime formats - time_values = data[time_dim].values - if isinstance(time_values[0], str): - # Try to convert string dates to datetime - time_values = pd.to_datetime(time_values) - - start_time = pd.to_datetime(time_values[0]) - end_time = pd.to_datetime(time_values[-1]) - time_range = end_time - start_time - - # Show time range information - container.write(f'Time range: {start_time.date()} to {end_time.date()} ({time_range.days} days)') - except Exception as e: - container.warning(f'Error determining time range: {e}') - # Even if there's an error showing the range, we can still offer resampling - time_range = pd.Timedelta(days=365) # Assume a 1-year range as default - - # Determine appropriate resampling options based on the time range - resampling_options = [] - - try: - days = time_range.days - - # Always include options for hourly/daily data - if days >= 2: - resampling_options.extend(['H', 'D']) - - # For data spanning more than a week - if days > 7: - 
resampling_options.extend(['W']) - - # For data spanning more than a month - if days > 30: - resampling_options.extend(['M']) - - # For data spanning more than a year - if days > 365: - resampling_options.extend(['Q', 'Y']) - except: - # Fallback options if we can't determine from time range - resampling_options = ['H', 'D', 'W', 'M'] - - # Ensure we have at least some options - if not resampling_options: - resampling_options = ['H', 'D', 'W', 'M'] - - # Create friendly names for UI - freq_map = {'H': 'Hour', 'D': 'Day', 'W': 'Week', 'M': 'Month', 'Q': 'Quarter', 'Y': 'Year'} - - friendly_options = [freq_map.get(opt, opt) for opt in resampling_options] - - # Add "None" option for no resampling - resampling_options = ['none'] + resampling_options - friendly_options = ['None (original data)'] + friendly_options - - # Add "Custom" option - resampling_options.append('custom') - friendly_options.append('Custom frequency string') - - # Create the selection widget - use_resampling = container.checkbox('Enable time resampling', value=False, key=f'{key_base}_enable') - - if use_resampling: - selected_freq_name = container.selectbox( - 'Resample to:', options=friendly_options, key=f'{key_base}_freq_select' - ) - - # Map back to actual frequency string - selected_index = friendly_options.index(selected_freq_name) - selected_freq = resampling_options[selected_index] - - if selected_freq == 'none': - return False, None - elif selected_freq == 'custom': - # Provide information about pandas frequency strings - with container.expander('Frequency string help'): - container.write(""" - **Pandas frequency strings examples:** - - '5min': 5 minutes - - '2H': 2 hours - - '1D': 1 day - - '1W': 1 week - - '2W-MON': Biweekly on Monday - - '1M': 1 month - - '1Q': 1 quarter - - '1A' or '1Y': 1 year - - '3A': 3 years - - You can also use combinations like '1D12H' for 1 day and 12 hours. 
- """) - - # Allow user to input a custom frequency string - custom_freq = container.text_input( - 'Enter custom frequency string:', - value='1D', # Default to daily - help="Enter a pandas frequency string like '5min', '2H', '1D', '1W', '1M'", - key=f'{key_base}_custom_input', - ) - - if custom_freq: - # Validate the frequency string - try: - # Try to create a sample resampling to validate the string - test_dates = pd.date_range('2020-01-01', periods=3, freq='D') - test_series = pd.Series(range(3), index=test_dates) - test_series.resample(custom_freq).mean() - - # If we get here, the frequency string is valid - # Show information about what resampling will do - try: - n_points_before = len(data[time_dim]) - - # Convert string dates to datetime if needed for resampling preview - if isinstance(data[time_dim].values[0], str): - # Create a temporary copy with datetime index for preview - temp_data = data.copy() - temp_data.coords[time_dim] = pd.to_datetime(temp_data[time_dim].values) - resampled = temp_data.resample({time_dim: custom_freq}).mean() - else: - resampled = data.resample({time_dim: custom_freq}).mean() - - n_points_after = len(resampled[time_dim]) - container.info(f'Resampling will change data points from {n_points_before} to {n_points_after}') - except Exception as e: - container.warning(f'Cannot preview resampling effect: {str(e)}') - - return True, custom_freq - except Exception as e: - container.error(f'Invalid frequency string: {str(e)}') - return False, None - else: - return False, None - else: - # Show information about what resampling will do - try: - n_points_before = len(data[time_dim]) - - # Convert string dates to datetime if needed for resampling preview - if len(data[time_dim]) > 0 and isinstance(data[time_dim].values[0], str): - # Create a temporary copy with datetime index for preview - temp_data = data.copy() - temp_data.coords[time_dim] = pd.to_datetime(temp_data[time_dim].values) - resampled = temp_data.resample({time_dim: 
selected_freq}).mean() - else: - resampled = data.resample({time_dim: selected_freq}).mean() - - n_points_after = len(resampled[time_dim]) - container.info( - f'Resampling will change data points from {n_points_before} to {n_points_after}', - key=f'{key_base}_info', - ) - except Exception as e: - container.warning(f'Cannot preview resampling effect: {str(e)}', key=f'{key_base}_warning') - - return True, selected_freq - else: - return False, None - - -@show_traceback() -def create_plotly_plot( - data: xr.DataArray, plot_type: str, var_name: str, title: Optional[str] = None, x_dim: Optional[str] = None -) -> go.Figure: - """Creates a plotly plot based on the selected data and plot type. - - Args: - data: The filtered/aggregated data array to plot. - plot_type: Type of plot to create (Line, Stacked Bar, Grouped Bar, or Heatmap). - var_name: Name of the selected variable. - title: Plot title. - x_dim: Dimension to use for x-axis in line plots. - - Returns: - Plotly figure object. - """ - # Check if we have valid data to plot - if data is None: - return go.Figure().update_layout( - title='No data to plot', - annotations=[ - dict( - text='No valid data found for plotting. 
Check your selections.', - showarrow=False, - xref='paper', - yref='paper', - x=0.5, - y=0.5, - ) - ], - ) - - # Get dimensions of the data array - dims = list(data.dims) - - # Create different plot types based on dimensions and selection - if plot_type == 'Line': - # Line plot - if len(dims) == 1: - # Simple line plot for 1D data - x_values = data[dims[0]].values - y_values = data.values - - fig = px.line(x=x_values, y=y_values, labels={'x': dims[0], 'y': var_name}, title=title) - - elif len(dims) >= 2 and x_dim is not None: - # Multiple lines for higher dimensional data - # Convert to dataframe for easy plotting - df = data.to_dataframe().reset_index() - - # Group by the x dimension - group_dims = [d for d in dims if d != x_dim] - - if len(group_dims) == 0: - # If no grouping dimensions, just plot a single line - fig = px.line(df, x=x_dim, y=var_name, title=title) - else: - # Create a plot with a line for each unique combination of group dimensions - fig = px.line( - df, - x=x_dim, - y=var_name, - color=group_dims[0] if len(group_dims) == 1 else None, # Use first group dim for color - facet_col=group_dims[1] if len(group_dims) > 1 else None, # Use second group dim for faceting - title=title, - ) - else: - # Not enough dimensions for line plot - fig = go.Figure().update_layout( - title='Cannot create Line plot', - annotations=[ - dict( - text='Need at least one dimension for Line plot', - showarrow=False, - xref='paper', - yref='paper', - x=0.5, - y=0.5, - ) - ], - ) - - elif plot_type == 'Stacked Bar': - if len(dims) >= 2: - # Convert to dataframe - df = data.to_dataframe().reset_index() - - # For stacked bar, need a category dimension and a value dimension - if x_dim is not None and x_dim in dims: - # Use the selected x dimension - x = x_dim - # Get another dimension for stacking - stack_dim = next((d for d in dims if d != x_dim), None) - - if stack_dim: - fig = px.bar(df, x=x, y=var_name, color=stack_dim, barmode='stack', title=title) - else: - # No dimension 
to stack - fig = px.bar(df, x=x, y=var_name, title=title) - else: - # Default to first dimension for x-axis - x = dims[0] - stack_dim = dims[1] if len(dims) > 1 else None - - fig = px.bar(df, x=x, y=var_name, color=stack_dim, barmode='stack', title=title) - elif len(dims) == 1: - # Single dimension bar plot - df = data.to_dataframe().reset_index() - - fig = px.bar(df, x=dims[0], y=var_name, title=title) - else: - # Not enough dimensions - fig = go.Figure().update_layout( - title='Cannot create Stacked Bar plot', - annotations=[ - dict( - text='Need at least one dimension for Stacked Bar plot', - showarrow=False, - xref='paper', - yref='paper', - x=0.5, - y=0.5, - ) - ], - ) - - elif plot_type == 'Grouped Bar': - if len(dims) >= 2: - # Convert to dataframe - df = data.to_dataframe().reset_index() - - # For grouped bar, need a category dimension and a group dimension - if x_dim is not None and x_dim in dims: - # Use the selected x dimension - x = x_dim - # Get another dimension for grouping - group_dim = next((d for d in dims if d != x_dim), None) - - if group_dim: - fig = px.bar(df, x=x, y=var_name, color=group_dim, barmode='group', title=title) - else: - # No dimension to group - fig = px.bar(df, x=x, y=var_name, title=title) - else: - # Default to first dimension for x-axis - x = dims[0] - group_dim = dims[1] if len(dims) > 1 else None - - fig = px.bar(df, x=x, y=var_name, color=group_dim, barmode='group', title=title) - elif len(dims) == 1: - # Single dimension bar plot - df = data.to_dataframe().reset_index() - - fig = px.bar(df, x=dims[0], y=var_name, title=title) - else: - # Not enough dimensions - fig = go.Figure().update_layout( - title='Cannot create Grouped Bar plot', - annotations=[ - dict( - text='Need at least one dimension for Grouped Bar plot', - showarrow=False, - xref='paper', - yref='paper', - x=0.5, - y=0.5, - ) - ], - ) - - elif plot_type == 'Heatmap' and len(dims) >= 2: - # Heatmap for 2D data - if len(dims) > 2: - # If more than 2 dimensions, 
need to select which dimensions to use - if x_dim is not None and x_dim in dims: - # Use x_dim and find another dimension - dim1 = x_dim - dim2 = next((d for d in dims if d != x_dim), None) - - # Need to aggregate other dimensions - agg_dims = [d for d in dims if d != dim1 and d != dim2] - if agg_dims: - # Aggregate other dimensions using mean - data = data.mean(dim=agg_dims) - else: - # Use first two dimensions - dim1, dim2 = dims[:2] - - # Aggregate other dimensions if needed - if len(dims) > 2: - agg_dims = dims[2:] - data = data.mean(dim=agg_dims) - else: - dim1, dim2 = dims - - # Create heatmap - fig = px.imshow( - data.values, - x=data[dim1].values, - y=data[dim2].values, - labels=dict(x=dim1, y=dim2, color=var_name), - title=title, - color_continuous_scale='Viridis', - ) - else: - # Default empty plot with warning - fig = go.Figure().update_layout( - title='Cannot create plot', - annotations=[ - dict( - text=f'Cannot create {plot_type} plot with the current data dimensions', - showarrow=False, - xref='paper', - yref='paper', - x=0.5, - y=0.5, - ) - ], - ) - - # Common layout settings - fig.update_layout( - height=600, - width=800, - margin=dict(l=50, r=50, t=50, b=50), - legend=dict(orientation='h', yanchor='bottom', y=1.02, xanchor='right', x=1), - ) - - return fig - - @show_traceback() def download_data(filtered_data: xr.DataArray, var_name: str, download_format: str, container: Any) -> None: """Creates download buttons for the filtered data. @@ -880,158 +159,198 @@ def download_data(filtered_data: xr.DataArray, var_name: str, download_format: s @show_traceback() -def xarray_explorer_component( - data: Union[xr.Dataset, xr.DataArray], container: Any = None, st_key: Optional[str] = None -) -> Union[xr.DataArray, xr.Dataset]: - """A reusable Streamlit component that creates an xarray data explorer. 
- - This component allows users to interactively explore an xarray Dataset or DataArray by - selecting variables (for Dataset), filtering dimensions, and creating visualizations. +def xarray_explorer(data: Union[xr.Dataset, xr.DataArray]): + """ + A simple xarray explorer for both DataArrays and Datasets. + Just pass your xarray object to this function. Args: - data: The xarray Dataset or DataArray to explore. - container: The Streamlit container to render the explorer in. - If None, renders in the current Streamlit app context. - st_key: Optional key prefix for Streamlit widget keys to avoid duplicates. - - Returns: - The filtered/selected data. + data: xarray.Dataset or xarray.DataArray """ - # If no container is provided, use the current Streamlit context - if container is None: - container = st - - # Determine if we're dealing with a Dataset or DataArray + # Determine if we're working with Dataset or DataArray is_dataset = isinstance(data, xr.Dataset) - # Dataset/DataArray information - with container.expander('Data Overview'): - if is_dataset: - container.write('### Dataset Metadata') - else: - container.write('### DataArray Metadata') - - container.write(data.attrs) - - container.write('### Dimensions') - #container.write(pd.DataFrame({'Dimension': list(data.indexes.keys()), 'Size': list(data.indexes.values())})) - - if is_dataset: - container.write('### Variables') - var_info = [] - for var_name, var in data.variables.items(): - var_info.append( - { - 'Variable': var_name, - 'Dimensions': ', '.join(var.dims), - 'Shape': str(var.shape), - 'Type': str(var.dtype), - } - ) - container.dataframe(pd.DataFrame(var_info), key=f'{st_key}_var_info_dataframe') - else: - # For DataArray, show its basic info - container.write('### DataArray Information') - container.write(f'**Name:** {data.name}') - container.write(f'**Type:** {data.dtype}') - - # For Dataset: Variable selection - # For DataArray: Just display the variable name + # Variable selection for Dataset or 
direct visualization for DataArray if is_dataset: - container.subheader('Variable Selection') - selected_var = container.selectbox( - 'Select variable to explore', list(data.data_vars), key=f'{st_key}_variable_selector' - ) - # Get the variable - variable = data[selected_var] + # Variable selection + selected_var = st.selectbox("Select variable:", list(data.data_vars)) + array_to_plot = data[selected_var] else: - # For DataArray, we already have the variable - variable = data - selected_var = variable.name if variable.name is not None else 'DataArray' - container.subheader(f'Exploring: {selected_var}') - - # Display variable info - container.write(f'**Variable shape:** {variable.shape}') - container.write(f'**Variable dimensions:** {variable.dims}') - dims = list(variable.dims) - - # Create column layout - col1, col2 = container.columns([1, 2]) - - with col1: - container.subheader('Query Parameters') - - # Set filters for each dimension - filters = {} - for dim in dims: - dim_filter = get_dimension_selector(data, dim, container, st_key=st_key) - if dim_filter is not None: - filters[dim] = dim_filter - - # Aggregation options - container.subheader('Aggregation Options') - agg_dims = container.multiselect('Dimensions to aggregate', dims, key=f'{st_key}_agg_dims_selector') - agg_method = container.selectbox( - 'Aggregation method', ['mean', 'sum', 'min', 'max', 'std'], key=f'{st_key}_agg_method_selector' - ) - - # Check if data has time dimension and add time resampling UI - use_time_resampling, resampling_freq = get_time_aggregation_ui(container, variable, st_key=st_key) - - # Plot type selection - limited to the requested types - container.subheader('Plot Settings') - plot_type = container.selectbox( - 'Plot type', ['Line', 'Stacked Bar', 'Grouped Bar', 'Heatmap'], key=f'{st_key}_plot_type_selector' - ) - - if plot_type in ['Line', 'Stacked Bar', 'Grouped Bar']: - remaining_dims = [d for d in dims if d not in agg_dims] - if remaining_dims: - x_dim = 
container.selectbox('X axis dimension', remaining_dims, key=f'{st_key}_x_dim_selector') - else: - x_dim = None + # If DataArray, use directly + array_to_plot = data + selected_var = data.name if data.name else "Data" + + # Visualization section + st.subheader("Visualization") + + # Determine available visualization options based on dimensions + dims = list(array_to_plot.dims) + ndim = len(dims) + + # Different visualization options based on dimensionality + if ndim == 0: + # Scalar value + st.metric("Value", float(array_to_plot.values)) + + elif ndim == 1: + # 1D data: line plot + fig = px.line(x=array_to_plot[dims[0]].values, + y=array_to_plot.values, + labels={"x": dims[0], "y": selected_var}) + st.plotly_chart(fig, use_container_width=True) + + # Also show histogram + fig2 = px.histogram(x=array_to_plot.values, nbins=30, + labels={"x": selected_var}) + st.plotly_chart(fig2, use_container_width=True) + + elif ndim >= 2: + # For high dimensional data, let user select dimensions to plot + st.write("Select dimensions to visualize:") + + viz_cols = st.columns(2) + + with viz_cols[0]: + # Choose which dimension to put on x-axis + x_dim = st.selectbox("X dimension:", dims, index=0) + + # Choose which dimension to put on y-axis + remaining_dims = [d for d in dims if d != x_dim] + y_dim = st.selectbox("Y dimension:", remaining_dims, + index=0 if len(remaining_dims) > 0 else None) + + # If we have more than 2 dimensions, let user select values for other dimensions + with viz_cols[1]: + # Setup sliders for other dimensions + slice_dims = [d for d in dims if d not in [x_dim, y_dim]] + slice_indexes = {} + + for dim in slice_dims: + dim_size = array_to_plot.sizes[dim] # Use sizes instead of dims + slice_indexes[dim] = st.slider(f"Position in {dim} dimension", + 0, dim_size-1, dim_size//2) + + # Create slice dictionary for selection + slice_dict = {dim: slice_indexes[dim] for dim in slice_dims} + + # Select the data to plot + if slice_dims: + array_slice = 
array_to_plot.isel(slice_dict) else: - x_dim = None + array_slice = array_to_plot + + # Visualization depends on whether we have 1 or 2 dimensions selected + if y_dim: + # 2D visualization: heatmap + fig = px.imshow(array_slice.transpose(y_dim, x_dim).values, + x=array_slice[x_dim].values, + y=array_slice[y_dim].values, + color_continuous_scale="viridis", + labels={"x": x_dim, "y": y_dim, "color": selected_var}) + + fig.update_layout(height=500) + st.plotly_chart(fig, use_container_width=True) + else: + # 1D visualization after slicing + fig = px.line(x=array_slice[x_dim].values, + y=array_slice.values, + labels={"x": x_dim, "y": selected_var}) + st.plotly_chart(fig, use_container_width=True) - # Filter and aggregate the selected variable - if is_dataset: - filtered_data = filter_and_aggregate(data[selected_var], filters, agg_dims, agg_method) - else: - # For DataArray, we don't need to select a variable - filtered_data = filter_and_aggregate(data, filters, agg_dims, agg_method) + # Data preview section + st.subheader("Data Preview") - # Apply time resampling if requested - if use_time_resampling and resampling_freq: - container.info(f'Applying time resampling with frequency: {resampling_freq}') - filtered_data = resample_time_data(filtered_data, resampling_freq) + # Convert to dataframe for display + try: + # Limit to first 1000 elements for performance + preview_data = array_to_plot + total_size = np.prod(preview_data.shape) + + if total_size > 1000: + st.warning(f"Data is large ({total_size} elements). 
Showing first 1000 elements.") + # Create a slice dict to get first elements from each dimension + preview_slice = {} + remaining = 1000 + for dim in preview_data.dims: + dim_size = preview_data.sizes[dim] # Use sizes instead of dims + take = min(dim_size, max(1, int(remaining**(1/len(preview_data.dims))))) + preview_slice[dim] = slice(0, take) + remaining = remaining // take + + preview_data = preview_data.isel(preview_slice) + + # Convert to dataframe and display + df = preview_data.to_dataframe() + st.dataframe(df) + except Exception as e: + st.error(f"Could not convert to dataframe: {str(e)}") - # Display the visualizations - with col2: - container.subheader('Visualization') + # Download options + st.subheader('Download Options') + download_format = st.selectbox( + 'Download format', ['CSV', 'NetCDF', 'Excel'] + ) - # Create the plot - plot_title = f'{selected_var} {plot_type} Plot' - fig = create_plotly_plot( - filtered_data, plot_type, selected_var if is_dataset else None, title=plot_title, x_dim=x_dim - ) + if st.button('Download filtered data'): + download_data(array_to_plot, selected_var, download_format) - # Show the plot - container.plotly_chart(fig, use_container_width=True) + # Display basic information + col1, col2 = st.columns([1, 2]) - # Data preview - with container.expander('Data Preview'): - container.dataframe(filtered_data.to_dataframe()) + with col1: + st.subheader("Data Information") - # Download options - container.subheader('Download Options') - download_format = container.selectbox( - 'Download format', ['CSV', 'NetCDF', 'Excel'], key=f'{st_key}_download_format_selector' - ) + # Show dimensions and their sizes - using sizes instead of dims + st.write("**Dimensions:**") + dim_df = pd.DataFrame({ + "Dimension": list(data.sizes.keys()), + "Size": list(data.sizes.values()) + }) + st.dataframe(dim_df) - if container.button('Download filtered data', key=f'{st_key}_download_button'): - download_data(filtered_data, selected_var, download_format, 
container) + # For Dataset, show variables + if is_dataset: + st.write("**Variables:**") + var_info = [] + for var_name, var in data.variables.items(): + var_info.append({ + "Variable": var_name, + "Dimensions": ", ".join(var.dims), + "Type": str(var.dtype) + }) + st.dataframe(pd.DataFrame(var_info)) + + # Show coordinates + if data.coords: + st.write("**Coordinates:**") + coord_info = [] + for coord_name, coord in data.coords.items(): + coord_info.append({ + "Coordinate": coord_name, + "Dimensions": ", ".join(coord.dims), + "Type": str(coord.dtype) + }) + st.dataframe(pd.DataFrame(coord_info)) + + # Show attributes + if data.attrs: + st.write("**Attributes:**") + st.json(data.attrs) + + # Display variable information + with col2: + st.subheader(f"Variable: {selected_var}") - return filtered_data + # Display basic stats if numeric + try: + if np.issubdtype(array_to_plot.dtype, np.number): + stats_cols = st.columns(4) + stats_cols[0].metric("Min", float(array_to_plot.min().values)) + stats_cols[1].metric("Max", float(array_to_plot.max().values)) + stats_cols[2].metric("Mean", float(array_to_plot.mean().values)) + stats_cols[3].metric("Std", float(array_to_plot.std().values)) + except: + pass @show_traceback() @@ -1179,7 +498,7 @@ def explore_results_app(results): # Variables tab with tabs[1]: # Use the reusable function - xarray_explorer_component(component.solution) + xarray_explorer(component.solution) # Buses page elif selected_page == "Buses": @@ -1226,7 +545,7 @@ def explore_results_app(results): # Variables tab with tabs[1]: # Use the reusable function - xarray_explorer_component(bus.solution) + xarray_explorer(bus.solution) # Effects page elif selected_page == "Effects": @@ -1239,30 +558,24 @@ def explore_results_app(results): st.header(f"Effect: {effect_name}") - xarray_explorer_component(effect.solution) + xarray_explorer(effect.solution) elif selected_page == "Flows DS": st.title('Flow Rates Dataset') mode = st.selectbox("Select a mode", ['Flow Rates', 
'Flow Hours']) if mode == 'Flow Hours': - xarray_explorer_component(results.flow_hours()) + xarray_explorer(results.flow_hours()) else: - xarray_explorer_component(results.flow_rates()) + xarray_explorer(results.flow_rates()) elif selected_page == 'Effects DS': st.title('Effects Dataset') - tabs = st.tabs(["total", "invest", "operation"]) - - with tabs[0]: - xarray_explorer_component(results.effects_per_component('total'), st_key='total') - with tabs[1]: - xarray_explorer_component(results.effects_per_component('invest'), st_key='invest') - with tabs[2]: - xarray_explorer_component(results.effects_per_component('operation'), st_key='operation') + mode = st.selectbox("Select a mode", ['total', 'invest', 'operation']) + xarray_explorer(results.effects_per_component(mode)) elif selected_page == "Explorer": st.title("Explore all variable results") - xarray_explorer_component(results.solution) + xarray_explorer(results.solution) def run_explorer_from_file(folder, name): From d636276d50906b5a1edfef4af8f2273876341fae Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 15 May 2025 13:09:18 +0200 Subject: [PATCH 75/87] Make more modular --- flixopt/explorer_app.py | 531 ++++++++++++++++++++++++++++------------ 1 file changed, 375 insertions(+), 156 deletions(-) diff --git a/flixopt/explorer_app.py b/flixopt/explorer_app.py index 0c0e7112c..c0d0c9b28 100644 --- a/flixopt/explorer_app.py +++ b/flixopt/explorer_app.py @@ -159,121 +159,204 @@ def download_data(filtered_data: xr.DataArray, var_name: str, download_format: s @show_traceback() -def xarray_explorer(data: Union[xr.Dataset, xr.DataArray]): +def display_data_info(data: Union[xr.Dataset, xr.DataArray], container: Optional[Any] = None) -> None: """ - A simple xarray explorer for both DataArrays and Datasets. - Just pass your xarray object to this function. + Display basic information about an xarray object. 
Args: data: xarray.Dataset or xarray.DataArray + container: Streamlit container to render in (if None, uses st directly) """ - # Determine if we're working with Dataset or DataArray - is_dataset = isinstance(data, xr.Dataset) + if container is None: + container = st + + # Show dimensions and their sizes + container.write('**Dimensions:**') + dim_df = pd.DataFrame({'Dimension': list(data.sizes.keys()), 'Size': list(data.sizes.values())}) + container.dataframe(dim_df) + + # For Dataset, show variables + if isinstance(data, xr.Dataset): + container.write('**Variables:**') + var_info = [] + for var_name, var in data.variables.items(): + var_info.append({'Variable': var_name, 'Dimensions': ', '.join(var.dims), 'Type': str(var.dtype)}) + container.dataframe(pd.DataFrame(var_info)) + + # Show coordinates + if data.coords: + container.write('**Coordinates:**') + coord_info = [] + for coord_name, coord in data.coords.items(): + coord_info.append({'Coordinate': coord_name, 'Dimensions': ', '.join(coord.dims), 'Type': str(coord.dtype)}) + container.dataframe(pd.DataFrame(coord_info)) + + # Show attributes + if data.attrs: + container.write('**Attributes:**') + container.json(data.attrs) - # Variable selection for Dataset or direct visualization for DataArray - if is_dataset: - # Variable selection - selected_var = st.selectbox("Select variable:", list(data.data_vars)) - array_to_plot = data[selected_var] + +@show_traceback() +def display_variable_stats(array: xr.DataArray, container: Optional[Any] = None) -> None: + """ + Display basic statistics for a DataArray if it's numeric. 
+ + Args: + array: xarray.DataArray to compute stats for + container: Streamlit container to render in (if None, uses st directly) + """ + if container is None: + container = st + + try: + if np.issubdtype(array.dtype, np.number): + stats_cols = container.columns(4) + stats_cols[0].metric('Min', float(array.min().values)) + stats_cols[1].metric('Max', float(array.max().values)) + stats_cols[2].metric('Mean', float(array.mean().values)) + stats_cols[3].metric('Std', float(array.std().values)) + except: + pass + + +@show_traceback() +def plot_scalar(array: xr.DataArray, container: Optional[Any] = None) -> None: + """ + Plot a scalar (0-dimensional) DataArray. + + Args: + array: xarray.DataArray with 0 dimensions + container: Streamlit container to render in (if None, uses st directly) + """ + if container is None: + container = st + + container.metric('Value', float(array.values)) + + +@show_traceback() +def plot_1d(array: xr.DataArray, var_name: str, container: Optional[Any] = None) -> None: + """ + Plot a 1-dimensional DataArray. + + Args: + array: xarray.DataArray with 1 dimension + var_name: Name of the variable being plotted + container: Streamlit container to render in (if None, uses st directly) + """ + if container is None: + container = st + + dim = list(array.dims)[0] + + # Line plot + fig = px.line(x=array[dim].values, y=array.values, labels={'x': dim, 'y': var_name}) + container.plotly_chart(fig, use_container_width=True) + + # Histogram + fig2 = px.histogram(x=array.values, nbins=30, labels={'x': var_name}) + container.plotly_chart(fig2, use_container_width=True) + + +@show_traceback() +def plot_nd(array: xr.DataArray, var_name: str, container: Optional[Any] = None) -> Tuple[xr.DataArray, Optional[Dict]]: + """ + Plot a multi-dimensional DataArray with interactive dimension selectors. 
+ + Args: + array: xarray.DataArray with 2+ dimensions + var_name: Name of the variable being plotted + container: Streamlit container to render in (if None, uses st directly) + + Returns: + Tuple of (sliced array, selection dictionary) + """ + if container is None: + container = st + + dims = list(array.dims) + + container.write('Select dimensions to visualize:') + + viz_cols = container.columns(2) + + with viz_cols[0]: + # Choose which dimension to put on x-axis + x_dim = st.selectbox('X dimension:', dims, index=0) + + # Choose which dimension to put on y-axis + remaining_dims = [d for d in dims if d != x_dim] + y_dim = st.selectbox('Y dimension:', remaining_dims, index=0 if len(remaining_dims) > 0 else None) + + # If we have more than 2 dimensions, let user select values for other dimensions + with viz_cols[1]: + # Setup sliders for other dimensions + slice_dims = [d for d in dims if d not in [x_dim, y_dim]] + slice_indexes = {} + + for dim in slice_dims: + dim_size = array.sizes[dim] + slice_indexes[dim] = st.slider(f'Position in {dim} dimension', 0, dim_size - 1, dim_size // 2) + + # Create slice dictionary for selection + slice_dict = {dim: slice_indexes[dim] for dim in slice_dims} + + # Select the data to plot + if slice_dims: + array_slice = array.isel(slice_dict) else: - # If DataArray, use directly - array_to_plot = data - selected_var = data.name if data.name else "Data" + array_slice = array + + # Visualization depends on whether we have 1 or 2 dimensions selected + if y_dim: + # 2D visualization: heatmap + fig = px.imshow( + array_slice.transpose(y_dim, x_dim).values, + x=array_slice[x_dim].values, + y=array_slice[y_dim].values, + color_continuous_scale='viridis', + labels={'x': x_dim, 'y': y_dim, 'color': var_name}, + ) - # Visualization section - st.subheader("Visualization") + fig.update_layout(height=500) + container.plotly_chart(fig, use_container_width=True) + else: + # 1D visualization after slicing + fig = px.line(x=array_slice[x_dim].values, 
y=array_slice.values, labels={'x': x_dim, 'y': var_name}) + container.plotly_chart(fig, use_container_width=True) - # Determine available visualization options based on dimensions - dims = list(array_to_plot.dims) - ndim = len(dims) + return array_slice, slice_dict - # Different visualization options based on dimensionality - if ndim == 0: - # Scalar value - st.metric("Value", float(array_to_plot.values)) - elif ndim == 1: - # 1D data: line plot - fig = px.line(x=array_to_plot[dims[0]].values, - y=array_to_plot.values, - labels={"x": dims[0], "y": selected_var}) - st.plotly_chart(fig, use_container_width=True) - - # Also show histogram - fig2 = px.histogram(x=array_to_plot.values, nbins=30, - labels={"x": selected_var}) - st.plotly_chart(fig2, use_container_width=True) - - elif ndim >= 2: - # For high dimensional data, let user select dimensions to plot - st.write("Select dimensions to visualize:") - - viz_cols = st.columns(2) - - with viz_cols[0]: - # Choose which dimension to put on x-axis - x_dim = st.selectbox("X dimension:", dims, index=0) - - # Choose which dimension to put on y-axis - remaining_dims = [d for d in dims if d != x_dim] - y_dim = st.selectbox("Y dimension:", remaining_dims, - index=0 if len(remaining_dims) > 0 else None) - - # If we have more than 2 dimensions, let user select values for other dimensions - with viz_cols[1]: - # Setup sliders for other dimensions - slice_dims = [d for d in dims if d not in [x_dim, y_dim]] - slice_indexes = {} - - for dim in slice_dims: - dim_size = array_to_plot.sizes[dim] # Use sizes instead of dims - slice_indexes[dim] = st.slider(f"Position in {dim} dimension", - 0, dim_size-1, dim_size//2) - - # Create slice dictionary for selection - slice_dict = {dim: slice_indexes[dim] for dim in slice_dims} - - # Select the data to plot - if slice_dims: - array_slice = array_to_plot.isel(slice_dict) - else: - array_slice = array_to_plot - - # Visualization depends on whether we have 1 or 2 dimensions selected - if y_dim: 
- # 2D visualization: heatmap - fig = px.imshow(array_slice.transpose(y_dim, x_dim).values, - x=array_slice[x_dim].values, - y=array_slice[y_dim].values, - color_continuous_scale="viridis", - labels={"x": x_dim, "y": y_dim, "color": selected_var}) - - fig.update_layout(height=500) - st.plotly_chart(fig, use_container_width=True) - else: - # 1D visualization after slicing - fig = px.line(x=array_slice[x_dim].values, - y=array_slice.values, - labels={"x": x_dim, "y": selected_var}) - st.plotly_chart(fig, use_container_width=True) +@show_traceback() +def display_data_preview(array: xr.DataArray, container: Optional[Any] = None) -> pd.DataFrame: + """ + Display a preview of the data as a dataframe. - # Data preview section - st.subheader("Data Preview") + Args: + array: xarray.DataArray to preview + container: Streamlit container to render in (if None, uses st directly) + + Returns: + DataFrame containing the preview data + """ + if container is None: + container = st - # Convert to dataframe for display try: # Limit to first 1000 elements for performance - preview_data = array_to_plot + preview_data = array total_size = np.prod(preview_data.shape) if total_size > 1000: - st.warning(f"Data is large ({total_size} elements). Showing first 1000 elements.") + container.warning(f'Data is large ({total_size} elements). 
Showing first 1000 elements.') # Create a slice dict to get first elements from each dimension preview_slice = {} remaining = 1000 for dim in preview_data.dims: - dim_size = preview_data.sizes[dim] # Use sizes instead of dims - take = min(dim_size, max(1, int(remaining**(1/len(preview_data.dims))))) + dim_size = preview_data.sizes[dim] + take = min(dim_size, max(1, int(remaining ** (1 / len(preview_data.dims))))) preview_slice[dim] = slice(0, take) remaining = remaining // take @@ -281,76 +364,212 @@ def xarray_explorer(data: Union[xr.Dataset, xr.DataArray]): # Convert to dataframe and display df = preview_data.to_dataframe() - st.dataframe(df) + container.dataframe(df) + return df except Exception as e: - st.error(f"Could not convert to dataframe: {str(e)}") + container.error(f'Could not convert to dataframe: {str(e)}') + return pd.DataFrame() + + +@show_traceback() +def xarray_explorer( + data: Union[xr.Dataset, xr.DataArray], + custom_plotters: Optional[Dict[str, Callable]] = None, + container: Optional[Any] = None, +) -> Dict[str, Any]: + """ + A modular xarray explorer for both DataArrays and Datasets. + + Args: + data: xarray.Dataset or xarray.DataArray + custom_plotters: Dictionary of custom plotting functions by dimension. + Keys are 'scalar', '1d', and 'nd'. 
+ title: Title for the explorer + container: Streamlit container to render in (if None, uses st directly) + + Returns: + Dictionary containing information about the current state: + - 'data': Original xarray data + - 'selected_array': Currently selected/displayed array + - 'selected_var': Name of selected variable + - 'sliced_array': Array after slicing (for multi-dimensional arrays) + - 'slice_dict': Dictionary of dimension slices applied + """ + if container is None: + container = st + + # Determine if we're working with Dataset or DataArray + is_dataset = isinstance(data, xr.Dataset) + + # Variable selection for Dataset or direct visualization for DataArray + if is_dataset: + # Variable selection + selected_var = container.selectbox('Select variable:', list(data.data_vars)) + array_to_plot = data[selected_var] + else: + # If DataArray, use directly + array_to_plot = data + selected_var = data.name if data.name else 'Data' + + # Initialize result dictionary + result = { + 'data': data, + 'selected_array': array_to_plot, + 'selected_var': selected_var, + 'sliced_array': None, + 'slice_dict': None, + } + + # Visualization in right column + container.subheader('Visualization') + + # Determine available visualization options based on dimensions + dims = list(array_to_plot.dims) + ndim = len(dims) + + # Get the appropriate plotter function + plotters = {'scalar': plot_scalar, '1d': plot_1d, 'nd': plot_nd} + + # Override with custom plotters if provided + if custom_plotters: + plotters.update(custom_plotters) + + # Different visualization options based on dimensionality + if ndim == 0: + # Scalar value + plotters['scalar'](array_to_plot, container) + elif ndim == 1: + # 1D data + plotters['1d'](array_to_plot, selected_var, container) + else: + # 2D+ data + sliced_array, slice_dict = plotters['nd'](array_to_plot, selected_var, container) + result['sliced_array'] = sliced_array + result['slice_dict'] = slice_dict + + # Data preview section + container.subheader('Data 
Preview') + display_data_preview(array_to_plot, container) # Download options - st.subheader('Download Options') - download_format = st.selectbox( - 'Download format', ['CSV', 'NetCDF', 'Excel'] - ) + container.subheader('Download Options') + download_format = container.selectbox('Download format', ['CSV', 'NetCDF', 'Excel']) + + if container.button('Download filtered data'): + download_data( + array_to_plot if result['sliced_array'] is None else result['sliced_array'], + selected_var, + download_format, + container, + ) + - if st.button('Download filtered data'): - download_data(array_to_plot, selected_var, download_format) - - # Display basic information - col1, col2 = st.columns([1, 2]) - - with col1: - st.subheader("Data Information") - - # Show dimensions and their sizes - using sizes instead of dims - st.write("**Dimensions:**") - dim_df = pd.DataFrame({ - "Dimension": list(data.sizes.keys()), - "Size": list(data.sizes.values()) - }) - st.dataframe(dim_df) - - # For Dataset, show variables - if is_dataset: - st.write("**Variables:**") - var_info = [] - for var_name, var in data.variables.items(): - var_info.append({ - "Variable": var_name, - "Dimensions": ", ".join(var.dims), - "Type": str(var.dtype) - }) - st.dataframe(pd.DataFrame(var_info)) - - # Show coordinates - if data.coords: - st.write("**Coordinates:**") - coord_info = [] - for coord_name, coord in data.coords.items(): - coord_info.append({ - "Coordinate": coord_name, - "Dimensions": ", ".join(coord.dims), - "Type": str(coord.dtype) - }) - st.dataframe(pd.DataFrame(coord_info)) - - # Show attributes - if data.attrs: - st.write("**Attributes:**") - st.json(data.attrs) + container.subheader('Data Information') + display_data_info(data, container) # Display variable information - with col2: - st.subheader(f"Variable: {selected_var}") + container.subheader(f'Variable: {selected_var}') + display_variable_stats(array_to_plot, container) + + return result + + +# Example of a custom plotter 
+@show_traceback() +def custom_heatmap_plotter( + array: xr.DataArray, var_name: str, container: Optional[Any] = None +) -> Tuple[xr.DataArray, Optional[Dict]]: + """ + A custom plotter for multi-dimensional arrays that uses a different color scheme. + + Args: + array: xarray.DataArray with 2+ dimensions + var_name: Name of the variable being plotted + container: Streamlit container to render in (if None, uses st directly) + + Returns: + Tuple of (sliced array, selection dictionary) + """ + if container is None: + container = st + + # You can reuse much of the code from plot_nd but customize the actual plotting + dims = list(array.dims) + + container.write('Select dimensions to visualize:') + + viz_cols = container.columns(2) + + with viz_cols[0]: + # Choose which dimension to put on x-axis + x_dim = st.selectbox('X dimension:', dims, index=0, key='custom_x_dim') + + # Choose which dimension to put on y-axis + remaining_dims = [d for d in dims if d != x_dim] + y_dim = st.selectbox( + 'Y dimension:', remaining_dims, index=0 if len(remaining_dims) > 0 else None, key='custom_y_dim' + ) + + # If we have more than 2 dimensions, let user select values for other dimensions + with viz_cols[1]: + # Setup sliders for other dimensions + slice_dims = [d for d in dims if d not in [x_dim, y_dim]] + slice_indexes = {} + + for dim in slice_dims: + dim_size = array.sizes[dim] + slice_indexes[dim] = st.slider( + f'Position in {dim} dimension', 0, dim_size - 1, dim_size // 2, key=f'custom_{dim}_slider' + ) + + # Create slice dictionary for selection + slice_dict = {dim: slice_indexes[dim] for dim in slice_dims} + + # Select the data to plot + if slice_dims: + array_slice = array.isel(slice_dict) + else: + array_slice = array + + # Visualization depends on whether we have 1 or 2 dimensions selected + if y_dim: + # 2D visualization: heatmap with CUSTOM COLORS and LAYOUT + fig = px.imshow( + array_slice.transpose(y_dim, x_dim).values, + x=array_slice[x_dim].values, + 
y=array_slice[y_dim].values, + color_continuous_scale='Plasma', # Different color scale + labels={'x': x_dim, 'y': y_dim, 'color': var_name}, + ) + + # Customize layout + fig.update_layout( + height=600, # Taller + margin=dict(l=50, r=50, t=50, b=50), # More margin + coloraxis_colorbar=dict( + title=var_name, + thicknessmode='pixels', + thickness=20, + lenmode='pixels', + len=400, + outlinewidth=1, + outlinecolor='black', + borderwidth=1, + ), + ) + + container.plotly_chart(fig, use_container_width=True) + else: + # 1D visualization after slicing - with CUSTOM LINE STYLE + fig = px.line(x=array_slice[x_dim].values, y=array_slice.values, labels={'x': x_dim, 'y': var_name}) + + # Customize the line + fig.update_traces(line=dict(width=3, dash='dash', color='darkred')) + + container.plotly_chart(fig, use_container_width=True) + + return array_slice, slice_dict - # Display basic stats if numeric - try: - if np.issubdtype(array_to_plot.dtype, np.number): - stats_cols = st.columns(4) - stats_cols[0].metric("Min", float(array_to_plot.min().values)) - stats_cols[1].metric("Max", float(array_to_plot.max().values)) - stats_cols[2].metric("Mean", float(array_to_plot.mean().values)) - stats_cols[3].metric("Std", float(array_to_plot.std().values)) - except: - pass @show_traceback() From 4e4458df11590b8df337fe439b39cc49f0087864 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 15 May 2025 13:14:45 +0200 Subject: [PATCH 76/87] Improve the plotters --- flixopt/explorer_app.py | 155 +++++++++++++++++++++++++++++++++------- 1 file changed, 129 insertions(+), 26 deletions(-) diff --git a/flixopt/explorer_app.py b/flixopt/explorer_app.py index c0d0c9b28..268d0d5d6 100644 --- a/flixopt/explorer_app.py +++ b/flixopt/explorer_app.py @@ -238,7 +238,7 @@ def plot_scalar(array: xr.DataArray, container: Optional[Any] = None) -> None: @show_traceback() def plot_1d(array: xr.DataArray, var_name: str, container: Optional[Any] = None) -> None: """ - 
Plot a 1-dimensional DataArray. + Plot a 1-dimensional DataArray with multiple plot type options. Args: array: xarray.DataArray with 1 dimension @@ -250,19 +250,56 @@ def plot_1d(array: xr.DataArray, var_name: str, container: Optional[Any] = None) dim = list(array.dims)[0] - # Line plot - fig = px.line(x=array[dim].values, y=array.values, labels={'x': dim, 'y': var_name}) - container.plotly_chart(fig, use_container_width=True) + # Add plot type selector + plot_type = container.selectbox('Plot type:', ['Line', 'Bar', 'Histogram', 'Area'], key=f'plot_type_1d_{var_name}') + + # Create figure based on selected plot type + if plot_type == 'Line': + fig = px.line( + x=array[dim].values, y=array.values, labels={'x': dim, 'y': var_name}, title=f'{var_name} by {dim}' + ) + elif plot_type == 'Bar': + df = pd.DataFrame({dim: array[dim].values, 'value': array.values}) + fig = px.bar(df, x=dim, y='value', labels={'value': var_name}, title=f'{var_name} by {dim}') + elif plot_type == 'Histogram': + fig = px.histogram( + x=array.values, + nbins=min(30, len(array) // 2) if len(array) > 2 else 10, + labels={'x': var_name}, + title=f'Distribution of {var_name}', + ) + elif plot_type == 'Area': + df = pd.DataFrame({dim: array[dim].values, 'value': array.values}) + fig = px.area(df, x=dim, y='value', labels={'value': var_name}, title=f'{var_name} by {dim}') - # Histogram - fig2 = px.histogram(x=array.values, nbins=30, labels={'x': var_name}) - container.plotly_chart(fig2, use_container_width=True) + # Show the plot + container.plotly_chart(fig, use_container_width=True) + # For 1D data, we can also offer some basic statistics + if container.checkbox('Show statistics', key=f'show_stats_{var_name}'): + try: + stats = pd.DataFrame( + { + 'Statistic': ['Min', 'Max', 'Mean', 'Median', 'Std', 'Sum'], + 'Value': [ + float(array.min().values), + float(array.max().values), + float(array.mean().values), + float(np.median(array.values)), + float(array.std().values), + float(array.sum().values), + 
], + } + ) + container.dataframe(stats, use_container_width=True) + except Exception as e: + container.warning(f'Could not compute statistics: {str(e)}') @show_traceback() def plot_nd(array: xr.DataArray, var_name: str, container: Optional[Any] = None) -> Tuple[xr.DataArray, Optional[Dict]]: """ Plot a multi-dimensional DataArray with interactive dimension selectors. + Supports multiple plot types: heatmap, line, stacked bar, and grouped bar. Args: array: xarray.DataArray with 2+ dimensions @@ -285,14 +322,26 @@ def plot_nd(array: xr.DataArray, var_name: str, container: Optional[Any] = None) # Choose which dimension to put on x-axis x_dim = st.selectbox('X dimension:', dims, index=0) - # Choose which dimension to put on y-axis + # Choose which dimension to put on y-axis if we have at least 2 dimensions remaining_dims = [d for d in dims if d != x_dim] - y_dim = st.selectbox('Y dimension:', remaining_dims, index=0 if len(remaining_dims) > 0 else None) + y_dim = None + if len(remaining_dims) > 0: + y_dim_options = ['None'] + remaining_dims + y_dim_selection = st.selectbox('Y dimension:', y_dim_options, index=1) + if y_dim_selection != 'None': + y_dim = y_dim_selection + + # Add plot type selector + plot_type = st.selectbox( + 'Plot type:', + ['Heatmap', 'Line', 'Stacked Bar', 'Grouped Bar'], + index=0 if y_dim is not None else 1, # Default to heatmap for 2D, line for 1D + ) - # If we have more than 2 dimensions, let user select values for other dimensions + # If we have more than the selected dimensions, let user select values for other dimensions with viz_cols[1]: # Setup sliders for other dimensions - slice_dims = [d for d in dims if d not in [x_dim, y_dim]] + slice_dims = [d for d in dims if d not in ([x_dim] if y_dim is None else [x_dim, y_dim])] slice_indexes = {} for dim in slice_dims: @@ -308,26 +357,80 @@ def plot_nd(array: xr.DataArray, var_name: str, container: Optional[Any] = None) else: array_slice = array - # Visualization depends on whether we have 1 or 
2 dimensions selected - if y_dim: - # 2D visualization: heatmap - fig = px.imshow( - array_slice.transpose(y_dim, x_dim).values, - x=array_slice[x_dim].values, - y=array_slice[y_dim].values, - color_continuous_scale='viridis', - labels={'x': x_dim, 'y': y_dim, 'color': var_name}, - ) + # Visualization depends on the selected plot type and dimensions + if y_dim is not None: + # 2D visualization + if plot_type == 'Heatmap': + # Heatmap visualization + fig = px.imshow( + array_slice.transpose(y_dim, x_dim).values, + x=array_slice[x_dim].values, + y=array_slice[y_dim].values, + color_continuous_scale='viridis', + labels={'x': x_dim, 'y': y_dim, 'color': var_name}, + ) + fig.update_layout(height=500) + elif plot_type == 'Line': + # Line plot with multiple lines (one per y-dimension value) + fig = go.Figure() + + # Convert to dataframe for easier plotting + df = array_slice.to_dataframe(name='value').reset_index() + + # Group by y-dimension for multiple lines + for y_val in array_slice[y_dim].values: + df_subset = df[df[y_dim] == y_val] + fig.add_trace( + go.Scatter(x=df_subset[x_dim], y=df_subset['value'], mode='lines', name=f'{y_dim}={y_val}') + ) - fig.update_layout(height=500) - container.plotly_chart(fig, use_container_width=True) + fig.update_layout( + height=500, + title=f'{var_name} by {x_dim} and {y_dim}', + xaxis_title=x_dim, + yaxis_title=var_name, + legend_title=y_dim, + ) + elif plot_type == 'Stacked Bar': + # Stacked bar chart + # Convert to dataframe for easier plotting + df = array_slice.to_dataframe(name='value').reset_index() + + fig = px.bar( + df, + x=x_dim, + y='value', + color=y_dim, + barmode='stack', + labels={'value': var_name, x_dim: x_dim, y_dim: y_dim}, + ) + fig.update_layout(height=500) + elif plot_type == 'Grouped Bar': + # Grouped bar chart + # Convert to dataframe for easier plotting + df = array_slice.to_dataframe(name='value').reset_index() + + fig = px.bar( + df, + x=x_dim, + y='value', + color=y_dim, + barmode='group', + 
labels={'value': var_name, x_dim: x_dim, y_dim: y_dim}, + ) + fig.update_layout(height=500) else: # 1D visualization after slicing - fig = px.line(x=array_slice[x_dim].values, y=array_slice.values, labels={'x': x_dim, 'y': var_name}) - container.plotly_chart(fig, use_container_width=True) + if plot_type in ['Line', 'Heatmap']: # Default to line for 1D data + fig = px.line(x=array_slice[x_dim].values, y=array_slice.values, labels={'x': x_dim, 'y': var_name}) + elif plot_type in ['Stacked Bar', 'Grouped Bar']: # Both are the same for 1D + # Create a dataframe for the bar chart + df = pd.DataFrame({x_dim: array_slice[x_dim].values, 'value': array_slice.values}) - return array_slice, slice_dict + fig = px.bar(df, x=x_dim, y='value', labels={'value': var_name}) + container.plotly_chart(fig, use_container_width=True) + return array_slice, slice_dict @show_traceback() def display_data_preview(array: xr.DataArray, container: Optional[Any] = None) -> pd.DataFrame: From 62efc8530dc278d9fff67941b84504a5dcb3f61b Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 15 May 2025 13:45:04 +0200 Subject: [PATCH 77/87] Add options to aggregate dimensions --- flixopt/explorer_app.py | 166 +++++++++++++++++++++++++++++++++++----- 1 file changed, 146 insertions(+), 20 deletions(-) diff --git a/flixopt/explorer_app.py b/flixopt/explorer_app.py index 268d0d5d6..7323e2e29 100644 --- a/flixopt/explorer_app.py +++ b/flixopt/explorer_app.py @@ -220,6 +220,63 @@ def display_variable_stats(array: xr.DataArray, container: Optional[Any] = None) pass +@show_traceback() +def aggregate_dimensions( + array: xr.DataArray, agg_dims: List[str], agg_method: str, container: Optional[Any] = None +) -> xr.DataArray: + """ + Aggregate a DataArray over specified dimensions using a specified method. 
+ + Args: + array: xarray.DataArray to aggregate + agg_dims: List of dimension names to aggregate over + agg_method: Aggregation method ('mean', 'sum', 'min', 'max', 'std', 'median') + container: Streamlit container for displaying messages + + Returns: + Aggregated DataArray + """ + if container is None: + container = st + + # Filter out any dimensions that don't exist in the array + valid_agg_dims = [dim for dim in agg_dims if dim in array.dims] + + # If there are no valid dimensions to aggregate over, just return the original array + if not valid_agg_dims: + return array + + # Apply the selected aggregation method + try: + if agg_method == 'mean': + result = array.mean(dim=valid_agg_dims) + elif agg_method == 'sum': + result = array.sum(dim=valid_agg_dims) + elif agg_method == 'min': + result = array.min(dim=valid_agg_dims) + elif agg_method == 'max': + result = array.max(dim=valid_agg_dims) + elif agg_method == 'std': + result = array.std(dim=valid_agg_dims) + elif agg_method == 'median': + result = array.median(dim=valid_agg_dims) + elif agg_method == 'var': + result = array.var(dim=valid_agg_dims) + else: + container.warning(f"Unknown aggregation method: {agg_method}. 
Using 'mean' instead.") + result = array.mean(dim=valid_agg_dims) + + # If the aggregation removed all dimensions, ensure result has correct shape + if len(result.dims) == 0: + # Convert scalar result to 0D DataArray + result = xr.DataArray(result.values, name=array.name, attrs=array.attrs) + + return result + except Exception as e: + container.error(f'Error during aggregation: {str(e)}') + return array # Return original array if aggregation fails + + @show_traceback() def plot_scalar(array: xr.DataArray, container: Optional[Any] = None) -> None: """ @@ -299,7 +356,7 @@ def plot_1d(array: xr.DataArray, var_name: str, container: Optional[Any] = None) def plot_nd(array: xr.DataArray, var_name: str, container: Optional[Any] = None) -> Tuple[xr.DataArray, Optional[Dict]]: """ Plot a multi-dimensional DataArray with interactive dimension selectors. - Supports multiple plot types: heatmap, line, stacked bar, and grouped bar. + Supports multiple plot types and dimension aggregation. Args: array: xarray.DataArray with 2+ dimensions @@ -314,12 +371,58 @@ def plot_nd(array: xr.DataArray, var_name: str, container: Optional[Any] = None) dims = list(array.dims) - container.write('Select dimensions to visualize:') + # Aggregation options + container.subheader('Dimension Handling') - viz_cols = container.columns(2) + # Define columns for the UI layout + col1, col2 = container.columns(2) - with viz_cols[0]: - # Choose which dimension to put on x-axis + with col1: + # Multi-select for dimensions to aggregate + agg_dims = st.multiselect( + 'Dimensions to aggregate:', + dims, + default=[], + help='Select dimensions to aggregate (reduce) using the method selected below', + ) + + # Aggregation method selection + agg_method = st.selectbox( + 'Aggregation method:', + ['mean', 'sum', 'min', 'max', 'std', 'median', 'var'], + index=0, + help='Method used to aggregate over the selected dimensions', + ) + + # Apply aggregation if dimensions were selected + if agg_dims: + orig_dims = 
dims.copy() + array = aggregate_dimensions(array, agg_dims, agg_method, container) + + # Update the list of available dimensions after aggregation + dims = list(array.dims) + + # Show information about the aggregation + removed_dims = [dim for dim in orig_dims if dim not in dims] + if removed_dims: + msg = f'Applied {agg_method} aggregation over: {", ".join(removed_dims)}' + container.info(msg) + + # If no dimensions left after aggregation, show scalar result + if len(dims) == 0: + plot_scalar(array, container) + return array, None + + # If one dimension left after aggregation, use 1D plotting + if len(dims) == 1: + plot_1d(array, var_name, container) + return array, None + + # Visualization options for 2+ dimensions + container.subheader('Visualization Settings') + + # Choose which dimension to put on x-axis + with col2: x_dim = st.selectbox('X dimension:', dims, index=0) # Choose which dimension to put on y-axis if we have at least 2 dimensions @@ -332,21 +435,42 @@ def plot_nd(array: xr.DataArray, var_name: str, container: Optional[Any] = None) y_dim = y_dim_selection # Add plot type selector - plot_type = st.selectbox( - 'Plot type:', - ['Heatmap', 'Line', 'Stacked Bar', 'Grouped Bar'], - index=0 if y_dim is not None else 1, # Default to heatmap for 2D, line for 1D - ) + plot_types = ['Heatmap', 'Line', 'Stacked Bar', 'Grouped Bar'] + if y_dim is None: + # Remove heatmap option if there's no Y dimension + plot_types = [pt for pt in plot_types if pt != 'Heatmap'] + default_idx = 0 # Default to Line for 1D + else: + default_idx = 0 # Default to Heatmap for 2D - # If we have more than the selected dimensions, let user select values for other dimensions - with viz_cols[1]: - # Setup sliders for other dimensions - slice_dims = [d for d in dims if d not in ([x_dim] if y_dim is None else [x_dim, y_dim])] - slice_indexes = {} + plot_type = st.selectbox('Plot type:', plot_types, index=default_idx) - for dim in slice_dims: - dim_size = array.sizes[dim] - 
slice_indexes[dim] = st.slider(f'Position in {dim} dimension', 0, dim_size - 1, dim_size // 2) + # If we have more than the selected dimensions, let user select values for other dimensions + container.subheader('Other Dimension Values') + + # Calculate which dimensions need slicers + slice_dims = [d for d in dims if d not in ([x_dim] if y_dim is None else [x_dim, y_dim])] + slice_indexes = {} + + # Create sliders in a more compact layout if there are many dimensions + if len(slice_dims) > 0: + if len(slice_dims) <= 3: + # For a few dimensions, use columns + cols = container.columns(len(slice_dims)) + for i, dim in enumerate(slice_dims): + dim_size = array.sizes[dim] + with cols[i]: + slice_indexes[dim] = st.slider( + f'{dim}', 0, dim_size - 1, dim_size // 2, help=f'Select position along {dim} dimension' + ) + else: + # For many dimensions, use a more compact layout + with container.expander('Select values for other dimensions', expanded=True): + for dim in slice_dims: + dim_size = array.sizes[dim] + slice_indexes[dim] = st.slider( + f'{dim}', 0, dim_size - 1, dim_size // 2, help=f'Select position along {dim} dimension' + ) # Create slice dictionary for selection slice_dict = {dim: slice_indexes[dim] for dim in slice_dims} @@ -358,6 +482,8 @@ def plot_nd(array: xr.DataArray, var_name: str, container: Optional[Any] = None) array_slice = array # Visualization depends on the selected plot type and dimensions + container.subheader('Plot') + if y_dim is not None: # 2D visualization if plot_type == 'Heatmap': @@ -420,8 +546,8 @@ def plot_nd(array: xr.DataArray, var_name: str, container: Optional[Any] = None) ) fig.update_layout(height=500) else: - # 1D visualization after slicing - if plot_type in ['Line', 'Heatmap']: # Default to line for 1D data + # 1D visualization after slicing (no y_dim) + if plot_type == 'Line': fig = px.line(x=array_slice[x_dim].values, y=array_slice.values, labels={'x': x_dim, 'y': var_name}) elif plot_type in ['Stacked Bar', 'Grouped Bar']: # 
Both are the same for 1D # Create a dataframe for the bar chart From 8d3f87ad47a41542d507660959615cd5b96a132a Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 15 May 2025 13:52:56 +0200 Subject: [PATCH 78/87] Add page for sizes data --- flixopt/explorer_app.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/flixopt/explorer_app.py b/flixopt/explorer_app.py index 7323e2e29..aae1cc350 100644 --- a/flixopt/explorer_app.py +++ b/flixopt/explorer_app.py @@ -819,7 +819,7 @@ def explore_results_app(results): # Create sidebar for navigation st.sidebar.title("FlixOpt Results Explorer") - pages = ["Overview", "Components", "Buses", "Effects", "Flows DS", "Effects DS", "Explorer"] + pages = ["Overview", "Components", "Buses", "Effects", "Flows DS", "Effects DS", 'Sizes DS', "Explorer"] selected_page = st.sidebar.radio("Navigation", pages) # Overview page @@ -1021,6 +1021,10 @@ def explore_results_app(results): mode = st.selectbox("Select a mode", ['total', 'invest', 'operation']) xarray_explorer(results.effects_per_component(mode)) + elif selected_page == 'Sizes DS': + st.title('Sizes Dataset') + xarray_explorer(results.sizes()) + elif selected_page == "Explorer": st.title("Explore all variable results") xarray_explorer(results.solution) From 6a68de1c3a8742d110e4bc99fb2b35a30a203e97 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 15 May 2025 14:07:34 +0200 Subject: [PATCH 79/87] Improve plotting a bit more --- flixopt/explorer_app.py | 164 +++++++++++++++++++++++----------------- 1 file changed, 93 insertions(+), 71 deletions(-) diff --git a/flixopt/explorer_app.py b/flixopt/explorer_app.py index aae1cc350..a27d29e2a 100644 --- a/flixopt/explorer_app.py +++ b/flixopt/explorer_app.py @@ -352,6 +352,7 @@ def plot_1d(array: xr.DataArray, var_name: str, container: Optional[Any] = None) except Exception as e: container.warning(f'Could not compute statistics: 
{str(e)}') + @show_traceback() def plot_nd(array: xr.DataArray, var_name: str, container: Optional[Any] = None) -> Tuple[xr.DataArray, Optional[Dict]]: """ @@ -371,41 +372,55 @@ def plot_nd(array: xr.DataArray, var_name: str, container: Optional[Any] = None) dims = list(array.dims) - # Aggregation options - container.subheader('Dimension Handling') - - # Define columns for the UI layout - col1, col2 = container.columns(2) + # Add custom CSS to reduce spacing + container.markdown( + """ + + """, + unsafe_allow_html=True, + ) - with col1: - # Multi-select for dimensions to aggregate - agg_dims = st.multiselect( - 'Dimensions to aggregate:', - dims, - default=[], - help='Select dimensions to aggregate (reduce) using the method selected below', - ) + # Use tabs for main sections + dim_tab, viz_tab = container.tabs(['Dimension Settings', 'Visualization Settings']) + + # === DIMENSION SETTINGS TAB === + with dim_tab: + # Use columns for dimension handling + agg_col1, agg_col2 = dim_tab.columns([3, 2]) + + with agg_col1: + # Multi-select for dimensions to aggregate + agg_dims = st.multiselect( + 'Dimensions to aggregate:', + dims, + default=[], + help='Select dimensions to aggregate', + ) - # Aggregation method selection - agg_method = st.selectbox( - 'Aggregation method:', - ['mean', 'sum', 'min', 'max', 'std', 'median', 'var'], - index=0, - help='Method used to aggregate over the selected dimensions', - ) + with agg_col2: + # Aggregation method selection + agg_method = st.selectbox( + 'Method:', + ['mean', 'sum', 'min', 'max', 'std', 'median', 'var'], + index=0, + ) # Apply aggregation if dimensions were selected if agg_dims: orig_dims = dims.copy() array = aggregate_dimensions(array, agg_dims, agg_method, container) - - # Update the list of available dimensions after aggregation dims = list(array.dims) # Show information about the aggregation removed_dims = [dim for dim in orig_dims if dim not in dims] if removed_dims: - msg = f'Applied {agg_method} aggregation over: 
{", ".join(removed_dims)}' + msg = f'Applied {agg_method} over: {", ".join(removed_dims)}' container.info(msg) # If no dimensions left after aggregation, show scalar result @@ -418,59 +433,67 @@ def plot_nd(array: xr.DataArray, var_name: str, container: Optional[Any] = None) plot_1d(array, var_name, container) return array, None - # Visualization options for 2+ dimensions - container.subheader('Visualization Settings') - - # Choose which dimension to put on x-axis - with col2: - x_dim = st.selectbox('X dimension:', dims, index=0) - - # Choose which dimension to put on y-axis if we have at least 2 dimensions - remaining_dims = [d for d in dims if d != x_dim] - y_dim = None - if len(remaining_dims) > 0: - y_dim_options = ['None'] + remaining_dims - y_dim_selection = st.selectbox('Y dimension:', y_dim_options, index=1) - if y_dim_selection != 'None': - y_dim = y_dim_selection - - # Add plot type selector - plot_types = ['Heatmap', 'Line', 'Stacked Bar', 'Grouped Bar'] - if y_dim is None: - # Remove heatmap option if there's no Y dimension - plot_types = [pt for pt in plot_types if pt != 'Heatmap'] - default_idx = 0 # Default to Line for 1D - else: - default_idx = 0 # Default to Heatmap for 2D - - plot_type = st.selectbox('Plot type:', plot_types, index=default_idx) + # === VISUALIZATION SETTINGS TAB === + with viz_tab: + # Use columns for visualization settings + viz_col1, viz_col2 = viz_tab.columns(2) + + with viz_col1: + # Choose which dimension to put on x-axis + x_dim = st.selectbox('X dimension:', dims, index=0) + + # Choose which dimension to put on y-axis if we have at least 2 dimensions + remaining_dims = [d for d in dims if d != x_dim] + y_dim = None + if len(remaining_dims) > 0: + y_dim_options = ['None'] + remaining_dims + y_dim_selection = st.selectbox('Y dimension:', y_dim_options, index=1) + if y_dim_selection != 'None': + y_dim = y_dim_selection + + with viz_col2: + # Add plot type selector + plot_types = ['Heatmap', 'Line', 'Stacked Bar', 'Grouped 
Bar'] + if y_dim is None: + # Remove heatmap option if there's no Y dimension + plot_types = [pt for pt in plot_types if pt != 'Heatmap'] + default_idx = 0 # Default to Line for 1D + else: + default_idx = 0 # Default to Heatmap for 2D + + plot_type = st.selectbox('Plot type:', plot_types, index=default_idx) # If we have more than the selected dimensions, let user select values for other dimensions - container.subheader('Other Dimension Values') - # Calculate which dimensions need slicers slice_dims = [d for d in dims if d not in ([x_dim] if y_dim is None else [x_dim, y_dim])] slice_indexes = {} - # Create sliders in a more compact layout if there are many dimensions + # Create a more compact layout for dimension sliders if len(slice_dims) > 0: - if len(slice_dims) <= 3: - # For a few dimensions, use columns - cols = container.columns(len(slice_dims)) - for i, dim in enumerate(slice_dims): - dim_size = array.sizes[dim] - with cols[i]: - slice_indexes[dim] = st.slider( - f'{dim}', 0, dim_size - 1, dim_size // 2, help=f'Select position along {dim} dimension' - ) - else: - # For many dimensions, use a more compact layout - with container.expander('Select values for other dimensions', expanded=True): - for dim in slice_dims: - dim_size = array.sizes[dim] - slice_indexes[dim] = st.slider( - f'{dim}', 0, dim_size - 1, dim_size // 2, help=f'Select position along {dim} dimension' - ) + with container.expander('Other Dimension Values', expanded=True): + # Calculate optimal number of columns based on number of dimensions + num_cols = min(4, len(slice_dims)) # Max 4 columns to keep things readable + + # Create a grid layout of sliders + for i in range(0, len(slice_dims), num_cols): + # Create a new row of columns + cols = st.columns(num_cols) + + # Fill the row with sliders + for j in range(num_cols): + col_idx = i + j + if col_idx < len(slice_dims): + dim = slice_dims[col_idx] + dim_size = array.sizes[dim] + with cols[j]: + slice_indexes[dim] = st.slider( + f'{dim}', + 0, + 
dim_size - 1, + dim_size // 2, + help=f'Position on {dim}', + key=f'slider_{dim}', # Adding keys helps prevent UI issues + ) # Create slice dictionary for selection slice_dict = {dim: slice_indexes[dim] for dim in slice_dims} @@ -492,7 +515,7 @@ def plot_nd(array: xr.DataArray, var_name: str, container: Optional[Any] = None) array_slice.transpose(y_dim, x_dim).values, x=array_slice[x_dim].values, y=array_slice[y_dim].values, - color_continuous_scale='viridis', + color_continuous_scale='portland', labels={'x': x_dim, 'y': y_dim, 'color': var_name}, ) fig.update_layout(height=500) @@ -800,7 +823,6 @@ def custom_heatmap_plotter( return array_slice, slice_dict - @show_traceback() def explore_results_app(results): """ From 41859d790d14511c1fc99064fa8515d9b8ed1e98 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 15 May 2025 15:14:51 +0200 Subject: [PATCH 80/87] Make dimension selector modular --- flixopt/explorer_app.py | 153 +++++++++++++++++++++++++++++++++------- 1 file changed, 127 insertions(+), 26 deletions(-) diff --git a/flixopt/explorer_app.py b/flixopt/explorer_app.py index a27d29e2a..0dc9b91b4 100644 --- a/flixopt/explorer_app.py +++ b/flixopt/explorer_app.py @@ -276,6 +276,131 @@ def aggregate_dimensions( container.error(f'Error during aggregation: {str(e)}') return array # Return original array if aggregation fails +@show_traceback() +def create_dimension_selector( + array: xr.DataArray, + dim: str, + container: Optional[Any] = None, + unique_key: str = '', +) -> int: + """ + Create a dimension selector (dropdown or slider) for a given dimension of an xarray. 
+ + Args: + array: The xarray DataArray to select from + dim: The dimension name to create a selector for + container: The Streamlit container to render in (if None, uses st) + unique_key: A unique key suffix to prevent widget conflicts + + Returns: + The selected index for the dimension + """ + if container is None: + container = st + + key_suffix = f'_{unique_key}' if unique_key else '' + + # Get dimension size + dim_size = array.sizes[dim] + + # Default to middle value + default_idx = dim_size // 2 + + # Check if this dimension has coordinates + if dim in array.coords: + values = array.coords[dim].values + + # Use dropdown if fewer than 100 values, slider otherwise + if len(values) < 100: + # Use dropdown with actual coordinate values + options = list(values) + + selected_value = container.selectbox( + f'{dim}', + options, + index=default_idx, + help=f'Select value for {dim}', + key=f'dim_select_{dim}{key_suffix}', + ) + + # Find the index of the selected value + if np.issubdtype(values.dtype, np.number): + # For numeric values, find the closest index + selected_idx = np.abs(values - selected_value).argmin() + else: + # For non-numeric values (strings, etc), find exact match + try: + selected_idx = np.where(values == selected_value)[0][0] + except: + # Fallback if exact match fails + selected_idx = default_idx + else: + # Use slider for dimensions with many values + selected_idx = container.slider( + f'{dim}', + 0, + dim_size - 1, + default_idx, + help=f'Position on {dim} dimension ({values[0]} to {values[-1]})', + key=f'dim_slider_{dim}{key_suffix}', + ) + + # Show the selected value for context + container.caption(f'Selected: {values[selected_idx]}') + else: + # No coordinates, use integer slider + selected_idx = container.slider( + f'{dim} index', + 0, + dim_size - 1, + default_idx, + help=f'Position on {dim} dimension (by index)', + key=f'dim_slider_idx_{dim}{key_suffix}', + ) + + return selected_idx + + +@show_traceback() +def create_dimension_selectors( + 
array: xr.DataArray, slice_dims: List[str], container: Optional[Any] = None, unique_key: str = '' +) -> Dict[str, int]: + """ + Create selectors for multiple dimensions and organize them in a grid layout. + + Args: + array: The xarray DataArray to select from + slice_dims: List of dimension names to create selectors for + container: The Streamlit container to render in (if None, uses st) + unique_key: A unique key suffix to prevent widget conflicts + + Returns: + Dictionary mapping dimension names to selected indices + """ + if container is None: + container = st + + slice_indexes = {} + + if len(slice_dims) > 0: + with container.expander('Dimension Values', expanded=True): + # Calculate optimal number of columns based on number of dimensions + num_cols = min(3, len(slice_dims)) # Max 3 columns to keep things readable + + # Create a grid layout of selectors + for i in range(0, len(slice_dims), num_cols): + # Create a new row of columns + cols = container.columns(num_cols) + + # Fill the row with selectors + for j in range(num_cols): + col_idx = i + j + if col_idx < len(slice_dims): + dim = slice_dims[col_idx] + with cols[j]: + slice_indexes[dim] = create_dimension_selector(array, dim, cols[j], f'{unique_key}_{i}_{j}') + + return slice_indexes @show_traceback() def plot_scalar(array: xr.DataArray, container: Optional[Any] = None) -> None: @@ -468,32 +593,8 @@ def plot_nd(array: xr.DataArray, var_name: str, container: Optional[Any] = None) slice_dims = [d for d in dims if d not in ([x_dim] if y_dim is None else [x_dim, y_dim])] slice_indexes = {} - # Create a more compact layout for dimension sliders - if len(slice_dims) > 0: - with container.expander('Other Dimension Values', expanded=True): - # Calculate optimal number of columns based on number of dimensions - num_cols = min(4, len(slice_dims)) # Max 4 columns to keep things readable - - # Create a grid layout of sliders - for i in range(0, len(slice_dims), num_cols): - # Create a new row of columns - cols = 
st.columns(num_cols) - - # Fill the row with sliders - for j in range(num_cols): - col_idx = i + j - if col_idx < len(slice_dims): - dim = slice_dims[col_idx] - dim_size = array.sizes[dim] - with cols[j]: - slice_indexes[dim] = st.slider( - f'{dim}', - 0, - dim_size - 1, - dim_size // 2, - help=f'Position on {dim}', - key=f'slider_{dim}', # Adding keys helps prevent UI issues - ) + # Create a more compact layout for dimension dropdown selectors + slice_indexes = create_dimension_selectors(array, slice_dims, container, 'key') # Create slice dictionary for selection slice_dict = {dim: slice_indexes[dim] for dim in slice_dims} From 5ba55cb915036cd701ab9e00c7c98ee78e948aae Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 15 May 2025 15:16:11 +0200 Subject: [PATCH 81/87] Remove custom css --- flixopt/explorer_app.py | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/flixopt/explorer_app.py b/flixopt/explorer_app.py index 0dc9b91b4..6c8cfe4cc 100644 --- a/flixopt/explorer_app.py +++ b/flixopt/explorer_app.py @@ -497,20 +497,6 @@ def plot_nd(array: xr.DataArray, var_name: str, container: Optional[Any] = None) dims = list(array.dims) - # Add custom CSS to reduce spacing - container.markdown( - """ - - """, - unsafe_allow_html=True, - ) - # Use tabs for main sections dim_tab, viz_tab = container.tabs(['Dimension Settings', 'Visualization Settings']) From c4fe43b21f9eb03bbad41bbc53983de4dabbc75f Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 15 May 2025 15:24:40 +0200 Subject: [PATCH 82/87] Finalize streamlit app --- ...sults_explorer.py => _results_explorer.py} | 3 +- ...plorer_app.py => _results_explorer_app.py} | 47 +++++++------------ flixopt/results.py | 2 +- 3 files changed, 19 insertions(+), 33 deletions(-) rename flixopt/{results_explorer.py => _results_explorer.py} (92%) rename flixopt/{explorer_app.py => _results_explorer_app.py} (97%) diff --git 
a/flixopt/results_explorer.py b/flixopt/_results_explorer.py similarity index 92% rename from flixopt/results_explorer.py rename to flixopt/_results_explorer.py index dc8cb527b..e529897f5 100644 --- a/flixopt/results_explorer.py +++ b/flixopt/_results_explorer.py @@ -14,6 +14,7 @@ def explore_results(self, port=8501): """ Launch a Streamlit app to explore the calculation results. + This function is experimental and might have issues. Args: port: Port to use for the Streamlit server @@ -24,7 +25,7 @@ def explore_results(self, port=8501): # Find explorer app path current_dir = os.path.dirname(os.path.abspath(__file__)) - explorer_script = os.path.join(current_dir, 'explorer_app.py') + explorer_script = os.path.join(current_dir, '_results_explorer_app.py') # If the explorer app doesn't exist, inform the user if not os.path.exists(explorer_script): diff --git a/flixopt/explorer_app.py b/flixopt/_results_explorer_app.py similarity index 97% rename from flixopt/explorer_app.py rename to flixopt/_results_explorer_app.py index 6c8cfe4cc..0bfaaddac 100644 --- a/flixopt/explorer_app.py +++ b/flixopt/_results_explorer_app.py @@ -1,21 +1,21 @@ # FlixOpt Results Explorer App import argparse +import functools +import inspect +import io import os import sys -import io import tempfile -from typing import Dict, List, Optional, Union, Tuple, Any, Callable, cast, TypeVar import traceback -import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union, cast import numpy as np -import functools import pandas as pd -import streamlit as st -import xarray as xr import plotly.express as px import plotly.graph_objects as go +import streamlit as st +import xarray as xr T = TypeVar('T') @@ -71,7 +71,7 @@ def wrapper(*args, **kwargs): # Try to get source code try: display.code(inspect.getsource(func), language='python') - except: + except Exception: display.warning('Could not retrieve function source code.') # Show arguments @@ -85,7 +85,7 @@ def wrapper(*args, 
**kwargs): if len(repr_arg) > 200: # Truncate long representations repr_arg = repr_arg[:200] + '...' safe_args.append(repr_arg) - except: + except Exception: safe_args.append('[Representation failed]') # Safely represent kwargs @@ -96,7 +96,7 @@ def wrapper(*args, **kwargs): if len(repr_v) > 200: # Truncate long representations repr_v = repr_v[:200] + '...' safe_kwargs[k] = repr_v - except: + except Exception: safe_kwargs[k] = '[Representation failed]' # Display args and kwargs @@ -216,7 +216,7 @@ def display_variable_stats(array: xr.DataArray, container: Optional[Any] = None) stats_cols[1].metric('Max', float(array.max().values)) stats_cols[2].metric('Mean', float(array.mean().values)) stats_cols[3].metric('Std', float(array.std().values)) - except: + except Exception: pass @@ -276,6 +276,7 @@ def aggregate_dimensions( container.error(f'Error during aggregation: {str(e)}') return array # Return original array if aggregation fails + @show_traceback() def create_dimension_selector( array: xr.DataArray, @@ -331,7 +332,7 @@ def create_dimension_selector( # For non-numeric values (strings, etc), find exact match try: selected_idx = np.where(values == selected_value)[0][0] - except: + except Exception: # Fallback if exact match fails selected_idx = default_idx else: @@ -402,6 +403,7 @@ def create_dimension_selectors( return slice_indexes + @show_traceback() def plot_scalar(array: xr.DataArray, container: Optional[Any] = None) -> None: """ @@ -668,6 +670,7 @@ def plot_nd(array: xr.DataArray, var_name: str, container: Optional[Any] = None) container.plotly_chart(fig, use_container_width=True) return array_slice, slice_dict + @show_traceback() def display_data_preview(array: xr.DataArray, container: Optional[Any] = None) -> pd.DataFrame: """ @@ -1149,30 +1152,12 @@ def run_explorer_from_file(folder, name): """ # Import the relevant modules try: - # Try different import approaches - try: - # First try standard import - try: - from flixopt.results import CalculationResults - 
except ImportError: - from flixopt.results import CalculationResults - except ImportError: - # Add potential module paths - for path in [os.getcwd(), os.path.dirname(os.path.abspath(__file__))]: - if path not in sys.path: - sys.path.append(path) - - # Try again with modified path - try: - from flixopt.results import CalculationResults - except ImportError: - from flixopt.results import CalculationResults - + from flixopt.results import CalculationResults # Load from file results = CalculationResults.from_file(folder, name) explore_results_app(results) except Exception as e: - st.error(f"Error loading calculation results: {e}") + st.error(f"Error loading calculation results for streamlit app: {e}") st.stop() # Entry point for module execution diff --git a/flixopt/results.py b/flixopt/results.py index 92620de7d..d5aac8479 100644 --- a/flixopt/results.py +++ b/flixopt/results.py @@ -17,7 +17,7 @@ from . import plotting from .core import DataConverter, TimeSeriesCollection from .flow_system import FlowSystem -from .results_explorer import explore_results +from ._results_explorer import explore_results if TYPE_CHECKING: import pyvis From 936d9abdac476e1b9f4fb484bbfea50c4da0f66b Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 16 May 2025 08:13:42 +0200 Subject: [PATCH 83/87] Bugfix stacked bars in dashboard --- flixopt/_results_explorer_app.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/flixopt/_results_explorer_app.py b/flixopt/_results_explorer_app.py index 0bfaaddac..ab3fdbfd7 100644 --- a/flixopt/_results_explorer_app.py +++ b/flixopt/_results_explorer_app.py @@ -633,13 +633,14 @@ def plot_nd(array: xr.DataArray, var_name: str, container: Optional[Any] = None) # Stacked bar chart # Convert to dataframe for easier plotting df = array_slice.to_dataframe(name='value').reset_index() + df = df.fillna(0) # Fixes issues with stacking fig = px.bar( df, x=x_dim, y='value', color=y_dim, - barmode='stack', + 
barmode='relative', labels={'value': var_name, x_dim: x_dim, y_dim: y_dim}, ) fig.update_layout(height=500) From 4016d2e977fa2086bf6daa7d5ddb257622c3cf0b Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 16 May 2025 08:21:39 +0200 Subject: [PATCH 84/87] Move launching function to results object directly --- flixopt/_results_explorer.py | 61 ------------------------------------ flixopt/results.py | 57 +++++++++++++++++++++++++++++++-- 2 files changed, 55 insertions(+), 63 deletions(-) delete mode 100644 flixopt/_results_explorer.py diff --git a/flixopt/_results_explorer.py b/flixopt/_results_explorer.py deleted file mode 100644 index e529897f5..000000000 --- a/flixopt/_results_explorer.py +++ /dev/null @@ -1,61 +0,0 @@ -""" -FlixOpt Results Explorer - -A module for launching a Streamlit app to explore flixopt calculation results. -""" - -import os -import subprocess -import sys -import webbrowser -from pathlib import Path - - -def explore_results(self, port=8501): - """ - Launch a Streamlit app to explore the calculation results. - This function is experimental and might have issues. - - Args: - port: Port to use for the Streamlit server - - Returns: - subprocess.Popen: The running Streamlit process - """ - - # Find explorer app path - current_dir = os.path.dirname(os.path.abspath(__file__)) - explorer_script = os.path.join(current_dir, '_results_explorer_app.py') - - # If the explorer app doesn't exist, inform the user - if not os.path.exists(explorer_script): - raise FileNotFoundError( - f'Explorer app not found at {explorer_script}. ' - 'Please ensure the explorer_app.py file is in the flixopt package directory.' 
- ) - - # Run the Streamlit app - the port argument needs to be separate from the script arguments - cmd = [ - sys.executable, - '-m', - 'streamlit', - 'run', - explorer_script, - '--server.port', - str(port), - '--', # This separator is important - str(self.folder), - self.name, - ] - - self.to_file() # Save results to file. This is needed to be able to launch the app from the file. # TODO - - # Launch the Streamlit app - process = subprocess.Popen(cmd) - - # Open browser - webbrowser.open(f'http://localhost:{port}') - - print(f'Streamlit app launched on port {port}. Press Ctrl+C to stop the app.') - - return process diff --git a/flixopt/results.py b/flixopt/results.py index d5aac8479..17d855d0d 100644 --- a/flixopt/results.py +++ b/flixopt/results.py @@ -17,7 +17,6 @@ from . import plotting from .core import DataConverter, TimeSeriesCollection from .flow_system import FlowSystem -from ._results_explorer import explore_results if TYPE_CHECKING: import pyvis @@ -66,7 +65,59 @@ class CalculationResults: >>> results.to_file(folder='new_results_dir', compression=5) # Save the results to a new folder """ - launch_dashboard = explore_results + def launch_dashboard(self, port=8501): + import os + import subprocess + import sys + + # Rest of your code with try/except + try: + # Find explorer app path + current_dir = os.path.dirname(os.path.abspath(__file__)) + explorer_script = os.path.join(current_dir, '_results_explorer_app.py') + + if not os.path.exists(explorer_script): + raise FileNotFoundError( + f'Explorer app not found at {explorer_script}. ' + 'Please ensure the explorer_app.py file is in the flixopt package directory.' 
+ ) + + # Run the Streamlit app - the port argument needs to be separate from the script arguments + cmd = [ + sys.executable, + '-m', + 'streamlit', + 'run', + explorer_script, + '--server.port', + str(port), + '--server.headless', + 'false', # This makes Streamlit open the browser itself + '--', + str(self.folder), + self.name, + ] + + self.to_file() + + self._dashboard_process = subprocess.Popen(cmd) + + logger.info(f'Streamlit app launched on port {port}. Use CalculationResults.stop_dashboard() to stop it.') + return self._dashboard_process + + except Exception as e: + print(f'Error launching Streamlit app: {e}') + return None + + def stop_dashboard(self): + """Stop the Streamlit dashboard process""" + if self._dashboard_process: + try: + self._dashboard_process.terminate() + self._dashboard_process.wait(timeout=5) # Wait up to 5 seconds for clean termination + except Exception: + self._dashboard_process.kill() # Force kill if needed + logger.info('Streamlit app has been stopped.') @classmethod def from_file(cls, folder: Union[str, pathlib.Path], name: str): @@ -207,6 +258,8 @@ def __init__( self._effects_per_component = {'operation': None, 'invest': None, 'total': None} self._flow_network_info_ = None + self._dashboard_process = None + def __getitem__(self, key: str) -> Union['ComponentResults', 'BusResults', 'EffectResults', 'FlowResults']: if key in self.components: return self.components[key] From 42be2229912740825d10a050112bf8968a0293cd Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 16 May 2025 08:49:54 +0200 Subject: [PATCH 85/87] Move dashboard functions down --- flixopt/results.py | 108 ++++++++++++++++++++++----------------------- 1 file changed, 54 insertions(+), 54 deletions(-) diff --git a/flixopt/results.py b/flixopt/results.py index 17d855d0d..75d048c5a 100644 --- a/flixopt/results.py +++ b/flixopt/results.py @@ -65,60 +65,6 @@ class CalculationResults: >>> results.to_file(folder='new_results_dir', 
compression=5) # Save the results to a new folder """ - def launch_dashboard(self, port=8501): - import os - import subprocess - import sys - - # Rest of your code with try/except - try: - # Find explorer app path - current_dir = os.path.dirname(os.path.abspath(__file__)) - explorer_script = os.path.join(current_dir, '_results_explorer_app.py') - - if not os.path.exists(explorer_script): - raise FileNotFoundError( - f'Explorer app not found at {explorer_script}. ' - 'Please ensure the explorer_app.py file is in the flixopt package directory.' - ) - - # Run the Streamlit app - the port argument needs to be separate from the script arguments - cmd = [ - sys.executable, - '-m', - 'streamlit', - 'run', - explorer_script, - '--server.port', - str(port), - '--server.headless', - 'false', # This makes Streamlit open the browser itself - '--', - str(self.folder), - self.name, - ] - - self.to_file() - - self._dashboard_process = subprocess.Popen(cmd) - - logger.info(f'Streamlit app launched on port {port}. Use CalculationResults.stop_dashboard() to stop it.') - return self._dashboard_process - - except Exception as e: - print(f'Error launching Streamlit app: {e}') - return None - - def stop_dashboard(self): - """Stop the Streamlit dashboard process""" - if self._dashboard_process: - try: - self._dashboard_process.terminate() - self._dashboard_process.wait(timeout=5) # Wait up to 5 seconds for clean termination - except Exception: - self._dashboard_process.kill() # Force kill if needed - logger.info('Streamlit app has been stopped.') - @classmethod def from_file(cls, folder: Union[str, pathlib.Path], name: str): """Create CalculationResults instance by loading from saved files. 
@@ -468,6 +414,60 @@ def sizes( filters = {k: v for k, v in {'start': start, 'end': end, 'component': component}.items() if v is not None} return filter_dataarray_by_coord(self._sizes, **filters) + def launch_dashboard(self, port=8501): + import os + import subprocess + import sys + + # Rest of your code with try/except + try: + # Find explorer app path + current_dir = os.path.dirname(os.path.abspath(__file__)) + explorer_script = os.path.join(current_dir, '_results_explorer_app.py') + + if not os.path.exists(explorer_script): + raise FileNotFoundError( + f'Explorer app not found at {explorer_script}. ' + 'Please ensure the explorer_app.py file is in the flixopt package directory.' + ) + + # Run the Streamlit app - the port argument needs to be separate from the script arguments + cmd = [ + sys.executable, + '-m', + 'streamlit', + 'run', + explorer_script, + '--server.port', + str(port), + '--server.headless', + 'false', # This makes Streamlit open the browser itself + '--', + str(self.folder), + self.name, + ] + + self.to_file() + + self._dashboard_process = subprocess.Popen(cmd) + + logger.info(f'Streamlit app launched on port {port}. 
Use CalculationResults.stop_dashboard() to stop it.') + return self._dashboard_process + + except Exception as e: + print(f'Error launching Streamlit app: {e}') + return None + + def stop_dashboard(self): + """Stop the Streamlit dashboard process""" + if self._dashboard_process: + try: + self._dashboard_process.terminate() + self._dashboard_process.wait(timeout=5) # Wait up to 5 seconds for clean termination + except Exception: + self._dashboard_process.kill() # Force kill if needed + logger.info('Streamlit app has been stopped.') + def _assign_flow_coords(self, da: xr.DataArray): # Add start and end coordinates da = da.assign_coords({ From 2ee234bc877cb29077c26b91fb13a7a1787f0817 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 16 May 2025 08:52:28 +0200 Subject: [PATCH 86/87] Ensure scenario dimension is categorically plotted --- flixopt/_results_explorer_app.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/flixopt/_results_explorer_app.py b/flixopt/_results_explorer_app.py index ab3fdbfd7..2615f7141 100644 --- a/flixopt/_results_explorer_app.py +++ b/flixopt/_results_explorer_app.py @@ -754,6 +754,10 @@ def xarray_explorer( array_to_plot = data selected_var = data.name if data.name else 'Data' + # Convert scenario dimension to string to ensure categorical plots + if 'scenario' in array_to_plot.dims: + array_to_plot = array_to_plot.assign_coords({'scenario': array_to_plot.coords['scenario'].astype(str)}) + # Initialize result dictionary result = { 'data': data, From bc0739a43a05c1eb6e210ca39587140b487b6f6d Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 16 May 2025 08:58:16 +0200 Subject: [PATCH 87/87] Change order of elements --- flixopt/_results_explorer_app.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git 
a/flixopt/_results_explorer_app.py +++ b/flixopt/_results_explorer_app.py @@ -795,11 +795,10 @@ def xarray_explorer( result['slice_dict'] = slice_dict # Data preview section - container.subheader('Data Preview') - display_data_preview(array_to_plot, container) + with container.expander('Data Preview', expanded=False): + display_data_preview(array_to_plot, container) # Download options - container.subheader('Download Options') download_format = container.selectbox('Download format', ['CSV', 'NetCDF', 'Excel']) if container.button('Download filtered data'):