diff --git a/CHANGELOG.md b/CHANGELOG.md
index bd11fb442..62d852ea9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -51,7 +51,7 @@ If upgrading from v2.x, see the [v3.0.0 release notes](https://github.com/flixOp
## [Unreleased] - ????-??-??
-**Summary**: Type system overhaul with comprehensive type hints for better IDE support and code clarity.
+**Summary**: Type system overhaul and migration to loguru for logging
If upgrading from v2.x, see the [v3.0.0 release notes](https://github.com/flixOpt/flixOpt/releases/tag/v3.0.0) and [Migration Guide](https://flixopt.github.io/flixopt/latest/user-guide/migration-guide-v3/).
@@ -64,8 +64,13 @@ If upgrading from v2.x, see the [v3.0.0 release notes](https://github.com/flixOp
- Added `Scalar` type for scalar-only numeric values
- Added `NumericOrBool` utility type for internal use
- Type system supports scalars, numpy arrays, pandas Series/DataFrames, and xarray DataArrays
+- Lazy logging evaluation - expensive log operations only execute when log level is active
+- `CONFIG.Logging.verbose_tracebacks` option for detailed debugging with variable values
### 💥 Breaking Changes
+- **Logging framework**: Migrated to [loguru](https://loguru.readthedocs.io/)
+ - Removed `CONFIG.Logging` parameters: `rich`, `Colors`, `date_format`, `format`, `console_width`, `show_path`, `show_logger_name`
+ - For advanced formatting, use loguru's API directly after `CONFIG.apply()`
### ♻️ Changed
- **Code structure**: Removed `commons.py` module and moved all imports directly to `__init__.py` for cleaner code organization (no public API changes)
@@ -83,13 +88,15 @@ If upgrading from v2.x, see the [v3.0.0 release notes](https://github.com/flixOp
### 📦 Dependencies
- Updated `mkdocs-material` to v9.6.23
+- Replaced `rich >= 13.0.0` with `loguru >= 0.7.0` for logging
### 📝 Docs
- Enhanced documentation in `flixopt/types.py` with comprehensive examples and dimension explanation table
- Clarified Effect type docstrings - Effect types are dicts, but single numeric values work through union types
- Added clarifying comments in `effects.py` explaining parameter handling and transformation
- Improved OnOffParameters attribute documentation
-
+- Updated getting-started guide with loguru examples
+- Updated `config.py` docstrings for loguru integration
### 👷 Development
- Added test for FlowSystem resampling
diff --git a/docs/getting-started.md b/docs/getting-started.md
index 044ffb872..5841de3a4 100644
--- a/docs/getting-started.md
+++ b/docs/getting-started.md
@@ -22,6 +22,24 @@ For all features including interactive network visualizations and time series ag
pip install "flixopt[full]"
```
+## Logging
+
+FlixOpt uses [loguru](https://loguru.readthedocs.io/) for logging. Logging is silent by default but can be easily configured. For beginners, use our internal convenience methods. Experts can use loguru directly.
+
+```python
+from flixopt import CONFIG
+
+# Enable console logging
+CONFIG.Logging.console = True
+CONFIG.Logging.level = 'INFO'
+CONFIG.apply()
+
+# Or use a preset configuration for exploring
+CONFIG.exploring()
+```
+
+For more details on logging configuration, see the [`CONFIG.Logging`][flixopt.config.CONFIG.Logging] documentation.
+
## Basic Workflow
Working with FlixOpt follows a general pattern:
diff --git a/examples/02_Complex/complex_example.py b/examples/02_Complex/complex_example.py
index b8ef76a03..3ff5b251c 100644
--- a/examples/02_Complex/complex_example.py
+++ b/examples/02_Complex/complex_example.py
@@ -4,7 +4,6 @@
import numpy as np
import pandas as pd
-from rich.pretty import pprint # Used for pretty printing
import flixopt as fx
@@ -188,7 +187,7 @@
flow_system.add_elements(Costs, CO2, PE, Gaskessel, Waermelast, Gasbezug, Stromverkauf, speicher)
flow_system.add_elements(bhkw_2) if use_chp_with_piecewise_conversion else flow_system.add_elements(bhkw)
- pprint(flow_system) # Get a string representation of the FlowSystem
+ print(flow_system) # Get a string representation of the FlowSystem
try:
flow_system.start_network_app() # Start the network app
except ImportError as e:
diff --git a/examples/05_Two-stage-optimization/two_stage_optimization.py b/examples/05_Two-stage-optimization/two_stage_optimization.py
index 9647e803c..7354cb877 100644
--- a/examples/05_Two-stage-optimization/two_stage_optimization.py
+++ b/examples/05_Two-stage-optimization/two_stage_optimization.py
@@ -7,17 +7,15 @@
While the final optimum might differ from the global optimum, the solving will be much faster.
"""
-import logging
import pathlib
import timeit
import pandas as pd
import xarray as xr
+from loguru import logger
import flixopt as fx
-logger = logging.getLogger('flixopt')
-
if __name__ == '__main__':
fx.CONFIG.exploring()
diff --git a/flixopt/aggregation.py b/flixopt/aggregation.py
index cd0fdde3c..99b13bd45 100644
--- a/flixopt/aggregation.py
+++ b/flixopt/aggregation.py
@@ -6,12 +6,12 @@
from __future__ import annotations
import copy
-import logging
import pathlib
import timeit
from typing import TYPE_CHECKING
import numpy as np
+from loguru import logger
try:
import tsam.timeseriesaggregation as tsam
@@ -37,8 +37,6 @@
from .elements import Component
from .flow_system import FlowSystem
-logger = logging.getLogger('flixopt')
-
class Aggregation:
"""
@@ -106,7 +104,7 @@ def cluster(self) -> None:
self.aggregated_data = self.tsam.predictOriginalData()
self.clustering_duration_seconds = timeit.default_timer() - start_time # Zeit messen:
- logger.info(self.describe_clusters())
+ logger.opt(lazy=True).info('{result}', result=lambda: self.describe_clusters())
def describe_clusters(self) -> str:
description = {}
diff --git a/flixopt/calculation.py b/flixopt/calculation.py
index 1125da401..64c589e3a 100644
--- a/flixopt/calculation.py
+++ b/flixopt/calculation.py
@@ -10,7 +10,6 @@
from __future__ import annotations
-import logging
import math
import pathlib
import sys
@@ -20,6 +19,7 @@
from typing import TYPE_CHECKING, Annotated, Any
import numpy as np
+from loguru import logger
from tqdm import tqdm
from . import io as fx_io
@@ -39,8 +39,6 @@
from .solvers import _Solver
from .structure import FlowSystemModel
-logger = logging.getLogger('flixopt')
-
class Calculation:
"""
@@ -238,7 +236,7 @@ def solve(
**solver.options,
)
self.durations['solving'] = round(timeit.default_timer() - t_start, 2)
- logger.info(f'Model solved with {solver.name} in {self.durations["solving"]:.2f} seconds.')
+ logger.success(f'Model solved with {solver.name} in {self.durations["solving"]:.2f} seconds.')
logger.info(f'Model status after solve: {self.model.status}')
if self.model.status == 'warning':
@@ -255,12 +253,10 @@ def solve(
# Log the formatted output
should_log = log_main_results if log_main_results is not None else CONFIG.Solving.log_main_results
if should_log:
- logger.info(
- f'{" Main Results ":#^80}\n'
- + fx_io.format_yaml_string(
- self.main_results,
- compact_numeric_lists=True,
- )
+ logger.opt(lazy=True).info(
+ '{result}',
+ result=lambda: f'{" Main Results ":#^80}\n'
+ + fx_io.format_yaml_string(self.main_results, compact_numeric_lists=True),
)
self.results = CalculationResults.from_calculation(self)
@@ -673,7 +669,7 @@ def do_modeling_and_solve(
for key, value in calc.durations.items():
self.durations[key] += value
- logger.info(f'Model solved with {solver.name} in {self.durations["solving"]:.2f} seconds.')
+ logger.success(f'Model solved with {solver.name} in {self.durations["solving"]:.2f} seconds.')
self.results = SegmentedCalculationResults.from_calculation(self)
diff --git a/flixopt/color_processing.py b/flixopt/color_processing.py
index 2959acc82..9d874e027 100644
--- a/flixopt/color_processing.py
+++ b/flixopt/color_processing.py
@@ -6,15 +6,12 @@
from __future__ import annotations
-import logging
-
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import plotly.express as px
+from loguru import logger
from plotly.exceptions import PlotlyError
-logger = logging.getLogger('flixopt')
-
def _rgb_string_to_hex(color: str) -> str:
"""Convert Plotly RGB/RGBA string format to hex.
diff --git a/flixopt/components.py b/flixopt/components.py
index 6a5abfc4e..c51b4b7d2 100644
--- a/flixopt/components.py
+++ b/flixopt/components.py
@@ -4,12 +4,12 @@
from __future__ import annotations
-import logging
import warnings
from typing import TYPE_CHECKING, Literal
import numpy as np
import xarray as xr
+from loguru import logger
from . import io as fx_io
from .core import PlausibilityError
@@ -25,8 +25,6 @@
from .flow_system import FlowSystem
from .types import Numeric_PS, Numeric_TPS
-logger = logging.getLogger('flixopt')
-
@register_class_for_io
class LinearConverter(Component):
diff --git a/flixopt/config.py b/flixopt/config.py
index d7ea824d9..07d7e24a9 100644
--- a/flixopt/config.py
+++ b/flixopt/config.py
@@ -1,23 +1,16 @@
from __future__ import annotations
-import logging
import os
import sys
import warnings
-from logging.handlers import RotatingFileHandler
from pathlib import Path
from types import MappingProxyType
from typing import Literal
-from rich.console import Console
-from rich.logging import RichHandler
-from rich.style import Style
-from rich.theme import Theme
+from loguru import logger
__all__ = ['CONFIG', 'change_logging_level']
-logger = logging.getLogger('flixopt')
-
# SINGLE SOURCE OF TRUTH - immutable to prevent accidental modification
_DEFAULTS = MappingProxyType(
@@ -27,24 +20,10 @@
{
'level': 'INFO',
'file': None,
- 'rich': False,
'console': False,
'max_file_size': 10_485_760, # 10MB
'backup_count': 5,
- 'date_format': '%Y-%m-%d %H:%M:%S',
- 'format': '%(message)s',
- 'console_width': 120,
- 'show_path': False,
- 'show_logger_name': False,
- 'colors': MappingProxyType(
- {
- 'DEBUG': '\033[90m', # Bright Black/Gray
- 'INFO': '\033[0m', # Default/White
- 'WARNING': '\033[33m', # Yellow
- 'ERROR': '\033[31m', # Red
- 'CRITICAL': '\033[1m\033[31m', # Bold Red
- }
- ),
+ 'verbose_tracebacks': False,
}
),
'modeling': MappingProxyType(
@@ -81,6 +60,9 @@ class CONFIG:
Always call ``CONFIG.apply()`` after changes.
+ Note:
+ flixopt uses `loguru `_ for logging.
+
Attributes:
Logging: Logging configuration.
Modeling: Optimization modeling parameters.
@@ -114,86 +96,48 @@ class Logging:
Silent by default. Enable via ``console=True`` or ``file='path'``.
Attributes:
- level: Logging level.
- file: Log file path for file logging.
- console: Enable console output.
- rich: Use Rich library for enhanced output.
- max_file_size: Max file size before rotation.
+ level: Logging level (DEBUG, INFO, SUCCESS, WARNING, ERROR, CRITICAL).
+ file: Log file path for file logging (None to disable).
+ console: Enable console output (True/'stdout' or 'stderr').
+ max_file_size: Max file size in bytes before rotation.
backup_count: Number of backup files to keep.
- date_format: Date/time format string.
- format: Log message format string.
- console_width: Console width for Rich handler.
- show_path: Show file paths in messages.
- show_logger_name: Show logger name in messages.
- Colors: ANSI color codes for log levels.
+ verbose_tracebacks: Show detailed tracebacks with variable values.
Examples:
```python
+ # Enable console logging
+ CONFIG.Logging.console = True
+ CONFIG.Logging.level = 'DEBUG'
+ CONFIG.apply()
+
# File logging with rotation
CONFIG.Logging.file = 'app.log'
CONFIG.Logging.max_file_size = 5_242_880 # 5MB
CONFIG.apply()
- # Rich handler with stdout
- CONFIG.Logging.console = True # or 'stdout'
- CONFIG.Logging.rich = True
- CONFIG.apply()
-
- # Console output to stderr
+ # Console to stderr
CONFIG.Logging.console = 'stderr'
CONFIG.apply()
```
+
+ Note:
+ For advanced formatting or custom loguru configuration,
+ use loguru's API directly after calling CONFIG.apply():
+
+ ```python
+ from loguru import logger
+
+ CONFIG.apply() # Basic setup
+ logger.add('custom.log', format='{time} {message}')
+ ```
"""
- level: Literal['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'] = _DEFAULTS['logging']['level']
+ level: Literal['DEBUG', 'INFO', 'SUCCESS', 'WARNING', 'ERROR', 'CRITICAL'] = _DEFAULTS['logging']['level']
file: str | None = _DEFAULTS['logging']['file']
- rich: bool = _DEFAULTS['logging']['rich']
console: bool | Literal['stdout', 'stderr'] = _DEFAULTS['logging']['console']
max_file_size: int = _DEFAULTS['logging']['max_file_size']
backup_count: int = _DEFAULTS['logging']['backup_count']
- date_format: str = _DEFAULTS['logging']['date_format']
- format: str = _DEFAULTS['logging']['format']
- console_width: int = _DEFAULTS['logging']['console_width']
- show_path: bool = _DEFAULTS['logging']['show_path']
- show_logger_name: bool = _DEFAULTS['logging']['show_logger_name']
-
- class Colors:
- """ANSI color codes for log levels.
-
- Attributes:
- DEBUG: ANSI color for DEBUG level.
- INFO: ANSI color for INFO level.
- WARNING: ANSI color for WARNING level.
- ERROR: ANSI color for ERROR level.
- CRITICAL: ANSI color for CRITICAL level.
-
- Examples:
- ```python
- CONFIG.Logging.Colors.INFO = '\\033[32m' # Green
- CONFIG.Logging.Colors.ERROR = '\\033[1m\\033[31m' # Bold red
- CONFIG.apply()
- ```
-
- Common ANSI codes:
- - '\\033[30m' - Black
- - '\\033[31m' - Red
- - '\\033[32m' - Green
- - '\\033[33m' - Yellow
- - '\\033[34m' - Blue
- - '\\033[35m' - Magenta
- - '\\033[36m' - Cyan
- - '\\033[37m' - White
- - '\\033[90m' - Bright Black/Gray
- - '\\033[0m' - Reset to default
- - '\\033[1m\\033[3Xm' - Bold (replace X with color code 0-7)
- - '\\033[2m\\033[3Xm' - Dim (replace X with color code 0-7)
- """
-
- DEBUG: str = _DEFAULTS['logging']['colors']['DEBUG']
- INFO: str = _DEFAULTS['logging']['colors']['INFO']
- WARNING: str = _DEFAULTS['logging']['colors']['WARNING']
- ERROR: str = _DEFAULTS['logging']['colors']['ERROR']
- CRITICAL: str = _DEFAULTS['logging']['colors']['CRITICAL']
+ verbose_tracebacks: bool = _DEFAULTS['logging']['verbose_tracebacks']
class Modeling:
"""Optimization modeling parameters.
@@ -274,12 +218,7 @@ class Plotting:
def reset(cls):
"""Reset all configuration values to defaults."""
for key, value in _DEFAULTS['logging'].items():
- if key == 'colors':
- # Reset nested Colors class
- for color_key, color_value in value.items():
- setattr(cls.Logging.Colors, color_key, color_value)
- else:
- setattr(cls.Logging, key, value)
+ setattr(cls.Logging, key, value)
for key, value in _DEFAULTS['modeling'].items():
setattr(cls.Modeling, key, value)
@@ -296,15 +235,7 @@ def reset(cls):
@classmethod
def apply(cls):
"""Apply current configuration to logging system."""
- # Convert Colors class attributes to dict
- colors_dict = {
- 'DEBUG': cls.Logging.Colors.DEBUG,
- 'INFO': cls.Logging.Colors.INFO,
- 'WARNING': cls.Logging.Colors.WARNING,
- 'ERROR': cls.Logging.Colors.ERROR,
- 'CRITICAL': cls.Logging.Colors.CRITICAL,
- }
- valid_levels = list(colors_dict)
+ valid_levels = ['DEBUG', 'INFO', 'SUCCESS', 'WARNING', 'ERROR', 'CRITICAL']
if cls.Logging.level.upper() not in valid_levels:
raise ValueError(f"Invalid log level '{cls.Logging.level}'. Must be one of: {', '.join(valid_levels)}")
@@ -320,16 +251,10 @@ def apply(cls):
_setup_logging(
default_level=cls.Logging.level,
log_file=cls.Logging.file,
- use_rich_handler=cls.Logging.rich,
console=cls.Logging.console,
max_file_size=cls.Logging.max_file_size,
backup_count=cls.Logging.backup_count,
- date_format=cls.Logging.date_format,
- format=cls.Logging.format,
- console_width=cls.Logging.console_width,
- show_path=cls.Logging.show_path,
- show_logger_name=cls.Logging.show_logger_name,
- colors=colors_dict,
+ verbose_tracebacks=cls.Logging.verbose_tracebacks,
)
@classmethod
@@ -364,11 +289,7 @@ def _apply_config_dict(cls, config_dict: dict):
for key, value in config_dict.items():
if key == 'logging' and isinstance(value, dict):
for nested_key, nested_value in value.items():
- if nested_key == 'colors' and isinstance(nested_value, dict):
- # Handle nested colors under logging
- for color_key, color_value in nested_value.items():
- setattr(cls.Logging.Colors, color_key, color_value)
- else:
+ if hasattr(cls.Logging, nested_key):
setattr(cls.Logging, nested_key, nested_value)
elif key == 'modeling' and isinstance(value, dict):
for nested_key, nested_value in value.items():
@@ -394,22 +315,10 @@ def to_dict(cls) -> dict:
'logging': {
'level': cls.Logging.level,
'file': cls.Logging.file,
- 'rich': cls.Logging.rich,
'console': cls.Logging.console,
'max_file_size': cls.Logging.max_file_size,
'backup_count': cls.Logging.backup_count,
- 'date_format': cls.Logging.date_format,
- 'format': cls.Logging.format,
- 'console_width': cls.Logging.console_width,
- 'show_path': cls.Logging.show_path,
- 'show_logger_name': cls.Logging.show_logger_name,
- 'colors': {
- 'DEBUG': cls.Logging.Colors.DEBUG,
- 'INFO': cls.Logging.Colors.INFO,
- 'WARNING': cls.Logging.Colors.WARNING,
- 'ERROR': cls.Logging.Colors.ERROR,
- 'CRITICAL': cls.Logging.Colors.CRITICAL,
- },
+ 'verbose_tracebacks': cls.Logging.verbose_tracebacks,
},
'modeling': {
'big': cls.Modeling.big,
@@ -451,11 +360,12 @@ def silent(cls) -> type[CONFIG]:
def debug(cls) -> type[CONFIG]:
"""Configure for debug mode with verbose output.
- Enables console logging at DEBUG level and all solver output for
- troubleshooting. Automatically calls apply().
+ Enables console logging at DEBUG level, verbose tracebacks,
+ and all solver output for troubleshooting. Automatically calls apply().
"""
cls.Logging.console = True
cls.Logging.level = 'DEBUG'
+ cls.Logging.verbose_tracebacks = True
cls.Solving.log_to_console = True
cls.Solving.log_main_results = True
cls.apply()
@@ -497,274 +407,106 @@ def browser_plotting(cls) -> type[CONFIG]:
return cls
-class MultilineFormatter(logging.Formatter):
- """Formatter that handles multi-line messages with consistent prefixes.
+def _format_multiline(record):
+ """Format multi-line messages with box-style borders for better readability.
- Args:
- fmt: Log message format string.
- datefmt: Date/time format string.
- show_logger_name: Show logger name in log messages.
- """
-
- def __init__(self, fmt: str = '%(message)s', datefmt: str | None = None, show_logger_name: bool = False):
- super().__init__(fmt=fmt, datefmt=datefmt)
- self.show_logger_name = show_logger_name
-
- def format(self, record) -> str:
- record.message = record.getMessage()
- message_lines = self._style.format(record).split('\n')
- timestamp = self.formatTime(record, self.datefmt)
- log_level = record.levelname.ljust(8)
-
- if self.show_logger_name:
- # Truncate long logger names for readability
- logger_name = record.name if len(record.name) <= 20 else f'...{record.name[-17:]}'
- log_prefix = f'{timestamp} | {log_level} | {logger_name.ljust(20)} |'
- else:
- log_prefix = f'{timestamp} | {log_level} |'
-
- indent = ' ' * (len(log_prefix) + 1) # +1 for the space after prefix
+ Single-line messages use standard format.
+ Multi-line messages use boxed format with ┌─, │, └─ characters.
- lines = [f'{log_prefix} {message_lines[0]}']
- if len(message_lines) > 1:
- lines.extend([f'{indent}{line}' for line in message_lines[1:]])
-
- return '\n'.join(lines)
-
-
-class ColoredMultilineFormatter(MultilineFormatter):
- """Formatter that adds ANSI colors to multi-line log messages.
-
- Args:
- fmt: Log message format string.
- datefmt: Date/time format string.
- colors: Dictionary of ANSI color codes for each log level.
- show_logger_name: Show logger name in log messages.
+ Note: Escapes curly braces in messages to prevent format string errors.
"""
+ # Escape curly braces in message to prevent format string errors
+ message = record['message'].replace('{', '{{').replace('}', '}}')
+ lines = message.split('\n')
- RESET = '\033[0m'
-
- def __init__(
- self,
- fmt: str | None = None,
- datefmt: str | None = None,
- colors: dict[str, str] | None = None,
- show_logger_name: bool = False,
- ):
- super().__init__(fmt=fmt, datefmt=datefmt, show_logger_name=show_logger_name)
- self.COLORS = (
- colors
- if colors is not None
- else {
- 'DEBUG': '\033[90m',
- 'INFO': '\033[0m',
- 'WARNING': '\033[33m',
- 'ERROR': '\033[31m',
- 'CRITICAL': '\033[1m\033[31m',
- }
- )
+ # Format timestamp and level
+ time_str = record['time'].strftime('%Y-%m-%d %H:%M:%S.%f')[:-3] # milliseconds
+ level_str = f'{record["level"].name: <8}'
- def format(self, record):
- lines = super().format(record).splitlines()
- log_color = self.COLORS.get(record.levelname, self.RESET)
- formatted_lines = [f'{log_color}{line}{self.RESET}' for line in lines]
- return '\n'.join(formatted_lines)
-
-
-def _create_console_handler(
- use_rich: bool = False,
- stream: Literal['stdout', 'stderr'] = 'stdout',
- console_width: int = 120,
- show_path: bool = False,
- show_logger_name: bool = False,
- date_format: str = '%Y-%m-%d %H:%M:%S',
- format: str = '%(message)s',
- colors: dict[str, str] | None = None,
-) -> logging.Handler:
- """Create a console logging handler.
+ # Single line messages - standard format
+ if len(lines) == 1:
+ result = f'{time_str} | {level_str} | {message}\n'
+ if record['exception']:
+ result += '{exception}'
+ return result
- Args:
- use_rich: If True, use RichHandler with color support.
- stream: Output stream
- console_width: Width of the console for Rich handler.
- show_path: Show file paths in log messages (Rich only).
- show_logger_name: Show logger name in log messages.
- date_format: Date/time format string.
- format: Log message format string.
- colors: Dictionary of ANSI color codes for each log level.
-
- Returns:
- Configured logging handler (RichHandler or StreamHandler).
- """
- # Determine the stream object
- stream_obj = sys.stdout if stream == 'stdout' else sys.stderr
-
- if use_rich:
- # Convert ANSI codes to Rich theme
- if colors:
- theme_dict = {}
- for level, ansi_code in colors.items():
- # Rich can parse ANSI codes directly!
- try:
- style = Style.from_ansi(ansi_code)
- theme_dict[f'logging.level.{level.lower()}'] = style
- except Exception:
- # Fallback to default if parsing fails
- pass
-
- theme = Theme(theme_dict) if theme_dict else None
- else:
- theme = None
-
- console = Console(width=console_width, theme=theme, file=stream_obj)
- handler = RichHandler(
- console=console,
- rich_tracebacks=True,
- omit_repeated_times=True,
- show_path=show_path,
- log_time_format=date_format,
- )
- handler.setFormatter(logging.Formatter(format))
- else:
- handler = logging.StreamHandler(stream=stream_obj)
- handler.setFormatter(
- ColoredMultilineFormatter(
- fmt=format,
- datefmt=date_format,
- colors=colors,
- show_logger_name=show_logger_name,
- )
- )
-
- return handler
-
-
-def _create_file_handler(
- log_file: str,
- max_file_size: int = 10_485_760,
- backup_count: int = 5,
- show_logger_name: bool = False,
- date_format: str = '%Y-%m-%d %H:%M:%S',
- format: str = '%(message)s',
-) -> RotatingFileHandler:
- """Create a rotating file handler to prevent huge log files.
+ # Multi-line messages - boxed format
+ indent = ' ' * len(time_str) # Match timestamp length
- Args:
- log_file: Path to the log file.
- max_file_size: Maximum size in bytes before rotation.
- backup_count: Number of backup files to keep.
- show_logger_name: Show logger name in log messages.
- date_format: Date/time format string.
- format: Log message format string.
-
- Returns:
- Configured RotatingFileHandler (without colors).
- """
+ # Build the boxed output
+ result = f'{time_str} | {level_str} | ┌─ {lines[0]}\n'
+ for line in lines[1:-1]:
+ result += f'{indent} | {" " * 8} | │ {line}\n'
+ result += f'{indent} | {" " * 8} | └─ {lines[-1]}\n'
- # Ensure parent directory exists
- log_path = Path(log_file)
- try:
- log_path.parent.mkdir(parents=True, exist_ok=True)
- except PermissionError as e:
- raise PermissionError(f"Cannot create log directory '{log_path.parent}': Permission denied") from e
+ # Add exception info if present
+ if record['exception']:
+ result += '\n{exception}'
- try:
- handler = RotatingFileHandler(
- log_file,
- maxBytes=max_file_size,
- backupCount=backup_count,
- encoding='utf-8',
- )
- except PermissionError as e:
- raise PermissionError(
- f"Cannot write to log file '{log_file}': Permission denied. "
- f'Choose a different location or check file permissions.'
- ) from e
-
- handler.setFormatter(
- MultilineFormatter(
- fmt=format,
- datefmt=date_format,
- show_logger_name=show_logger_name,
- )
- )
- return handler
+ return result
def _setup_logging(
- default_level: Literal['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'] = 'INFO',
+ default_level: Literal['DEBUG', 'INFO', 'SUCCESS', 'WARNING', 'ERROR', 'CRITICAL'] = 'INFO',
log_file: str | None = None,
- use_rich_handler: bool = False,
console: bool | Literal['stdout', 'stderr'] = False,
max_file_size: int = 10_485_760,
backup_count: int = 5,
- date_format: str = '%Y-%m-%d %H:%M:%S',
- format: str = '%(message)s',
- console_width: int = 120,
- show_path: bool = False,
- show_logger_name: bool = False,
- colors: dict[str, str] | None = None,
+ verbose_tracebacks: bool = False,
) -> None:
"""Internal function to setup logging - use CONFIG.apply() instead.
- Configures the flixopt logger with console and/or file handlers.
- If no handlers are configured, adds NullHandler (library best practice).
+ Configures loguru logger with console and/or file handlers.
+ Multi-line messages are automatically formatted with box-style borders.
Args:
default_level: Logging level for the logger.
log_file: Path to log file (None to disable file logging).
- use_rich_handler: Use Rich for enhanced console output.
- console: Enable console logging.
- max_file_size: Maximum log file size before rotation.
+ console: Enable console logging (True/'stdout' or 'stderr').
+ max_file_size: Maximum log file size in bytes before rotation.
backup_count: Number of backup log files to keep.
- date_format: Date/time format for log messages.
- format: Log message format string.
- console_width: Console width for Rich handler.
- show_path: Show file paths in log messages (Rich only).
- show_logger_name: Show logger name in log messages.
- colors: ANSI color codes for each log level.
+ verbose_tracebacks: If True, show detailed tracebacks with variable values.
"""
- logger = logging.getLogger('flixopt')
- logger.setLevel(getattr(logging, default_level.upper()))
- logger.propagate = False # Prevent duplicate logs
- logger.handlers.clear()
+ # Remove all existing handlers
+ logger.remove()
- # Handle console parameter: False = disabled, True = stdout, 'stdout' = stdout, 'stderr' = stderr
+ # Console handler with multi-line formatting
if console:
- # Convert True to 'stdout', keep 'stdout'/'stderr' as-is
- stream = 'stdout' if console is True else console
- logger.addHandler(
- _create_console_handler(
- use_rich=use_rich_handler,
- stream=stream,
- console_width=console_width,
- show_path=show_path,
- show_logger_name=show_logger_name,
- date_format=date_format,
- format=format,
- colors=colors,
- )
+ stream = sys.stdout if console is True or console == 'stdout' else sys.stderr
+ logger.add(
+ stream,
+ format=_format_multiline,
+ level=default_level.upper(),
+ colorize=True,
+ backtrace=verbose_tracebacks,
+ diagnose=verbose_tracebacks,
+ enqueue=False,
)
+ # File handler with rotation (plain format for files)
if log_file:
- logger.addHandler(
- _create_file_handler(
- log_file=log_file,
- max_file_size=max_file_size,
- backup_count=backup_count,
- show_logger_name=show_logger_name,
- date_format=date_format,
- format=format,
- )
- )
+ log_path = Path(log_file)
+ try:
+ log_path.parent.mkdir(parents=True, exist_ok=True)
+ except PermissionError as e:
+ raise PermissionError(f"Cannot create log directory '{log_path.parent}': Permission denied") from e
- # Library best practice: NullHandler if no handlers configured
- if not logger.handlers:
- logger.addHandler(logging.NullHandler())
+ logger.add(
+ log_file,
+ format='{time:YYYY-MM-DD HH:mm:ss.SSS} | {level: <8} | {message}',
+ level=default_level.upper(),
+ colorize=False,
+ rotation=max_file_size,
+ retention=backup_count,
+ encoding='utf-8',
+ backtrace=verbose_tracebacks,
+ diagnose=verbose_tracebacks,
+ enqueue=False,
+ )
-def change_logging_level(level_name: Literal['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']):
- """Change the logging level for the flixopt logger and all its handlers.
+def change_logging_level(level_name: Literal['DEBUG', 'INFO', 'SUCCESS', 'WARNING', 'ERROR', 'CRITICAL']):
+ """Change the logging level for the flixopt logger.
.. deprecated:: 2.1.11
Use ``CONFIG.Logging.level = level_name`` and ``CONFIG.apply()`` instead.
@@ -785,11 +527,8 @@ def change_logging_level(level_name: Literal['DEBUG', 'INFO', 'WARNING', 'ERROR'
DeprecationWarning,
stacklevel=2,
)
- logger = logging.getLogger('flixopt')
- logging_level = getattr(logging, level_name.upper())
- logger.setLevel(logging_level)
- for handler in logger.handlers:
- handler.setLevel(logging_level)
+ CONFIG.Logging.level = level_name.upper()
+ CONFIG.apply()
# Initialize default config
diff --git a/flixopt/core.py b/flixopt/core.py
index 0d70e255b..7f4d2a20f 100644
--- a/flixopt/core.py
+++ b/flixopt/core.py
@@ -3,7 +3,6 @@
It provides Datatypes, logging functionality, and some functions to transform data structures.
"""
-import logging
import warnings
from itertools import permutations
from typing import Any, Literal, Union
@@ -11,11 +10,10 @@
import numpy as np
import pandas as pd
import xarray as xr
+from loguru import logger
from .types import NumericOrBool
-logger = logging.getLogger('flixopt')
-
FlowSystemDimensions = Literal['time', 'period', 'scenario']
"""Possible dimensions of a FlowSystem."""
diff --git a/flixopt/effects.py b/flixopt/effects.py
index 02c850050..ebfc2c906 100644
--- a/flixopt/effects.py
+++ b/flixopt/effects.py
@@ -7,7 +7,6 @@
from __future__ import annotations
-import logging
import warnings
from collections import deque
from typing import TYPE_CHECKING, Literal
@@ -15,6 +14,7 @@
import linopy
import numpy as np
import xarray as xr
+from loguru import logger
from .features import ShareAllocationModel
from .structure import Element, ElementContainer, ElementModel, FlowSystemModel, Submodel, register_class_for_io
@@ -25,8 +25,6 @@
from .flow_system import FlowSystem
from .types import Effect_PS, Effect_TPS, Numeric_PS, Numeric_TPS, Scalar
-logger = logging.getLogger('flixopt')
-
@register_class_for_io
class Effect(Element):
diff --git a/flixopt/elements.py b/flixopt/elements.py
index 224cc0f9c..81f0b00aa 100644
--- a/flixopt/elements.py
+++ b/flixopt/elements.py
@@ -4,12 +4,12 @@
from __future__ import annotations
-import logging
import warnings
from typing import TYPE_CHECKING
import numpy as np
import xarray as xr
+from loguru import logger
from . import io as fx_io
from .config import CONFIG
@@ -36,8 +36,6 @@
Scalar,
)
-logger = logging.getLogger('flixopt')
-
@register_class_for_io
class Component(Element):
diff --git a/flixopt/features.py b/flixopt/features.py
index 519693885..fd9796ba1 100644
--- a/flixopt/features.py
+++ b/flixopt/features.py
@@ -5,7 +5,6 @@
from __future__ import annotations
-import logging
from typing import TYPE_CHECKING
import linopy
@@ -19,8 +18,6 @@
from .interface import InvestParameters, OnOffParameters, Piecewise
from .types import Numeric_PS, Numeric_TPS
-logger = logging.getLogger('flixopt')
-
class InvestmentModel(Submodel):
"""
diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py
index 081359076..cf112a608 100644
--- a/flixopt/flow_system.py
+++ b/flixopt/flow_system.py
@@ -4,7 +4,6 @@
from __future__ import annotations
-import logging
import warnings
from collections import defaultdict
from itertools import chain
@@ -13,6 +12,7 @@
import numpy as np
import pandas as pd
import xarray as xr
+from loguru import logger
from . import io as fx_io
from .config import CONFIG
@@ -34,8 +34,6 @@
from .types import Bool_TPS, Effect_TPS, Numeric_PS, Numeric_TPS, NumericOrBool
-logger = logging.getLogger('flixopt')
-
class FlowSystem(Interface, CompositeContainerMixin[Element]):
"""
diff --git a/flixopt/interface.py b/flixopt/interface.py
index e22ceebd5..f67f501ba 100644
--- a/flixopt/interface.py
+++ b/flixopt/interface.py
@@ -5,13 +5,13 @@
from __future__ import annotations
-import logging
import warnings
from typing import TYPE_CHECKING, Any
import numpy as np
import pandas as pd
import xarray as xr
+from loguru import logger
from .config import CONFIG
from .structure import Interface, register_class_for_io
@@ -23,9 +23,6 @@
from .types import Effect_PS, Effect_TPS, Numeric_PS, Numeric_TPS
-logger = logging.getLogger('flixopt')
-
-
@register_class_for_io
class Piece(Interface):
"""Define a single linear segment with specified domain boundaries.
diff --git a/flixopt/io.py b/flixopt/io.py
index e83738d89..ffeb2474e 100644
--- a/flixopt/io.py
+++ b/flixopt/io.py
@@ -2,7 +2,6 @@
import inspect
import json
-import logging
import os
import pathlib
import re
@@ -15,14 +14,13 @@
import pandas as pd
import xarray as xr
import yaml
+from loguru import logger
if TYPE_CHECKING:
import linopy
from .types import Numeric_TPS
-logger = logging.getLogger('flixopt')
-
def remove_none_and_empty(obj):
"""Recursively removes None and empty dicts and lists values from a dictionary or list."""
@@ -500,7 +498,7 @@ def document_linopy_model(model: linopy.Model, path: pathlib.Path | None = None)
}
if model.status == 'warning':
- logger.critical(f'The model has a warning status {model.status=}. Trying to extract infeasibilities')
+ logger.warning(f'The model has a warning status {model.status=}. Trying to extract infeasibilities')
try:
import io
from contextlib import redirect_stdout
@@ -513,7 +511,7 @@ def document_linopy_model(model: linopy.Model, path: pathlib.Path | None = None)
documentation['infeasible_constraints'] = f.getvalue()
except NotImplementedError:
- logger.critical(
+ logger.warning(
'Infeasible constraints could not get retrieved. This functionality is only availlable with gurobi'
)
documentation['infeasible_constraints'] = 'Not possible to retrieve infeasible constraints'
diff --git a/flixopt/linear_converters.py b/flixopt/linear_converters.py
index 046fcbd51..8f02e4f70 100644
--- a/flixopt/linear_converters.py
+++ b/flixopt/linear_converters.py
@@ -4,10 +4,10 @@
from __future__ import annotations
-import logging
from typing import TYPE_CHECKING
import numpy as np
+from loguru import logger
from .components import LinearConverter
from .core import TimeSeriesData
@@ -18,8 +18,6 @@
from .interface import OnOffParameters
from .types import Numeric_TPS
-logger = logging.getLogger('flixopt')
-
@register_class_for_io
class Boiler(LinearConverter):
@@ -620,11 +618,21 @@ def check_bounds(
if not np.all(value_arr > lower_arr):
logger.warning(
- f"'{element_label}.{parameter_label}' is equal or below the common lower bound {lower_bound}."
- f' {parameter_label}.min={np.min(value_arr)}; {parameter_label}={value}'
+ "'{}.{}' <= lower bound {}. {}.min={} shape={}",
+ element_label,
+ parameter_label,
+ lower_bound,
+ parameter_label,
+ float(np.min(value_arr)),
+ np.shape(value_arr),
)
if not np.all(value_arr < upper_arr):
logger.warning(
- f"'{element_label}.{parameter_label}' exceeds or matches the common upper bound {upper_bound}."
- f' {parameter_label}.max={np.max(value_arr)}; {parameter_label}={value}'
+ "'{}.{}' >= upper bound {}. {}.max={} shape={}",
+ element_label,
+ parameter_label,
+ upper_bound,
+ parameter_label,
+ float(np.max(value_arr)),
+ np.shape(value_arr),
)
diff --git a/flixopt/modeling.py b/flixopt/modeling.py
index 13b4c0e3e..ebe739a85 100644
--- a/flixopt/modeling.py
+++ b/flixopt/modeling.py
@@ -1,14 +1,11 @@
-import logging
-
import linopy
import numpy as np
import xarray as xr
+from loguru import logger
from .config import CONFIG
from .structure import Submodel
-logger = logging.getLogger('flixopt')
-
class ModelingUtilitiesAbstract:
"""Utility functions for modeling calculations - leveraging xarray for temporal data"""
diff --git a/flixopt/network_app.py b/flixopt/network_app.py
index 2cc80e7b0..446a2e7ce 100644
--- a/flixopt/network_app.py
+++ b/flixopt/network_app.py
@@ -1,10 +1,11 @@
from __future__ import annotations
-import logging
import socket
import threading
from typing import TYPE_CHECKING, Any
+from loguru import logger
+
try:
import dash_cytoscape as cyto
import dash_daq as daq
@@ -24,8 +25,6 @@
if TYPE_CHECKING:
from .flow_system import FlowSystem
-logger = logging.getLogger('flixopt')
-
# Configuration class for better organization
class VisualizationConfig:
diff --git a/flixopt/plotting.py b/flixopt/plotting.py
index 045cf7e99..27dbaf78c 100644
--- a/flixopt/plotting.py
+++ b/flixopt/plotting.py
@@ -26,7 +26,6 @@
from __future__ import annotations
import itertools
-import logging
import os
import pathlib
from typing import TYPE_CHECKING, Any, Literal
@@ -40,6 +39,7 @@
import plotly.graph_objects as go
import plotly.offline
import xarray as xr
+from loguru import logger
from .color_processing import process_colors
from .config import CONFIG
@@ -47,8 +47,6 @@
if TYPE_CHECKING:
import pyvis
-logger = logging.getLogger('flixopt')
-
# Define the colors for the 'portland' colorscale in matplotlib
_portland_colors = [
[12 / 255, 51 / 255, 131 / 255], # Dark blue
diff --git a/flixopt/results.py b/flixopt/results.py
index 3d9aedf62..eaff79fe4 100644
--- a/flixopt/results.py
+++ b/flixopt/results.py
@@ -2,7 +2,6 @@
import copy
import datetime
-import logging
import pathlib
import warnings
from typing import TYPE_CHECKING, Any, Literal
@@ -11,6 +10,7 @@
import numpy as np
import pandas as pd
import xarray as xr
+from loguru import logger
from . import io as fx_io
from . import plotting
@@ -28,9 +28,6 @@
from .core import FlowSystemDimensions
-logger = logging.getLogger('flixopt')
-
-
def load_mapping_from_file(path: pathlib.Path) -> dict[str, str | list[str]]:
"""Load color mapping from JSON or YAML file.
@@ -344,18 +341,19 @@ def flow_system(self) -> FlowSystem:
"""The restored flow_system that was used to create the calculation.
Contains all input parameters."""
if self._flow_system is None:
- old_level = logger.level
- logger.level = logging.CRITICAL
+ # Temporarily disable all logging to suppress messages during restoration
+ logger.disable('flixopt')
try:
self._flow_system = FlowSystem.from_dataset(self.flow_system_data)
self._flow_system._connect_network()
except Exception as e:
+ logger.enable('flixopt') # Re-enable before logging critical message
logger.critical(
f'Not able to restore FlowSystem from dataset. Some functionality is not availlable. {e}'
)
raise _FlowSystemRestorationError(f'Not able to restore FlowSystem from dataset. {e}') from e
finally:
- logger.level = old_level
+ logger.enable('flixopt')
return self._flow_system
def setup_colors(
@@ -1092,7 +1090,7 @@ def to_file(
else:
fx_io.document_linopy_model(self.model, path=paths.model_documentation)
- logger.info(f'Saved calculation results "{name}" to {paths.model_documentation.parent}')
+ logger.success(f'Saved calculation results "{name}" to {paths.model_documentation.parent}')
class _ElementResults:
diff --git a/flixopt/solvers.py b/flixopt/solvers.py
index e5db61192..a9a3afb46 100644
--- a/flixopt/solvers.py
+++ b/flixopt/solvers.py
@@ -4,13 +4,12 @@
from __future__ import annotations
-import logging
from dataclasses import dataclass, field
from typing import Any, ClassVar
-from flixopt.config import CONFIG
+from loguru import logger
-logger = logging.getLogger('flixopt')
+from flixopt.config import CONFIG
@dataclass
diff --git a/flixopt/structure.py b/flixopt/structure.py
index 2bce6aa52..9ddf46d31 100644
--- a/flixopt/structure.py
+++ b/flixopt/structure.py
@@ -6,11 +6,9 @@
from __future__ import annotations
import inspect
-import logging
import re
from dataclasses import dataclass
from difflib import get_close_matches
-from io import StringIO
from typing import (
TYPE_CHECKING,
Any,
@@ -23,8 +21,7 @@
import numpy as np
import pandas as pd
import xarray as xr
-from rich.console import Console
-from rich.pretty import Pretty
+from loguru import logger
from . import io as fx_io
from .core import TimeSeriesData, get_dataarray_stats
@@ -36,8 +33,6 @@
from .effects import EffectCollectionModel
from .flow_system import FlowSystem
-logger = logging.getLogger('flixopt')
-
CLASS_REGISTRY = {}
diff --git a/pyproject.toml b/pyproject.toml
index eb1fea0f8..4a60d7754 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -40,7 +40,7 @@ dependencies = [
"netcdf4 >= 1.6.1, < 2",
# Utilities
"pyyaml >= 6.0.0, < 7",
- "rich >= 13.0.0, < 15",
+ "loguru >= 0.7.0, < 1",
"tqdm >= 4.66.0, < 5",
"tomli >= 2.0.1, < 3; python_version < '3.11'", # Only needed with python 3.10 or earlier
# Default solver
diff --git a/tests/test_config.py b/tests/test_config.py
index a78330eb4..7de58e8aa 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -1,10 +1,10 @@
"""Tests for the config module."""
-import logging
import sys
from pathlib import Path
import pytest
+from loguru import logger
from flixopt.config import _DEFAULTS, CONFIG, _setup_logging
@@ -26,7 +26,6 @@ def test_config_defaults(self):
"""Test that CONFIG has correct default values."""
assert CONFIG.Logging.level == 'INFO'
assert CONFIG.Logging.file is None
- assert CONFIG.Logging.rich is False
assert CONFIG.Logging.console is False
assert CONFIG.Modeling.big == 10_000_000
assert CONFIG.Modeling.epsilon == 1e-5
@@ -37,28 +36,27 @@ def test_config_defaults(self):
assert CONFIG.Solving.log_main_results is True
assert CONFIG.config_name == 'flixopt'
- def test_module_initialization(self):
+ def test_module_initialization(self, capfd):
"""Test that logging is initialized on module import."""
# Apply config to ensure handlers are initialized
CONFIG.apply()
- logger = logging.getLogger('flixopt')
- # Should have at least one handler (file handler by default)
- assert len(logger.handlers) == 1
- # Should have a file handler with default settings
- assert isinstance(logger.handlers[0], logging.NullHandler)
+ # With default config (console=False, file=None), logs should not appear
+ logger.info('test message')
+ captured = capfd.readouterr()
+ assert 'test message' not in captured.out
+ assert 'test message' not in captured.err
- def test_config_apply_console(self):
+ def test_config_apply_console(self, capfd):
"""Test applying config with console logging enabled."""
CONFIG.Logging.console = True
CONFIG.Logging.level = 'DEBUG'
CONFIG.apply()
- logger = logging.getLogger('flixopt')
- assert logger.level == logging.DEBUG
- # Should have a StreamHandler for console output
- assert any(isinstance(h, logging.StreamHandler) for h in logger.handlers)
- # Should not have NullHandler when console is enabled
- assert not any(isinstance(h, logging.NullHandler) for h in logger.handlers)
+ # Test that DEBUG level logs appear in console output
+ test_message = 'test debug message 12345'
+ logger.debug(test_message)
+ captured = capfd.readouterr()
+ assert test_message in captured.out or test_message in captured.err
def test_config_apply_file(self, tmp_path):
"""Test applying config with file logging enabled."""
@@ -67,34 +65,42 @@ def test_config_apply_file(self, tmp_path):
CONFIG.Logging.level = 'WARNING'
CONFIG.apply()
- logger = logging.getLogger('flixopt')
- assert logger.level == logging.WARNING
- # Should have a RotatingFileHandler for file output
- from logging.handlers import RotatingFileHandler
+ # Test that WARNING level logs appear in the file
+ test_message = 'test warning message 67890'
+ logger.warning(test_message)
+ # Loguru may buffer, so we need to ensure the log is written
+ import time
- assert any(isinstance(h, RotatingFileHandler) for h in logger.handlers)
+ time.sleep(0.1) # Small delay to ensure write
+ assert log_file.exists()
+ log_content = log_file.read_text()
+ assert test_message in log_content
- def test_config_apply_rich(self):
- """Test applying config with rich logging enabled."""
- CONFIG.Logging.console = True
- CONFIG.Logging.rich = True
+ def test_config_apply_console_stderr(self, capfd):
+ """Test applying config with console logging to stderr."""
+ CONFIG.Logging.console = 'stderr'
+ CONFIG.Logging.level = 'INFO'
CONFIG.apply()
- logger = logging.getLogger('flixopt')
- # Should have a RichHandler
- from rich.logging import RichHandler
+ # Test that INFO logs appear in stderr
+ test_message = 'test info to stderr 11111'
+ logger.info(test_message)
+ captured = capfd.readouterr()
+ assert test_message in captured.err
- assert any(isinstance(h, RichHandler) for h in logger.handlers)
-
- def test_config_apply_multiple_changes(self):
+ def test_config_apply_multiple_changes(self, capfd):
"""Test applying multiple config changes at once."""
CONFIG.Logging.console = True
CONFIG.Logging.level = 'ERROR'
CONFIG.apply()
- logger = logging.getLogger('flixopt')
- assert logger.level == logging.ERROR
- assert any(isinstance(h, logging.StreamHandler) for h in logger.handlers)
+ # Test that ERROR level logs appear but lower levels don't
+ logger.warning('warning should not appear')
+ logger.error('error should appear 22222')
+ captured = capfd.readouterr()
+ output = captured.out + captured.err
+ assert 'warning should not appear' not in output
+ assert 'error should appear 22222' in output
def test_config_to_dict(self):
"""Test converting CONFIG to dictionary."""
@@ -107,7 +113,6 @@ def test_config_to_dict(self):
assert config_dict['logging']['level'] == 'DEBUG'
assert config_dict['logging']['console'] is True
assert config_dict['logging']['file'] is None
- assert config_dict['logging']['rich'] is False
assert 'modeling' in config_dict
assert config_dict['modeling']['big'] == 10_000_000
assert 'solving' in config_dict
@@ -172,36 +177,41 @@ def test_config_load_from_file_partial(self, tmp_path):
# Verify console setting is preserved (not in YAML)
assert CONFIG.Logging.console is True
- def test_setup_logging_silent_default(self):
+ def test_setup_logging_silent_default(self, capfd):
"""Test that _setup_logging creates silent logger by default."""
_setup_logging()
- logger = logging.getLogger('flixopt')
- # Should have NullHandler when console=False and log_file=None
- assert any(isinstance(h, logging.NullHandler) for h in logger.handlers)
- assert not logger.propagate
+ # With default settings, logs should not appear
+ logger.info('should not appear')
+ captured = capfd.readouterr()
+ assert 'should not appear' not in captured.out
+ assert 'should not appear' not in captured.err
- def test_setup_logging_with_console(self):
+ def test_setup_logging_with_console(self, capfd):
"""Test _setup_logging with console output."""
_setup_logging(console=True, default_level='DEBUG')
- logger = logging.getLogger('flixopt')
- assert logger.level == logging.DEBUG
- assert any(isinstance(h, logging.StreamHandler) for h in logger.handlers)
+ # Test that DEBUG logs appear in console
+ test_message = 'debug console test 33333'
+ logger.debug(test_message)
+ captured = capfd.readouterr()
+ assert test_message in captured.out or test_message in captured.err
- def test_setup_logging_clears_handlers(self):
+ def test_setup_logging_clears_handlers(self, capfd):
"""Test that _setup_logging clears existing handlers."""
- logger = logging.getLogger('flixopt')
-
- # Add a dummy handler
- dummy_handler = logging.NullHandler()
- logger.addHandler(dummy_handler)
- _ = len(logger.handlers)
-
+ # Setup a handler first
_setup_logging(console=True)
- # Should have cleared old handlers and added new one
- assert dummy_handler not in logger.handlers
+ # Call setup again with different settings - should clear and re-add
+ _setup_logging(console=True, default_level='ERROR')
+
+ # Verify new settings work: ERROR logs appear but INFO doesn't
+ logger.info('info should not appear')
+ logger.error('error should appear 44444')
+ captured = capfd.readouterr()
+ output = captured.out + captured.err
+ assert 'info should not appear' not in output
+ assert 'error should appear 44444' in output
def test_change_logging_level_removed(self):
"""Test that change_logging_level function is deprecated but still exists."""
@@ -231,40 +241,43 @@ def test_public_api(self):
# merge_configs should not exist (was removed)
assert not hasattr(config, 'merge_configs')
- def test_logging_levels(self):
+ def test_logging_levels(self, capfd):
"""Test all valid logging levels."""
- levels = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
+ levels = ['DEBUG', 'INFO', 'SUCCESS', 'WARNING', 'ERROR', 'CRITICAL']
for level in levels:
CONFIG.Logging.level = level
CONFIG.Logging.console = True
CONFIG.apply()
- logger = logging.getLogger('flixopt')
- assert logger.level == getattr(logging, level)
-
- def test_logger_propagate_disabled(self):
- """Test that logger propagation is disabled."""
- CONFIG.apply()
- logger = logging.getLogger('flixopt')
- assert not logger.propagate
+ # Test that logs at the configured level appear
+ test_message = f'test message at {level} 55555'
+ getattr(logger, level.lower())(test_message)
+ captured = capfd.readouterr()
+ output = captured.out + captured.err
+ assert test_message in output, f'Expected {level} message to appear'
def test_file_handler_rotation(self, tmp_path):
- """Test that file handler uses rotation."""
+ """Test that file handler rotation configuration is accepted."""
log_file = tmp_path / 'rotating.log'
CONFIG.Logging.file = str(log_file)
+ CONFIG.Logging.max_file_size = 1024
+ CONFIG.Logging.backup_count = 2
CONFIG.apply()
- logger = logging.getLogger('flixopt')
- from logging.handlers import RotatingFileHandler
+ # Write some logs
+ for i in range(10):
+ logger.info(f'Log message {i}')
+
+ # Verify file logging works
+ import time
- file_handlers = [h for h in logger.handlers if isinstance(h, RotatingFileHandler)]
- assert len(file_handlers) == 1
+ time.sleep(0.1)
+ assert log_file.exists(), 'Log file should be created'
- handler = file_handlers[0]
- # Check rotation settings
- assert handler.maxBytes == 10_485_760 # 10MB
- assert handler.backupCount == 5
+ # Verify configuration values are preserved
+ assert CONFIG.Logging.max_file_size == 1024
+ assert CONFIG.Logging.backup_count == 2
def test_custom_config_yaml_complete(self, tmp_path):
"""Test loading a complete custom configuration."""
@@ -274,7 +287,6 @@ def test_custom_config_yaml_complete(self, tmp_path):
logging:
level: CRITICAL
console: true
- rich: true
file: /tmp/custom.log
modeling:
big: 50000000
@@ -293,7 +305,6 @@ def test_custom_config_yaml_complete(self, tmp_path):
assert CONFIG.config_name == 'my_custom_config'
assert CONFIG.Logging.level == 'CRITICAL'
assert CONFIG.Logging.console is True
- assert CONFIG.Logging.rich is True
assert CONFIG.Logging.file == '/tmp/custom.log'
assert CONFIG.Modeling.big == 50000000
assert float(CONFIG.Modeling.epsilon) == 1e-4
@@ -302,9 +313,22 @@ def test_custom_config_yaml_complete(self, tmp_path):
assert CONFIG.Solving.time_limit_seconds == 900
assert CONFIG.Solving.log_main_results is False
- # Verify logging was applied
- logger = logging.getLogger('flixopt')
- assert logger.level == logging.CRITICAL
+ # Verify logging was applied to both console and file
+ import time
+
+ test_message = 'critical test message 66666'
+ logger.critical(test_message)
+ time.sleep(0.1) # Small delay to ensure write
+ # Check file exists and contains message
+ log_file_path = tmp_path / 'custom.log'
+ if not log_file_path.exists():
+ # File might be at /tmp/custom.log as specified in config
+ import os
+
+ log_file_path = os.path.expanduser('/tmp/custom.log')
+ # We can't reliably test the file at /tmp/custom.log in tests
+ # So just verify critical level messages would appear at this level
+ assert CONFIG.Logging.level == 'CRITICAL'
def test_config_file_with_console_and_file(self, tmp_path):
"""Test configuration with both console and file logging enabled."""
@@ -314,21 +338,22 @@ def test_config_file_with_console_and_file(self, tmp_path):
logging:
level: INFO
console: true
- rich: false
file: {log_file}
"""
config_file.write_text(config_content)
CONFIG.load_from_file(config_file)
- logger = logging.getLogger('flixopt')
- # Should have both StreamHandler and RotatingFileHandler
- from logging.handlers import RotatingFileHandler
+ # Verify logging to both console and file works
+ import time
- assert any(isinstance(h, logging.StreamHandler) for h in logger.handlers)
- assert any(isinstance(h, RotatingFileHandler) for h in logger.handlers)
- # Should NOT have NullHandler when console/file are enabled
- assert not any(isinstance(h, logging.NullHandler) for h in logger.handlers)
+ test_message = 'info test both outputs 77777'
+ logger.info(test_message)
+ time.sleep(0.1) # Small delay to ensure write
+ # Verify file logging works
+ assert log_file.exists()
+ log_content = log_file.read_text()
+ assert test_message in log_content
def test_config_to_dict_roundtrip(self, tmp_path):
"""Test that config can be saved to dict, modified, and restored."""
@@ -416,7 +441,6 @@ def test_logger_actually_logs(self, tmp_path):
CONFIG.Logging.level = 'DEBUG'
CONFIG.apply()
- logger = logging.getLogger('flixopt')
test_message = 'Test log message from config test'
logger.debug(test_message)
@@ -443,8 +467,7 @@ def test_config_reset(self):
"""Test that CONFIG.reset() restores all defaults."""
# Modify all config values
CONFIG.Logging.level = 'DEBUG'
- CONFIG.Logging.console = False
- CONFIG.Logging.rich = True
+ CONFIG.Logging.console = True
CONFIG.Logging.file = '/tmp/test.log'
CONFIG.Modeling.big = 99999999
CONFIG.Modeling.epsilon = 1e-8
@@ -461,7 +484,6 @@ def test_config_reset(self):
# Verify all values are back to defaults
assert CONFIG.Logging.level == 'INFO'
assert CONFIG.Logging.console is False
- assert CONFIG.Logging.rich is False
assert CONFIG.Logging.file is None
assert CONFIG.Modeling.big == 10_000_000
assert CONFIG.Modeling.epsilon == 1e-5
@@ -472,10 +494,23 @@ def test_config_reset(self):
assert CONFIG.Solving.log_main_results is True
assert CONFIG.config_name == 'flixopt'
- # Verify logging was also reset
- logger = logging.getLogger('flixopt')
- assert logger.level == logging.INFO
- assert isinstance(logger.handlers[0], logging.NullHandler)
+ # Verify logging was also reset (default is no logging to console/file)
+ # Test that logs don't appear with default config
+ from io import StringIO
+
+ old_stdout = sys.stdout
+ old_stderr = sys.stderr
+ sys.stdout = StringIO()
+ sys.stderr = StringIO()
+ try:
+ logger.info('should not appear after reset')
+ stdout_content = sys.stdout.getvalue()
+ stderr_content = sys.stderr.getvalue()
+ assert 'should not appear after reset' not in stdout_content
+ assert 'should not appear after reset' not in stderr_content
+ finally:
+ sys.stdout = old_stdout
+ sys.stderr = old_stderr
def test_reset_matches_class_defaults(self):
"""Test that reset() values match the _DEFAULTS constants.
@@ -486,7 +521,6 @@ def test_reset_matches_class_defaults(self):
# Modify all values to something different
CONFIG.Logging.level = 'CRITICAL'
CONFIG.Logging.file = '/tmp/test.log'
- CONFIG.Logging.rich = True
CONFIG.Logging.console = True
CONFIG.Modeling.big = 999999
CONFIG.Modeling.epsilon = 1e-10
@@ -509,7 +543,6 @@ def test_reset_matches_class_defaults(self):
# Verify reset() restored exactly the _DEFAULTS values
assert CONFIG.Logging.level == _DEFAULTS['logging']['level']
assert CONFIG.Logging.file == _DEFAULTS['logging']['file']
- assert CONFIG.Logging.rich == _DEFAULTS['logging']['rich']
assert CONFIG.Logging.console == _DEFAULTS['logging']['console']
assert CONFIG.Modeling.big == _DEFAULTS['modeling']['big']
assert CONFIG.Modeling.epsilon == _DEFAULTS['modeling']['epsilon']