diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2d21c0017..68ea854ca 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -26,8 +26,15 @@ Classify the change according to the following categories:
 ##### Removed
 ### Patches
 
-## v3.17.1
+## v3.17.2
 ### Minor Updates
+##### Added
+- Added the `/job/get_timeseries_table` endpoint, which takes a list of `run_uuid`s and returns a downloadable timeseries results spreadsheet.
+- New custom table option `custom_timeseries_energy_demand` for the `/job/get_timeseries_table` endpoint.
+##### Changed
+- Increased the `monthly_totals_kwh` maximum value to 1.0e9.
+
+## v3.17.1
 ##### Changed
 - For the `/get_load_metrics` endpoint, update response field names to be consistent with `/simulated_load` for `load_type=electric`: `annual_kwh`, `max_kw` (annual peak load), `monthly_totals_kwh`, and `monthly_peaks_kw`. In a future update, this endpoint will take `load_type` as an input and return the load metrics with consistent units as `/simulated_load` for heating and cooling `load_type`s, such as `monthly_mmbtu` for the heating load types.
 
diff --git a/reoptjl/custom_timeseries_table_config.py b/reoptjl/custom_timeseries_table_config.py
new file mode 100644
index 000000000..c2fecf8bf
--- /dev/null
+++ b/reoptjl/custom_timeseries_table_config.py
@@ -0,0 +1,243 @@
+# custom_timeseries_table_config.py
+from reoptjl.custom_timeseries_table_helpers import safe_get_list, safe_get_value, safe_get
+
+"""
+Timeseries Table Configuration
+===============================
+This file defines configurations for timeseries Excel tables that display hourly or sub-hourly data.
+Each configuration specifies which columns to include, how to extract the data, and how to format the Excel output.
+
+Naming Convention:
+------------------
+Structure: custom_timeseries_<description>
+
+- `custom_timeseries_`: Prefix indicating a timeseries table configuration
+- `<description>`: Descriptive name for the specific timeseries configuration
+
+Examples:
+- custom_timeseries_energy_demand: Configuration for energy and demand rate timeseries
+- custom_timeseries_emissions: Configuration for emissions timeseries
+- custom_timeseries_loads: Configuration for load profiles
+
+Guidelines:
+- Use lowercase letters and underscores
+- Keep names descriptive and concise
+- Each configuration is a dictionary with a "columns" list and a "formatting" dictionary
+
+Column Dictionary Structure:
+-----------------------------
+Each column configuration should have:
+{
+    "label": str,                 # Column header text
+    "key": str,                   # Unique identifier for the column
+    "timeseries_path": callable,  # Function of the results dict that returns the data, e.g., lambda df: safe_get(df, "outputs.ElectricLoad.load_series_kw")
+    "is_base_column": bool,       # True if column comes from first scenario only, False if repeated for all scenarios
+    "units": str,                 # Optional: Units to display in header (e.g., "($/kWh)", "(kW)")
+    "column_width": float,        # Optional: Column width in Excel (default: 15)
+    "num_format": str,            # Optional: Excel number format (e.g., '#,##0', 'm/d/yyyy h:mm')
+}
+
+Formatting Configuration:
+-------------------------
+Each configuration should also include a "formatting" dictionary with Excel formatting options:
+{
+    "worksheet_name": str,                # Name of the Excel worksheet
+    "base_header_color": str,             # Hex color for base column headers (e.g., '#0B5E90')
+    "scenario_header_colors": list[str],  # List of hex colors for scenario column headers
+    "header_format": dict,                # xlsxwriter format options for headers (bold, font_color, border, etc.)
+ "data_format": dict, # xlsxwriter format options for data cells (border, align, valign) + "freeze_panes": tuple, # Row and column to freeze (e.g., (1, 0) freezes top row) +} + +Special Column Types: +--------------------- +1. DateTime column: Must have key="datetime" and will be auto-generated based on year and time_steps_per_hour +2. Base columns: Set is_base_column=True for columns that only use data from the first run_uuid +3. Scenario columns: Set is_base_column=False for columns that repeat for each run_uuid + +Rate Name Headers: +------------------ +For scenario columns (is_base_column=False), the column header will automatically include the rate name +from inputs.ElectricTariff.urdb_metadata.rate_name for each scenario. +""" + +# Formatting configuration for energy and demand rate timeseries +timeseries_energy_demand_formatting = { + "worksheet_name": "Timeseries Data", + "base_header_color": "#0B5E90", + "scenario_header_colors": [ + "#50AEE9", # Blue (first rate) + '#2E7D32', # Green (second rate) + '#D32F2F', # Red (third rate) + '#F57C00', # Orange (fourth rate) + '#7B1FA2', # Purple (fifth rate) + '#0097A7', # Cyan (sixth rate) + '#C2185B', # Pink (seventh rate) + '#5D4037', # Brown (eighth rate) + ], + "header_format": { + 'bold': True, + 'font_color': 'white', + 'border': 1, + 'align': 'center', + 'valign': 'vcenter', + 'text_wrap': True + }, + "data_format": { + 'border': 1, + 'align': 'center', + 'valign': 'vcenter' + }, + "freeze_panes": (1, 0) # Freeze top row +} + +# Configuration for energy and demand rate timeseries +custom_timeseries_energy_demand = { + "columns": [ + { + "label": "Date Timestep", + "key": "datetime", + "timeseries_path": lambda df: safe_get(df, "inputs.ElectricLoad.year"), # Used to generate datetime column based on year and time_steps_per_hour + "is_base_column": True, + "column_width": 15, + "num_format": "m/d/yyyy h:mm" + }, + { + "label": "Load (kW)", + "key": "load_kw", + "timeseries_path": lambda df: safe_get(df, "outputs.ElectricLoad.load_series_kw"), + "is_base_column": True, + "column_width": 11, + "num_format": "#,##0" + }, + { + "label": "Peak Monthly Load (kW)", + "key": "peak_monthly_load_kw", + "timeseries_path": lambda df: safe_get(df, "outputs.ElectricLoad.monthly_peaks_kw"), # 12-element array, needs special handling to repeat for each timestep + "is_base_column": True, + "column_width": 15, + "num_format": "#,##0" + }, + { + "label": "Energy Charge", + "key": "energy_charge", + "timeseries_path": lambda df: safe_get(df, "outputs.ElectricTariff.energy_rate_average_series"), + "is_base_column": False, # Repeats for each scenario + "units": "($/kWh)", + "column_width": 25, + "num_format": "#,##0.00000" + }, + { + "label": "Demand Charge", + "key": "demand_charge", + "timeseries_path": lambda df: safe_get(df, "outputs.ElectricTariff.demand_rate_average_series"), + "is_base_column": False, # Repeats for each scenario + "units": "($/kW)", + "column_width": 25, + "num_format": "#,##0.00" + } + ], + "formatting": timeseries_energy_demand_formatting +} + +# Example configuration for emissions timeseries (can be expanded as needed) +custom_timeseries_emissions = { + "columns": [ + { + "label": "Date Timestep", + "key": "datetime", + "timeseries_path": lambda df: safe_get(df, "inputs.ElectricLoad.year"), + "is_base_column": True, + "column_width": 18, + "num_format": "m/d/yyyy h:mm" + }, + { + "label": "Grid Emissions", + "key": "grid_emissions", + "timeseries_path": lambda df: safe_get(df, 
"inputs.ElectricUtility.emissions_factor_series_lb_CO2_per_kwh"), + "is_base_column": False, + "units": "(lb CO2/kWh)", + "column_width": 15, + "num_format": "#,##0.00000" + }, + { + "label": "Grid Energy", + "key": "grid_to_load", + "timeseries_path": lambda df: safe_get(df, "outputs.ElectricUtility.electric_to_load_series_kw"), + "is_base_column": False, + "units": "(kWh)", + "column_width": 15, + "num_format": "#,##0.00" + } + ], + "formatting": { + "worksheet_name": "Emissions Data", + "base_header_color": "#0B5E90", + "scenario_header_colors": ["#50AEE9", '#2E7D32', '#D32F2F', '#F57C00', '#7B1FA2', '#0097A7', '#C2185B', '#5D4037'], + "header_format": { + 'bold': True, + 'font_color': 'white', + 'border': 1, + 'align': 'center', + 'valign': 'vcenter', + 'text_wrap': True + }, + "data_format": { + 'border': 1, + 'align': 'center', + 'valign': 'vcenter' + }, + "freeze_panes": (1, 0) + } +} + +# Example configuration for load profiles (can be expanded as needed) +custom_timeseries_loads = { + "columns": [ + { + "label": "Date Timestep", + "key": "datetime", + "timeseries_path": lambda df: safe_get(df, "inputs.ElectricLoad.year"), + "is_base_column": True, + "column_width": 18, + "num_format": "m/d/yyyy h:mm" + }, + { + "label": "Total Load", + "key": "total_load", + "timeseries_path": lambda df: safe_get(df, "outputs.ElectricLoad.load_series_kw"), + "is_base_column": True, + "units": "(kW)", + "column_width": 15, + "num_format": "#,##0.00" + }, + { + "label": "Critical Load", + "key": "critical_load", + "timeseries_path": lambda df: safe_get(df, "outputs.ElectricLoad.critical_load_series_kw"), + "is_base_column": True, + "units": "(kW)", + "column_width": 15, + "num_format": "#,##0.00" + } + ], + "formatting": { + "worksheet_name": "Load Profiles", + "base_header_color": "#0B5E90", + "scenario_header_colors": ["#50AEE9", '#2E7D32', '#D32F2F', '#F57C00', '#7B1FA2', '#0097A7', '#C2185B', '#5D4037'], + "header_format": { + 'bold': True, + 'font_color': 'white', + 'border': 1, + 'align': 'center', + 'valign': 'vcenter', + 'text_wrap': True + }, + "data_format": { + 'border': 1, + 'align': 'center', + 'valign': 'vcenter' + }, + "freeze_panes": (1, 0) + } +} diff --git a/reoptjl/custom_timeseries_table_helpers.py b/reoptjl/custom_timeseries_table_helpers.py new file mode 100644 index 000000000..8b3a41bac --- /dev/null +++ b/reoptjl/custom_timeseries_table_helpers.py @@ -0,0 +1,163 @@ +# custom_timeseries_table_helpers.py +from typing import Dict, Any, List +from datetime import datetime, timedelta +import calendar + +def generate_datetime_column(year: int, time_steps_per_hour: int) -> List[str]: + """ + Generate datetime strings for the first column based on year and time_steps_per_hour. 
+ + Args: + year: The year for the datetime series + time_steps_per_hour: Number of time steps per hour (1, 2, or 4) + + Returns: + List of datetime strings formatted as "M/D/YYYY H:MM" + """ + # Check if leap year and adjust days accordingly + is_leap = calendar.isleap(year) + total_days = 365 # Always use 365 days, even for leap years + + # Calculate time step increment in minutes + minutes_per_step = 60 // time_steps_per_hour + + datetime_list = [] + start_date = datetime(year, 1, 1, 0, 0) + + # Calculate total number of time steps + total_steps = total_days * 24 * time_steps_per_hour + + for step in range(total_steps): + current_time = start_date + timedelta(minutes=step * minutes_per_step) + # Format: M/D/YYYY H:MM (Windows-compatible formatting) + month = current_time.month + day = current_time.day + year = current_time.year + hour = current_time.hour + minute = current_time.minute + formatted_time = f"{month}/{day}/{year} {hour}:{minute:02d}" + datetime_list.append(formatted_time) + + return datetime_list + + +def get_monthly_peak_for_timestep(timestep_index: int, monthly_peaks: List[float], time_steps_per_hour: int) -> float: + """ + Get the monthly peak value for a given timestep index. + + Args: + timestep_index: The index of the current timestep + monthly_peaks: List of 12 monthly peak values + time_steps_per_hour: Number of time steps per hour + + Returns: + The monthly peak value for the month containing this timestep + """ + # Calculate which month this timestep belongs to + # Approximate days per month + days_in_months = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] + + steps_per_day = 24 * time_steps_per_hour + cumulative_steps = 0 + + for month_idx, days in enumerate(days_in_months): + cumulative_steps += days * steps_per_day + if timestep_index < cumulative_steps: + return monthly_peaks[month_idx] if month_idx < len(monthly_peaks) else 0 + + # Default to last month if we're beyond December + return monthly_peaks[-1] if monthly_peaks else 0 + + +def safe_get_list(data: Dict[str, Any], key: str, default: List = None) -> List: + """ + Safely get a list value from nested dictionary. + + Args: + data: The dictionary to search + key: Dot-separated key path (e.g., "outputs.ElectricLoad.load_series_kw") + default: Default value if key not found + + Returns: + The found list or default value + """ + if default is None: + default = [] + + keys = key.split('.') + current = data + + try: + for k in keys: + if isinstance(current, dict): + current = current.get(k) + else: + return default + + if current is None: + return default + + return current if isinstance(current, list) else default + except (KeyError, TypeError, AttributeError): + return default + + +def safe_get_value(data: Dict[str, Any], key: str, default: Any = None) -> Any: + """ + Safely get a value from nested dictionary. + + Args: + data: The dictionary to search + key: Dot-separated key path + default: Default value if key not found + + Returns: + The found value or default + """ + keys = key.split('.') + current = data + + try: + for k in keys: + if isinstance(current, dict): + current = current.get(k) + else: + return default + + if current is None: + return default + + return current + except (KeyError, TypeError, AttributeError): + return default + + +def safe_get(data: Dict[str, Any], key: str, default: Any = None) -> Any: + """ + Safely get any value from nested dictionary (works for lists, values, etc.). + This is a general-purpose function that returns whatever is found at the path. 
+ + Args: + data: The dictionary to search + key: Dot-separated key path + default: Default value if key not found + + Returns: + The found value (could be list, dict, string, number, etc.) or default + """ + keys = key.split('.') + current = data + + try: + for k in keys: + if isinstance(current, dict): + current = current.get(k) + else: + return default + + if current is None: + return default + + return current + except (KeyError, TypeError, AttributeError): + return default diff --git a/reoptjl/migrations/0111_alter_electricloadinputs_monthly_totals_kwh.py b/reoptjl/migrations/0111_alter_electricloadinputs_monthly_totals_kwh.py new file mode 100644 index 000000000..a816133c4 --- /dev/null +++ b/reoptjl/migrations/0111_alter_electricloadinputs_monthly_totals_kwh.py @@ -0,0 +1,20 @@ +# Generated by Django 4.2.26 on 2025-12-01 23:45 + +import django.contrib.postgres.fields +import django.core.validators +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('reoptjl', '0110_electricloadinputs_monthly_peaks_kw_and_more'), + ] + + operations = [ + migrations.AlterField( + model_name='electricloadinputs', + name='monthly_totals_kwh', + field=django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(blank=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(1000000000.0)]), blank=True, default=list, help_text="Monthly site energy consumption (an array 12 entries long), in kWh, used to scale either loads_kw series (with normalize_and_scale_load_profile_input) or the simulated default building load profile for the site's climate zone", size=None), + ), + ] diff --git a/reoptjl/models.py b/reoptjl/models.py index 4d134884e..4d0a27450 100644 --- a/reoptjl/models.py +++ b/reoptjl/models.py @@ -1382,7 +1382,7 @@ class ElectricLoadInputs(BaseModel, models.Model): models.FloatField( validators=[ MinValueValidator(0), - MaxValueValidator(1.0e8) + MaxValueValidator(1.0e9) ], blank=True ), diff --git a/reoptjl/urls.py b/reoptjl/urls.py index 16e40949e..6de54a354 100644 --- a/reoptjl/urls.py +++ b/reoptjl/urls.py @@ -29,5 +29,6 @@ re_path(r'^pv_cost_defaults/?$', views.pv_cost_defaults), re_path(r'^summary_by_runuuids/?$', views.summary_by_runuuids), re_path(r'^link_run_to_portfolios/?$', views.link_run_uuids_to_portfolio_uuid), - re_path(r'^get_load_metrics/?$', views.get_load_metrics) + re_path(r'^get_load_metrics/?$', views.get_load_metrics), + re_path(r'^job/get_timeseries_table/?$', views.get_timeseries_table) ] diff --git a/reoptjl/views.py b/reoptjl/views.py index 22d152ca6..efe4a7733 100644 --- a/reoptjl/views.py +++ b/reoptjl/views.py @@ -26,6 +26,7 @@ import pandas as pd import json import logging +from datetime import datetime from reoptjl.custom_table_helpers import flatten_dict, clean_data_dict, sum_vectors, colnum_string from reoptjl.custom_table_config import * @@ -2362,4 +2363,241 @@ def get_bau_column(col): ############################################################################################################################## ################################################### END Results Table ######################################################### +############################################################################################################################## + +############################################################################################################################## +################################################# START Get 
Timeseries Table ##################################################### +############################################################################################################################## + +def get_timeseries_table(request: Any) -> HttpResponse: + """ + Generate an Excel file with timeseries data for one or more scenarios. + Accepts multiple run_uuid values via GET request parameters and an optional table_config_name. + + Query Parameters: + - run_uuid[0], run_uuid[1], etc.: UUIDs of scenarios to include + - table_config_name: Name of configuration to use (default: 'custom_timeseries_energy_demand') + + The columns, formatting, and data extraction are defined in custom_timeseries_table_config.py + """ + from reoptjl.custom_timeseries_table_helpers import ( + generate_datetime_column, + get_monthly_peak_for_timestep, + safe_get_list, + safe_get_value + ) + import reoptjl.custom_timeseries_table_config as timeseries_config + + if request.method != 'GET': + return JsonResponse({"Error": "Method not allowed. This endpoint only supports GET requests."}, status=405) + + try: + # Get configuration name from request (default to energy_demand) + table_config_name = request.GET.get('table_config_name', 'custom_timeseries_energy_demand') + + # Load the configuration + target_config = getattr(timeseries_config, table_config_name, None) + if not target_config: + return JsonResponse({"Error": f"Invalid table configuration: {table_config_name}. Please provide a valid configuration name."}, status=400) + + # Extract configuration components + columns_config = target_config.get('columns', []) + formatting_config = target_config.get('formatting', {}) + + # Extract run_uuid values from GET parameters + run_uuids = [request.GET[key] for key in request.GET.keys() if key.startswith('run_uuid[')] + + if not run_uuids: + return JsonResponse({"Error": "No run_uuids provided. Please include at least one run_uuid in the request."}, status=400) + + # Validate UUIDs + for r_uuid in run_uuids: + try: + uuid.UUID(r_uuid) + except ValueError: + return JsonResponse({"Error": f"Invalid UUID format: {r_uuid}. 
Ensure that each run_uuid is a valid UUID."}, status=400) + + # Fetch data for all run_uuids + scenarios_data = [] + for run_uuid in run_uuids: + response = results(request, run_uuid) + if response.status_code == 200: + data = json.loads(response.content) + scenarios_data.append({ + 'run_uuid': run_uuid, + 'data': data + }) + else: + return JsonResponse({"Error": f"Failed to fetch data for run_uuid {run_uuid}"}, status=500) + + if not scenarios_data: + return JsonResponse({"Error": "No valid scenario data found."}, status=500) + + # Use first scenario for base data extraction + first_scenario = scenarios_data[0]['data'] + + # Extract metadata from first scenario + year = safe_get_value(first_scenario, 'inputs.ElectricLoad.year', 2017) + time_steps_per_hour = safe_get_value(first_scenario, 'inputs.Settings.time_steps_per_hour', 1) + + # Generate datetime column + datetime_col = generate_datetime_column(year, time_steps_per_hour) + + # Log for debugging + log.info(f"get_timeseries_table - year: {year}, time_steps_per_hour: {time_steps_per_hour}") + log.info(f"get_timeseries_table - datetime_col length: {len(datetime_col)}") + + # Create Excel workbook + output = io.BytesIO() + workbook = xlsxwriter.Workbook(output, {'in_memory': True}) + worksheet_name = formatting_config.get('worksheet_name') + worksheet = workbook.add_worksheet(worksheet_name) + + # Create formats from configuration + base_header_color = formatting_config.get('base_header_color') + scenario_header_colors = formatting_config.get('scenario_header_colors') + header_format_opts = formatting_config.get('header_format') + base_data_opts = formatting_config.get('data_format') + + # Create base header format + base_header_opts = {'bg_color': base_header_color} + base_header_opts.update(header_format_opts) + base_header_format = workbook.add_format(base_header_opts) + + # Create scenario header formats + scenario_header_formats = [] + for color in scenario_header_colors: + scenario_opts = {'bg_color': color} + scenario_opts.update(header_format_opts) + scenario_header_formats.append(workbook.add_format(scenario_opts)) + + # Build column format cache + column_formats = {} + for col in columns_config: + num_format = col.get('num_format', '') + if num_format: + format_opts = base_data_opts.copy() + format_opts['num_format'] = num_format + column_formats[col['key']] = workbook.add_format(format_opts) + else: + column_formats[col['key']] = workbook.add_format(base_data_opts) + + # Set column widths and write headers + col_idx = 0 + base_columns = [col for col in columns_config if col.get('is_base_column', False)] + scenario_columns = [col for col in columns_config if not col.get('is_base_column', False)] + + # Write base column headers + for col in base_columns: + width = col.get('column_width', 15) + worksheet.set_column(col_idx, col_idx, width) + worksheet.write(0, col_idx, col['label'], base_header_format) + col_idx += 1 + + # Write scenario column headers (repeated for each scenario) + for scenario_idx, scenario in enumerate(scenarios_data): + # Get rate name from urdb_metadata for header + rate_name = safe_get_value(scenario['data'], 'inputs.ElectricTariff.urdb_metadata.rate_name', f'Scenario {scenario_idx + 1}') + + # Use different colored header for each scenario (cycle through colors) + scenario_header = scenario_header_formats[scenario_idx % len(scenario_header_formats)] + + for col in scenario_columns: + width = col.get('column_width', 15) + worksheet.set_column(col_idx, col_idx, width) + + # Build header text with units and 
rate name + header_text = col['label'] + if col.get('units'): + header_text = f"{header_text}: \n{rate_name} {col['units']}" + else: + header_text = f"{header_text}: \n{rate_name}" + + worksheet.write(0, col_idx, header_text, scenario_header) + col_idx += 1 + + # Extract base column data from first scenario + base_column_data = {} + for col in base_columns: + if col['key'] == 'datetime': + # Special handling for datetime column + base_column_data['datetime'] = datetime_col + elif col['key'] == 'peak_monthly_load_kw': + # Special handling for monthly peaks - expand to all timesteps + monthly_peaks = safe_get_list(first_scenario, 'outputs.ElectricLoad.monthly_peaks_kw', []) + base_column_data['peak_monthly_load_kw'] = [ + get_monthly_peak_for_timestep(i, monthly_peaks, time_steps_per_hour) + for i in range(len(datetime_col)) + ] + else: + # Extract timeseries data using lambda function + path_func = col['timeseries_path'] + data = path_func(first_scenario) + base_column_data[col['key']] = data if isinstance(data, list) else [] + + # Extract scenario column data for all scenarios + scenario_column_data = [] + for scenario_idx, scenario in enumerate(scenarios_data): + scenario_data = {} + for col in scenario_columns: + path_func = col['timeseries_path'] + data = path_func(scenario['data']) + scenario_data[col['key']] = data if isinstance(data, list) else [] + + # Log on first scenario for debugging + if scenario_idx == 0: + log.info(f"get_timeseries_table - {col['key']} length: {len(scenario_data[col['key']])}") + + scenario_column_data.append(scenario_data) + + # Write data rows + for row_idx in range(len(datetime_col)): + col_idx = 0 + + # Write base column data + for col in base_columns: + data_list = base_column_data.get(col['key'], []) + + if col['key'] == 'datetime': + # Special handling for datetime - convert string to Excel datetime + datetime_str = data_list[row_idx] if row_idx < len(data_list) else '' + if datetime_str: + dt = datetime.strptime(datetime_str, '%m/%d/%Y %H:%M') + worksheet.write_datetime(row_idx + 1, col_idx, dt, column_formats[col['key']]) + else: + value = data_list[row_idx] if row_idx < len(data_list) else 0 + worksheet.write(row_idx + 1, col_idx, value, column_formats[col['key']]) + + col_idx += 1 + + # Write scenario column data + for scenario_data in scenario_column_data: + for col in scenario_columns: + data_list = scenario_data.get(col['key'], []) + value = data_list[row_idx] if row_idx < len(data_list) else 0 + worksheet.write(row_idx + 1, col_idx, value, column_formats[col['key']]) + col_idx += 1 + + # Freeze panes based on configuration + freeze_panes = formatting_config.get('freeze_panes', (1, 0)) + worksheet.freeze_panes(*freeze_panes) + + # Close workbook + workbook.close() + output.seek(0) + + # Return as downloadable file + response = HttpResponse( + output, + content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' + ) + response['Content-Disposition'] = 'attachment; filename="get_timeseries_table.xlsx"' + return response + + except Exception as e: + log.error(f"Error in get_timeseries_table: {e}") + return JsonResponse({"Error": f"An unexpected error occurred: {str(e)}"}, status=500) + +############################################################################################################################## +################################################### END Get Timeseries Table ##################################################### 
############################################################################################################################## \ No newline at end of file
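Example usage (illustrative, not part of the patch): a minimal sketch of how a client might call the new `/job/get_timeseries_table` endpoint. The base URL and the two `run_uuid` values are placeholders/assumptions; the `run_uuid[n]` query-parameter names and the default `table_config_name` follow the view in reoptjl/views.py above.

# example_get_timeseries_table.py -- illustrative only; BASE_URL and UUIDs are placeholders
import requests

BASE_URL = "https://my-reopt-api.example.com/v3"  # assumption: adjust to your deployment's URL prefix

params = {
    "run_uuid[0]": "11111111-1111-1111-1111-111111111111",   # placeholder run_uuid
    "run_uuid[1]": "22222222-2222-2222-2222-222222222222",   # placeholder run_uuid
    "table_config_name": "custom_timeseries_energy_demand",  # optional; this is the default
}

resp = requests.get(f"{BASE_URL}/job/get_timeseries_table", params=params, timeout=300)
resp.raise_for_status()

# The view returns an .xlsx attachment named get_timeseries_table.xlsx
with open("get_timeseries_table.xlsx", "wb") as f:
    f.write(resp.content)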
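For illustration, a sketch of how an additional configuration could follow the column/formatting structure documented in custom_timeseries_table_config.py. The name `custom_timeseries_storage` and the `ElectricStorage` output path are hypothetical and not part of this change; a configuration defined like this inside custom_timeseries_table_config.py would be selectable via `table_config_name=custom_timeseries_storage`, since the view looks configurations up with getattr() on that module.

# Hypothetical additional configuration (sketch); field names marked "assumed" should be
# checked against the results JSON of the REopt version in use.
from reoptjl.custom_timeseries_table_helpers import safe_get

custom_timeseries_storage = {
    "columns": [
        {
            "label": "Date Timestep",
            "key": "datetime",   # auto-generated from year and time_steps_per_hour
            "timeseries_path": lambda df: safe_get(df, "inputs.ElectricLoad.year"),
            "is_base_column": True,
            "column_width": 18,
            "num_format": "m/d/yyyy h:mm",
        },
        {
            "label": "Battery to Load",
            "key": "storage_to_load",
            # assumed output key for illustration only
            "timeseries_path": lambda df: safe_get(df, "outputs.ElectricStorage.storage_to_load_series_kw"),
            "is_base_column": False,   # repeated (and color-coded) per scenario
            "units": "(kW)",
            "column_width": 15,
            "num_format": "#,##0.00",
        },
    ],
    "formatting": {
        "worksheet_name": "Storage Data",
        "base_header_color": "#0B5E90",
        "scenario_header_colors": ["#50AEE9", "#2E7D32"],
        "header_format": {"bold": True, "font_color": "white", "border": 1,
                          "align": "center", "valign": "vcenter", "text_wrap": True},
        "data_format": {"border": 1, "align": "center", "valign": "vcenter"},
        "freeze_panes": (1, 0),   # freeze the header row
    },
}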
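Finally, a small self-contained sketch of the helper behavior in custom_timeseries_table_helpers.py; the expected values follow directly from the code in the diff (365-day year even for leap years, month boundaries from the hard-coded day counts), and the sample results dict is constructed locally rather than taken from a real run.

# Sanity-check sketch for the new helpers (illustrative only)
from reoptjl.custom_timeseries_table_helpers import (
    generate_datetime_column, get_monthly_peak_for_timestep, safe_get)

dt = generate_datetime_column(2017, 1)
assert len(dt) == 8760              # 365 days * 24 hours * 1 step/hour
assert dt[0] == "1/1/2017 0:00"     # "M/D/YYYY H:MM" formatting

peaks = [100.0] * 12
peaks[5] = 250.0                    # June peak
# Timestep 745 falls in early February (indices 0-743 are January at hourly resolution)
assert get_monthly_peak_for_timestep(31 * 24 + 1, peaks, 1) == 100.0
# June starts after Jan-May = 31+28+31+30+31 = 151 days
assert get_monthly_peak_for_timestep(151 * 24, peaks, 1) == 250.0

results = {"outputs": {"ElectricLoad": {"load_series_kw": [1.0, 2.0]}}}
assert safe_get(results, "outputs.ElectricLoad.load_series_kw") == [1.0, 2.0]
assert safe_get(results, "outputs.Missing.key", default=[]) == []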