intermediate backup

2025-05-03 20:46:14 +02:00
parent 2b0a5728d4
commit 6542caf48f
38 changed files with 4513 additions and 1067 deletions

@@ -0,0 +1,22 @@
# forecasting/base.py
from typing import List

import numpy as np
import pandas as pd


class ForecastProvider:
    """Interface for forecast providers used by the optimizer."""

    def get_forecast(
        self,
        historical_data_slice: pd.DataFrame,
        optimization_horizon_hours: int,
    ) -> np.ndarray | None:
        """Returns an hourly forecast covering the optimization horizon, or None on failure."""
        raise NotImplementedError

    def get_required_lookback(self) -> int:
        """Returns the minimum number of historical data points required."""
        raise NotImplementedError

    def get_forecast_horizons(self) -> List[int]:
        """Returns the list of native forecast horizons."""
        raise NotImplementedError
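
The base class only defines the contract; the caller's side is not part of this diff. A minimal, hypothetical usage sketch (`provider`, `df`, and the horizon value are illustrative, not from this commit):

# Hypothetical caller code, not part of this commit.
# `provider` is any concrete ForecastProvider; `df` is an hourly DataFrame with the target column.
lookback = provider.get_required_lookback()
historical_slice = df.iloc[-lookback:]  # caller supplies at least the required lookback
forecast = provider.get_forecast(historical_slice, optimization_horizon_hours=24)
if forecast is None:
    # Providers signal failure by returning None rather than raising.
    print("Forecast generation failed")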

@@ -0,0 +1,188 @@
import logging
from typing import List, Dict, Any, Optional

import numpy as np
import pandas as pd
import torch

from .base import ForecastProvider
from forecasting_model.utils import FeatureConfig
from forecasting_model.train.model import LSTMForecastLightningModule
from forecasting_model import engineer_features
from optimizer.forecasting.utils import interpolate_forecast

logger = logging.getLogger(__name__)
class EnsembleProvider(ForecastProvider):
    """Provides forecasts using an ensemble of trained LSTM models."""

    def __init__(
        self,
        fold_artifacts: List[Dict[str, Any]],
        ensemble_method: str,
        ensemble_feature_config: FeatureConfig,  # Assumed consistent across folds by the loading logic
        ensemble_target_col: str,  # Assumed consistent
    ):
        if not fold_artifacts:
            raise ValueError("EnsembleProvider requires at least one fold artifact.")
        if ensemble_method not in ['mean', 'median']:
            raise ValueError(f"Unsupported ensemble method: {ensemble_method}. Use 'mean' or 'median'.")
        self.fold_artifacts = fold_artifacts
        self.ensemble_method = ensemble_method
        # Store the common config for reference, but use fold-specific details in get_forecast.
        self.ensemble_feature_config = ensemble_feature_config
        self.ensemble_target_col = ensemble_target_col
        self.common_forecast_horizons = sorted(ensemble_feature_config.forecast_horizon)  # Assumed consistent

        # Calculate the maximum lookback needed across all folds.
        max_lookback = 0
        for i, fold in enumerate(fold_artifacts):
            try:
                fold_feature_config = fold['feature_config']
                fold_seq_len = fold_feature_config.sequence_length
                feature_lookback = 0
                if fold_feature_config.lags:
                    feature_lookback = max(feature_lookback, max(fold_feature_config.lags))
                if fold_feature_config.rolling_window_sizes:
                    feature_lookback = max(feature_lookback, max(w - 1 for w in fold_feature_config.rolling_window_sizes))
                fold_total_lookback = fold_seq_len + feature_lookback
                max_lookback = max(max_lookback, fold_total_lookback)
            except KeyError as e:
                raise ValueError(f"Fold artifact {i} is missing expected key: {e}") from e
            except Exception as e:
                raise ValueError(f"Error processing fold artifact {i} for lookback calculation: {e}") from e
        self._required_lookback = max_lookback
        logger.debug(f"EnsembleProvider initialized with {len(fold_artifacts)} folds. Method: '{ensemble_method}'. Required lookback: {self._required_lookback}")

    def get_required_lookback(self) -> int:
        return self._required_lookback
    def get_forecast(
        self,
        historical_data_slice: pd.DataFrame,
        optimization_horizon_hours: int,
    ) -> np.ndarray | None:
        """Generates forecasts from each fold model, interpolates them, and aggregates."""
        logger.debug(f"EnsembleProvider: Generating forecast for {optimization_horizon_hours} hours using {self.ensemble_method}.")
        if len(historical_data_slice) < self._required_lookback:
            logger.error(f"Insufficient historical data provided. Need {self._required_lookback}, got {len(historical_data_slice)}.")
            return None

        fold_forecasts_interpolated = []
        last_actual_price = historical_data_slice[self.ensemble_target_col].iloc[-1]  # Common anchor for all folds

        for i, fold_artifact in enumerate(self.fold_artifacts):
            fold_id = fold_artifact.get("fold_id", i + 1)
            try:
                fold_model: LSTMForecastLightningModule = fold_artifact['model_instance']
                fold_feature_config: FeatureConfig = fold_artifact['feature_config']
                fold_target_scaler: Optional[Any] = fold_artifact['target_scaler']
                fold_target_col: str = fold_artifact['main_forecasting_config'].data.target_col  # Use the fold-specific target
                fold_seq_len = fold_feature_config.sequence_length
                fold_horizons = sorted(fold_feature_config.forecast_horizon)

                # Calculate the lookback needed *for this specific fold* to check the slice length.
                fold_feature_lookback = 0
                if fold_feature_config.lags:
                    fold_feature_lookback = max(fold_feature_lookback, max(fold_feature_config.lags))
                if fold_feature_config.rolling_window_sizes:
                    fold_feature_lookback = max(fold_feature_lookback, max(w - 1 for w in fold_feature_config.rolling_window_sizes))
                fold_total_lookback = fold_seq_len + fold_feature_lookback
                if len(historical_data_slice) < fold_total_lookback:
                    logger.warning(f"Fold {fold_id}: Skipping fold. Insufficient historical data in slice for this fold's lookback ({fold_total_lookback} needed).")
                    continue

                # 1. Feature engineering (using the fold's config). The input slice
                # `historical_data_slice` is already long enough, based on max_lookback.
                engineered_df_fold = engineer_features(historical_data_slice.copy(), fold_target_col, fold_feature_config)
                if engineered_df_fold.isnull().any().any():
                    logger.warning(f"Fold {fold_id}: NaNs found after feature engineering. Attempting fill.")
                    engineered_df_fold = engineered_df_fold.ffill().bfill()
                    if engineered_df_fold.isnull().any().any():
                        logger.error(f"Fold {fold_id}: NaNs persist after fill. Skipping fold.")
                        continue

                # 2. Create *one* input sequence (using the fold's sequence length).
                if len(engineered_df_fold) < fold_seq_len:
                    logger.error(f"Fold {fold_id}: Engineered data ({len(engineered_df_fold)}) is shorter than fold sequence length ({fold_seq_len}). Skipping fold.")
                    continue
                input_sequence_data_fold = engineered_df_fold.iloc[-fold_seq_len:].copy()
                feature_columns_fold = [col for col in engineered_df_fold.columns if col != fold_target_col]
                if not feature_columns_fold:
                    feature_columns_fold = engineered_df_fold.columns.tolist()
                input_sequence_np_fold = input_sequence_data_fold[feature_columns_fold].values
                if input_sequence_np_fold.shape != (fold_seq_len, len(feature_columns_fold)):
                    logger.error(f"Fold {fold_id}: Input sequence has wrong shape. Expected ({fold_seq_len}, {len(feature_columns_fold)}), got {input_sequence_np_fold.shape}. Skipping fold.")
                    continue
                input_tensor_fold = torch.FloatTensor(input_sequence_np_fold).unsqueeze(0)

                # 3. Run inference (using the fold's model).
                fold_model.eval()
                with torch.no_grad():
                    predictions_scaled_fold = fold_model(input_tensor_fold)  # Shape (1, num_fold_horizons)
                if predictions_scaled_fold.ndim != 2 or predictions_scaled_fold.shape[0] != 1 or predictions_scaled_fold.shape[1] != len(fold_horizons):
                    logger.error(f"Fold {fold_id}: Prediction output shape mismatch. Expected (1, {len(fold_horizons)}), got {predictions_scaled_fold.shape}. Skipping fold.")
                    continue
                predictions_scaled_np_fold = predictions_scaled_fold.squeeze(0).cpu().numpy()

                # 4. Inverse transform (using the fold's scaler).
                predictions_original_scale_fold = predictions_scaled_np_fold
                if fold_target_scaler:
                    try:
                        predictions_original_scale_fold = fold_target_scaler.inverse_transform(
                            predictions_scaled_np_fold.reshape(-1, 1)
                        ).flatten()
                    except Exception as e:
                        logger.error(f"Fold {fold_id}: Failed to apply inverse transform: {e}. Skipping fold.", exc_info=True)
                        continue

                # 5. Interpolate (using the fold's native horizons).
                interpolated_forecast_fold = interpolate_forecast(
                    native_horizons=fold_horizons,
                    native_predictions=predictions_original_scale_fold,
                    target_horizon=optimization_horizon_hours,
                    last_known_actual=last_actual_price,
                )
                if interpolated_forecast_fold is not None:
                    fold_forecasts_interpolated.append(interpolated_forecast_fold)
                    logger.debug(f"Fold {fold_id}: Successfully generated interpolated forecast.")
                else:
                    logger.warning(f"Fold {fold_id}: Interpolation failed. Skipping fold.")
            except Exception as e:
                logger.error(f"Error processing ensemble fold {fold_id}: {e}", exc_info=True)
                continue  # Skip this fold on error

        # --- Aggregation ---
        if not fold_forecasts_interpolated:
            logger.error("No successful forecasts generated from any ensemble folds.")
            return None
        logger.debug(f"Aggregating forecasts from {len(fold_forecasts_interpolated)} folds using '{self.ensemble_method}'.")
        stacked_predictions = np.stack(fold_forecasts_interpolated, axis=0)  # Shape (n_folds, target_horizon)
        if self.ensemble_method == 'mean':
            final_ensemble_forecast = np.mean(stacked_predictions, axis=0)
        elif self.ensemble_method == 'median':
            final_ensemble_forecast = np.median(stacked_predictions, axis=0)
        else:
            # Should be caught in __init__, but double-check.
            logger.error(f"Internal error: Invalid ensemble method '{self.ensemble_method}' during aggregation.")
            return None
        logger.debug("EnsembleProvider: Successfully generated forecast.")
        return final_ensemble_forecast
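
For intuition on the 'mean' vs. 'median' choice: once the interpolated fold forecasts are stacked into an (n_folds, target_horizon) array, aggregation is a single reduction along axis 0, and the median variant is robust to a single outlier fold. A small self-contained illustration (values invented for the example):

import numpy as np

# Three hypothetical fold forecasts over a 4-hour horizon; the third fold is an outlier.
stacked = np.stack([
    [50.0, 52.0, 54.0, 53.0],
    [51.0, 53.0, 55.0, 54.0],
    [90.0, 95.0, 99.0, 97.0],  # outlier fold
], axis=0)                     # shape (n_folds, target_horizon)

print(np.mean(stacked, axis=0))    # ~[63.67 66.67 69.33 68.0] -> pulled toward the outlier
print(np.median(stacked, axis=0))  # [51. 53. 55. 54.]         -> robust to the outlier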

@@ -0,0 +1,150 @@
import logging
from typing import Any, Optional

import numpy as np
import pandas as pd
import torch

# Imports from our project structure
from .base import ForecastProvider
from forecasting_model.utils import FeatureConfig
from forecasting_model.train.model import LSTMForecastLightningModule
from forecasting_model import engineer_features
from optimizer.forecasting.utils import interpolate_forecast

logger = logging.getLogger(__name__)
class SingleModelProvider(ForecastProvider):
    """Provides forecasts using a single trained LSTM model."""

    def __init__(
        self,
        model_instance: LSTMForecastLightningModule,
        feature_config: FeatureConfig,
        target_col: str,
        target_scaler: Optional[Any],  # Ideally typed more specifically (a fitted sklearn transformer)
        # input_size is not needed here; the model instance is already configured.
    ):
        self.model = model_instance
        self.feature_config = feature_config
        self.target_col = target_col
        self.target_scaler = target_scaler
        self.sequence_length = feature_config.sequence_length
        self.forecast_horizons = sorted(feature_config.forecast_horizon)  # Ensure sorted

        # Calculate the lookback required for feature engineering.
        feature_lookback = 0
        if feature_config.lags:
            feature_lookback = max(feature_lookback, max(feature_config.lags))
        if feature_config.rolling_window_sizes:
            # A rolling window of size W needs W-1 previous points.
            feature_lookback = max(feature_lookback, max(w - 1 for w in feature_config.rolling_window_sizes))

        # Total lookback: sequence length for the model input plus feature engineering needs.
        # We need `sequence_length` points for the *last* input sequence, and the first point
        # of that sequence needs `feature_lookback` points before it. Since the input sequence
        # ends just before the first forecast point (t=1), we need
        # `sequence_length + feature_lookback` points in total before t=1.
        self._required_lookback = self.sequence_length + feature_lookback
        logger.debug(
            f"SingleModelProvider initialized. Required lookback: {self._required_lookback} "
            f"(SeqLen: {self.sequence_length}, FeatLookback: {feature_lookback})"
        )

    def get_required_lookback(self) -> int:
        return self._required_lookback
    def get_forecast(
        self,
        historical_data_slice: pd.DataFrame,
        optimization_horizon_hours: int,
    ) -> np.ndarray | None:
        """Generates a forecast using the single model and interpolates it to hourly resolution."""
        logger.debug(f"SingleModelProvider: Generating forecast for {optimization_horizon_hours} hours.")
        if len(historical_data_slice) < self._required_lookback:
            logger.error(f"Insufficient historical data provided. Need {self._required_lookback}, got {len(historical_data_slice)}.")
            return None
        try:
            # 1. Feature engineering.
            # Use the provided slice, which already includes the lookback.
            engineered_df = engineer_features(historical_data_slice.copy(), self.target_col, self.feature_config)

            # Check for NaNs after feature engineering, before creating sequences.
            if engineered_df.isnull().any().any():
                logger.warning("NaNs found after feature engineering. Attempting to fill with ffill/bfill.")
                # Be careful about filling target vs. features if needed.
                engineered_df = engineered_df.ffill().bfill()
                if engineered_df.isnull().any().any():
                    logger.error("NaNs persist after fill. Cannot create sequences.")
                    return None

            # 2. Create *one* input sequence ending at the last point of the historical slice.
            # This sequence is used to predict starting from the next hour (t=1).
            if len(engineered_df) < self.sequence_length:
                logger.error(f"Engineered data ({len(engineered_df)}) is shorter than sequence length ({self.sequence_length}).")
                return None
            input_sequence_data = engineered_df.iloc[-self.sequence_length:].copy()

            # TODO: Verify the exact features the model expects (target included/excluded?).
            # Assuming all columns except the original target are features.
            feature_columns = [col for col in engineered_df.columns if col != self.target_col]
            if not feature_columns:
                feature_columns = engineered_df.columns.tolist()  # Use all columns if the target wasn't dropped
            input_sequence_np = input_sequence_data[feature_columns].values
            if input_sequence_np.shape != (self.sequence_length, len(feature_columns)):
                logger.error(f"Input sequence has wrong shape. Expected ({self.sequence_length}, {len(feature_columns)}), got {input_sequence_np.shape}.")
                return None
            input_tensor = torch.FloatTensor(input_sequence_np).unsqueeze(0)  # Add batch dim

            # 3. Run inference.
            self.model.eval()
            with torch.no_grad():
                # Model output shape: (1, num_horizons)
                predictions_scaled = self.model(input_tensor)
            if predictions_scaled.ndim != 2 or predictions_scaled.shape[0] != 1 or predictions_scaled.shape[1] != len(self.forecast_horizons):
                logger.error(f"Model prediction output shape mismatch. Expected (1, {len(self.forecast_horizons)}), got {predictions_scaled.shape}.")
                return None
            predictions_scaled_np = predictions_scaled.squeeze(0).cpu().numpy()  # Shape: (num_horizons,)

            # 4. Inverse transform.
            predictions_original_scale = predictions_scaled_np
            if self.target_scaler:
                try:
                    # The scaler expects shape (n_samples, n_features), even if n_features == 1.
                    predictions_original_scale = self.target_scaler.inverse_transform(
                        predictions_scaled_np.reshape(-1, 1)
                    ).flatten()
                    logger.debug("Applied inverse transform to predictions.")
                except Exception as e:
                    logger.error(f"Failed to apply inverse transform: {e}", exc_info=True)
                    # Returning None is safer than returning still-scaled values.
                    return None

            # 5. Interpolate.
            # Use the last actual price from the input data as the t=0 anchor point.
            last_actual_price = historical_data_slice[self.target_col].iloc[-1]
            interpolated_forecast = interpolate_forecast(
                native_horizons=self.forecast_horizons,
                native_predictions=predictions_original_scale,
                target_horizon=optimization_horizon_hours,
                last_known_actual=last_actual_price,
            )
            if interpolated_forecast is None:
                logger.error("Interpolation step failed.")
                return None

            logger.debug("SingleModelProvider: Successfully generated forecast.")
            return interpolated_forecast
        except Exception as e:
            logger.error(f"Error during single model forecast generation: {e}", exc_info=True)
            return None
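
The required lookback combines the model's input window with the history consumed by feature engineering. A quick sketch of the arithmetic with illustrative numbers (the real values come from FeatureConfig, not from this commit):

# Illustrative numbers only; equivalent to the computation in __init__.
sequence_length = 24
lags = [24, 48, 168]
rolling_window_sizes = [24]

feature_lookback = max(max(lags), max(w - 1 for w in rolling_window_sizes))  # max(168, 23) = 168
required_lookback = sequence_length + feature_lookback
print(required_lookback)  # 192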

@@ -0,0 +1,67 @@
import logging
from typing import List, Optional

import numpy as np

logger = logging.getLogger(__name__)


# --- Interpolation Helper ---
def interpolate_forecast(
    native_horizons: List[int],
    native_predictions: np.ndarray,
    target_horizon: int,
    last_known_actual: Optional[float] = None,  # Optional: last known price, used as the t=0 anchor
) -> np.ndarray | None:
    """
    Linearly interpolates model predictions at native horizons to a full hourly sequence.

    Args:
        native_horizons: Horizons the model predicts (e.g., [1, 6, 12, 24]). Must not be empty.
        native_predictions: Numpy array of predictions corresponding to native_horizons. Must not be empty.
        target_horizon: The desired length of the hourly forecast (e.g., 24).
        last_known_actual: Optional last actual price before the forecast starts (at t=0).
            Used as an anchor if 0 is not among the native horizons.

    Returns:
        A numpy array of shape (target_horizon,) with interpolated values, or None on error.
    """
    if not native_horizons or native_predictions is None or native_predictions.size == 0:
        logger.error("Cannot interpolate with empty native horizons or predictions.")
        return None
    if len(native_horizons) != len(native_predictions):
        logger.error(f"Mismatched lengths: native_horizons ({len(native_horizons)}) vs native_predictions ({len(native_predictions)})")
        return None
    try:
        # Ensure horizons are sorted. Floats allow non-integer horizons if ever needed.
        sorted_indices = np.argsort(native_horizons)
        xp = np.array(native_horizons, dtype=float)[sorted_indices]
        fp = native_predictions[sorted_indices]

        # Target points for interpolation (hours 1 to target_horizon).
        x_target = np.arange(1, target_horizon + 1, dtype=float)

        # Add a t=0 point if provided and 0 is not already a native horizon;
        # this anchors the start of the interpolation.
        if last_known_actual is not None and xp[0] > 0:
            xp = np.insert(xp, 0, 0.0)
            fp = np.insert(fp, 0, last_known_actual)
        elif xp[0] == 0 and last_known_actual is not None:
            logger.debug("Native horizons include 0; using the model's prediction for t=0 instead of last_known_actual.")
        elif last_known_actual is None and xp[0] > 0:
            # np.interp extrapolates backwards as a constant from the first point.
            logger.warning("No last_known_actual provided and native horizons start > 0. Interpolation might be less accurate at the beginning.")

        # Check whether the target range requires extrapolation beyond the model's native range.
        if target_horizon > xp[-1]:
            logger.warning(f"Target horizon ({target_horizon}) extends beyond the maximum native forecast horizon ({xp[-1]}). Extrapolation will occur (constant value).")

        interpolated_values = np.interp(x_target, xp, fp)
        return interpolated_values
    except Exception as e:
        logger.error(f"Linear interpolation failed: {e}", exc_info=True)
        return None
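
A worked example of the helper, reusing the horizon values from the docstring (prediction values invented): with native horizons [1, 6, 12, 24] and a t=0 anchor, np.interp fills hours 2 through 5 linearly between the h=1 and h=6 predictions, and likewise between each later pair of native points.

import numpy as np

native_horizons = [1, 6, 12, 24]
native_predictions = np.array([50.0, 60.0, 55.0, 65.0])  # illustrative values

hourly = interpolate_forecast(
    native_horizons=native_horizons,
    native_predictions=native_predictions,
    target_horizon=24,
    last_known_actual=48.0,  # anchors t=0
)
print(hourly[:6])  # [50. 52. 54. 56. 58. 60.] -> linear ramp from h=1 to h=6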