Skip to content
Merged
Show file tree
Hide file tree
Changes from 8 commits
Commits
Show all changes
20 commits
Select commit Hold shift + click to select a range
e7ce3fb
feat: removed mutable objects from default from pytorch_forecasting.d…
eugenio-mercuriali Oct 18, 2024
3d75f61
feat: removed mutable objects from default from tft, base model and b…
eugenio-mercuriali Oct 18, 2024
dea4c12
feat: removed mutable objects from default from deepar and mlp
eugenio-mercuriali Oct 19, 2024
c45b801
feat: removed mutable objects from default from tft model
eugenio-mercuriali Oct 19, 2024
a6666f0
feat: removed mutable objects from default from nbeats, nhits, rnn an…
eugenio-mercuriali Oct 19, 2024
d12c2ad
fix: rollback renaming of monotone constraints
eugenio-mercuriali Oct 19, 2024
5d620a3
fix: create new objects when self write
eugenio-mercuriali Oct 20, 2024
e6b238a
style: one liner init
eugenio-mercuriali Oct 20, 2024
1ef8636
fix: test + better type hints
eugenio-mercuriali Oct 20, 2024
43a90b4
feat: added protected attributes and properties for self writes
eugenio-mercuriali Oct 20, 2024
c3cb449
feat: add weights setter
eugenio-mercuriali Oct 21, 2024
0cd03f5
Merge branch 'main' into pr/1699
fkiraly Oct 22, 2024
842f927
Merge branch 'main' into pr/1699
fkiraly Oct 22, 2024
f2b1414
fix: ruff
eugenio-mercuriali Oct 22, 2024
2b506cb
refactor: revert 43a90b437f87440a4a9c960c02ca0aed219ba42a
eugenio-mercuriali Oct 26, 2024
175bc49
refactor: refactor usages of self.attr to self._attr, using resolved …
eugenio-mercuriali Oct 26, 2024
5eef698
fix: remove unnecessary comprehension
eugenio-mercuriali Oct 26, 2024
911700d
fix: adjusted building notebook
eugenio-mercuriali Oct 26, 2024
8f77ce4
Merge branch 'main' into feature/issue-1668
eugenio-mercuriali Oct 26, 2024
a66c2e0
Merge branch 'main' into feature/issue-1668
eugenio-mercuriali Nov 6, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 9 additions & 6 deletions pytorch_forecasting/data/encoders.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@

import numpy as np
import pandas as pd
from copy import deepcopy
from sklearn.base import BaseEstimator, TransformerMixin
import torch
from torch.distributions import constraints
Expand Down Expand Up @@ -396,7 +397,7 @@ def __init__(
method: str = "standard",
center: bool = True,
transformation: Union[str, Tuple[Callable, Callable]] = None,
method_kwargs: Dict[str, Any] = {},
method_kwargs: Dict[str, Any] = None,
):
"""
Args:
Expand Down Expand Up @@ -427,7 +428,7 @@ def __init__(
assert method in ["standard", "robust", "identity"], f"method has invalid value {method}"
self.center = center
self.transformation = transformation
self.method_kwargs = method_kwargs
self.method_kwargs = deepcopy(method_kwargs) if method_kwargs is not None else {}

def get_parameters(self, *args, **kwargs) -> torch.Tensor:
"""
Expand Down Expand Up @@ -623,7 +624,7 @@ def __init__(
center: bool = True,
max_length: Union[int, List[int]] = None,
transformation: Union[str, Tuple[Callable, Callable]] = None,
method_kwargs: Dict[str, Any] = {},
method_kwargs: Dict[str, Any] = None,
):
"""
Initialize
Expand Down Expand Up @@ -655,6 +656,7 @@ def __init__(
should be defined if ``reverse`` is not the inverse of the forward transformation. ``inverse_torch``
can be defined to provide a torch distribution transform for inverse transformations.
"""
method_kwargs = deepcopy(method_kwargs) if method_kwargs is not None else {}
super().__init__(method=method, center=center, transformation=transformation, method_kwargs=method_kwargs)
self.max_length = max_length

Expand Down Expand Up @@ -726,11 +728,11 @@ class GroupNormalizer(TorchNormalizer):
def __init__(
self,
method: str = "standard",
groups: List[str] = [],
groups: List[str] = None,
center: bool = True,
scale_by_group: bool = False,
transformation: Union[str, Tuple[Callable, Callable]] = None,
method_kwargs: Dict[str, Any] = {},
method_kwargs: Dict[str, Any] = None,
):
"""
Group normalizer to normalize a given entry by groups. Can be used as target normalizer.
Expand Down Expand Up @@ -764,8 +766,9 @@ def __init__(
can be defined to provide a torch distribution transform for inverse transformations.

"""
self.groups = groups
self.groups = list(groups) if groups is not None else []
self.scale_by_group = scale_by_group
method_kwargs = deepcopy(method_kwargs) if method_kwargs is not None else {}
super().__init__(method=method, center=center, transformation=transformation, method_kwargs=method_kwargs)

def fit(self, y: pd.Series, X: pd.DataFrame):
Expand Down
54 changes: 29 additions & 25 deletions pytorch_forecasting/data/timeseries.py
Original file line number Diff line number Diff line change
Expand Up @@ -185,22 +185,22 @@ def __init__(
min_prediction_idx: int = None,
min_prediction_length: int = None,
max_prediction_length: int = 1,
static_categoricals: List[str] = [],
static_reals: List[str] = [],
time_varying_known_categoricals: List[str] = [],
time_varying_known_reals: List[str] = [],
time_varying_unknown_categoricals: List[str] = [],
time_varying_unknown_reals: List[str] = [],
variable_groups: Dict[str, List[int]] = {},
constant_fill_strategy: Dict[str, Union[str, float, int, bool]] = {},
static_categoricals: List[str] = None,
static_reals: List[str] = None,
time_varying_known_categoricals: List[str] = None,
time_varying_known_reals: List[str] = None,
time_varying_unknown_categoricals: List[str] = None,
time_varying_unknown_reals: List[str] = None,
variable_groups: Dict[str, List[int]] = None,
constant_fill_strategy: Dict[str, Union[str, float, int, bool]] = None,
allow_missing_timesteps: bool = False,
lags: Dict[str, List[int]] = {},
lags: Dict[str, List[int]] = None,
add_relative_time_idx: bool = False,
add_target_scales: bool = False,
add_encoder_length: Union[bool, str] = "auto",
target_normalizer: Union[NORMALIZER, str, List[NORMALIZER], Tuple[NORMALIZER], None] = "auto",
categorical_encoders: Dict[str, NaNLabelEncoder] = {},
scalers: Dict[str, Union[StandardScaler, RobustScaler, TorchNormalizer, EncoderNormalizer]] = {},
categorical_encoders: Dict[str, NaNLabelEncoder] = None,
scalers: Dict[str, Union[StandardScaler, RobustScaler, TorchNormalizer, EncoderNormalizer]] = None,
randomize_length: Union[None, Tuple[float, float], bool] = False,
predict_mode: bool = False,
):
Expand Down Expand Up @@ -352,13 +352,17 @@ def __init__(
self.target = target
self.weight = weight
self.time_idx = time_idx
self.group_ids = [] + group_ids
self.static_categoricals = [] + static_categoricals
self.static_reals = [] + static_reals
self.time_varying_known_categoricals = [] + time_varying_known_categoricals
self.time_varying_known_reals = [] + time_varying_known_reals
self.time_varying_unknown_categoricals = [] + time_varying_unknown_categoricals
self.time_varying_unknown_reals = [] + time_varying_unknown_reals
self.group_ids = [] if group_ids is None else list(group_ids)
self.static_categoricals = [] if static_categoricals is None else list(static_categoricals)
self.static_reals = [] if static_reals is None else list(static_reals)
self.time_varying_known_categoricals = (
[] if time_varying_known_categoricals is None else list(time_varying_known_categoricals)
)
self.time_varying_known_reals = [] if time_varying_known_reals is None else list(time_varying_known_reals)
self.time_varying_unknown_categoricals = (
[] if time_varying_unknown_categoricals is None else list(time_varying_unknown_categoricals)
)
self.time_varying_unknown_reals = [] if time_varying_unknown_reals is None else list(time_varying_unknown_reals)
self.add_relative_time_idx = add_relative_time_idx

# set automatic defaults
Expand All @@ -371,15 +375,15 @@ def __init__(
if min_prediction_idx is None:
min_prediction_idx = data[self.time_idx].min()
self.min_prediction_idx = min_prediction_idx
self.constant_fill_strategy = {} if len(constant_fill_strategy) == 0 else constant_fill_strategy
self.constant_fill_strategy = {} if constant_fill_strategy is None else deepcopy(constant_fill_strategy)
self.predict_mode = predict_mode
self.allow_missing_timesteps = allow_missing_timesteps
self.target_normalizer = target_normalizer
self.categorical_encoders = {} if len(categorical_encoders) == 0 else categorical_encoders
self.scalers = {} if len(scalers) == 0 else scalers
self.categorical_encoders = {} if categorical_encoders is None else deepcopy(categorical_encoders)
self.scalers = {} if scalers is None else deepcopy(scalers)
self.add_target_scales = add_target_scales
self.variable_groups = {} if len(variable_groups) == 0 else variable_groups
self.lags = {} if len(lags) == 0 else lags
self.variable_groups = {} if variable_groups is None else deepcopy(variable_groups)
self.lags = {} if lags is None else deepcopy(lags)

# add_encoder_length
if isinstance(add_encoder_length, str):
Expand Down Expand Up @@ -412,7 +416,7 @@ def __init__(
), "relative_time_idx is a protected column and must not be present in data"
if "relative_time_idx" not in self.time_varying_known_reals and "relative_time_idx" not in self.reals:
self.time_varying_known_reals.append("relative_time_idx")
data.loc[:, "relative_time_idx"] = 0.0 # dummy - real value will be set dynamiclly in __getitem__()
data.loc[:, "relative_time_idx"] = 0.0 # dummy - real value will be set dynamically in __getitem__()

# add decoder length to static real variables
if self.add_encoder_length:
Expand All @@ -421,7 +425,7 @@ def __init__(
), "encoder_length is a protected column and must not be present in data"
if "encoder_length" not in self.time_varying_known_reals and "encoder_length" not in self.reals:
self.static_reals.append("encoder_length")
data.loc[:, "encoder_length"] = 0 # dummy - real value will be set dynamiclly in __getitem__()
data.loc[:, "encoder_length"] = 0 # dummy - real value will be set dynamically in __getitem__()

# validate
self._validate_data(data)
Expand Down
14 changes: 8 additions & 6 deletions pytorch_forecasting/metrics/base_metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -503,12 +503,14 @@ class CompositeMetric(LightningMetric):
higher_is_better = False
is_differentiable = True

def __init__(self, metrics: List[LightningMetric] = [], weights: List[float] = None):
def __init__(self, metrics: List[LightningMetric] = None, weights: List[float] = None):
"""
Args:
metrics (List[LightningMetric], optional): list of metrics to combine. Defaults to [].
metrics (List[LightningMetric], optional): list of metrics to combine. Defaults to None.
weights (List[float], optional): list of weights / multipliers for weights. Defaults to 1.0 for all metrics.
"""
if metrics is None:
metrics = []
if weights is None:
weights = [1.0 for _ in metrics]
assert len(weights) == len(metrics), "Number of weights has to match number of metrics"
Expand Down Expand Up @@ -897,9 +899,7 @@ class DistributionLoss(MultiHorizonMetric):
distribution_class: distributions.Distribution
distribution_arguments: List[str]

def __init__(
self, name: str = None, quantiles: List[float] = [0.02, 0.1, 0.25, 0.5, 0.75, 0.9, 0.98], reduction="mean"
):
def __init__(self, name: str = None, quantiles: Optional[List[float]] = None, reduction="mean"):
"""
Initialize metric

Expand All @@ -909,6 +909,8 @@ def __init__(
Defaults to [0.02, 0.1, 0.25, 0.5, 0.75, 0.9, 0.98].
reduction (str, optional): Reduction, "none", "mean" or "sqrt-mean". Defaults to "mean".
"""
if quantiles is None:
quantiles = [0.02, 0.1, 0.25, 0.5, 0.75, 0.9, 0.98]
super().__init__(name=name, quantiles=quantiles, reduction=reduction)

def map_x_to_distribution(self, x: torch.Tensor) -> distributions.Distribution:
Expand Down Expand Up @@ -945,7 +947,7 @@ def to_prediction(self, y_pred: torch.Tensor, n_samples: int = 100) -> torch.Ten

Args:
y_pred: prediction output of network

n_samples (int): number of samples to draw
Returns:
torch.Tensor: mean prediction
"""
Expand Down
12 changes: 9 additions & 3 deletions pytorch_forecasting/metrics/distributions.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ class MultivariateNormalDistributionLoss(MultivariateDistributionLoss):
def __init__(
self,
name: str = None,
quantiles: List[float] = [0.02, 0.1, 0.25, 0.5, 0.75, 0.9, 0.98],
quantiles: Optional[List[float]] = None,
reduction: str = "mean",
rank: int = 10,
sigma_init: float = 1.0,
Expand All @@ -71,6 +71,8 @@ def __init__(
sigma_init (float, optional): default value for diagonal covariance. Defaults to 1.0.
sigma_minimum (float, optional): minimum value for diagonal covariance. Defaults to 1e-3.
"""
if quantiles is None:
quantiles = [0.02, 0.1, 0.25, 0.5, 0.75, 0.9, 0.98]
super().__init__(name=name, quantiles=quantiles, reduction=reduction)
self.rank = rank
self.sigma_minimum = sigma_minimum
Expand Down Expand Up @@ -263,7 +265,7 @@ class MQF2DistributionLoss(DistributionLoss):
def __init__(
self,
prediction_length: int,
quantiles: List[float] = [0.02, 0.1, 0.25, 0.5, 0.75, 0.9, 0.98],
quantiles: Optional[List[float]] = None,
hidden_size: Optional[int] = 4,
es_num_samples: int = 50,
beta: float = 1.0,
Expand All @@ -286,6 +288,8 @@ def __init__(
icnn_num_layers (int, optional): number of hidden layers in distribution estimating network. Defaults to 2.
estimate_logdet (bool, optional): if to estimate log determinant. Defaults to False.
"""
if quantiles is None:
quantiles = [0.02, 0.1, 0.25, 0.5, 0.75, 0.9, 0.98]
super().__init__(quantiles=quantiles)

from cpflows.flows import ActNorm
Expand Down Expand Up @@ -445,7 +449,7 @@ class ImplicitQuantileNetworkDistributionLoss(DistributionLoss):

def __init__(
self,
quantiles: List[float] = [0.02, 0.1, 0.25, 0.5, 0.75, 0.9, 0.98],
quantiles: Optional[List[float]] = None,
input_size: Optional[int] = 16,
hidden_size: Optional[int] = 32,
n_loss_samples: Optional[int] = 64,
Expand All @@ -459,6 +463,8 @@ def __init__(
hidden_size (int, optional): hidden size per prediction length. Defaults to 64.
n_loss_samples (int, optional): number of quantiles to sample to calculate loss.
"""
if quantiles is None:
quantiles = [0.02, 0.1, 0.25, 0.5, 0.75, 0.9, 0.98]
super().__init__(quantiles=quantiles)
self.quantile_network = ImplicitQuantileNetwork(input_size=input_size, hidden_size=hidden_size)
self.distribution_arguments = list(range(int(input_size)))
Expand Down
3 changes: 2 additions & 1 deletion pytorch_forecasting/metrics/point.py
Original file line number Diff line number Diff line change
Expand Up @@ -211,7 +211,8 @@ def update(
def loss(self, y_pred, target, scaling):
return (self.to_prediction(y_pred) - target).abs() / scaling.unsqueeze(-1)

def calculate_scaling(self, target, lengths, encoder_target, encoder_lengths):
@staticmethod
def calculate_scaling(target, lengths, encoder_target, encoder_lengths):
# calculate mean(abs(diff(targets)))
eps = 1e-6
batch_size = target.size(0)
Expand Down
6 changes: 4 additions & 2 deletions pytorch_forecasting/metrics/quantile.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
"""Quantile metrics for forecasting multiple quantiles per time step."""

from typing import List
from typing import List, Optional

import torch

Expand All @@ -16,7 +16,7 @@ class QuantileLoss(MultiHorizonMetric):

def __init__(
self,
quantiles: List[float] = [0.02, 0.1, 0.25, 0.5, 0.75, 0.9, 0.98],
quantiles: Optional[List[float]] = None,
**kwargs,
):
"""
Expand All @@ -25,6 +25,8 @@ def __init__(
Args:
quantiles: quantiles for metric
"""
if quantiles is None:
quantiles = [0.02, 0.1, 0.25, 0.5, 0.75, 0.9, 0.98]
super().__init__(quantiles=quantiles, **kwargs)

def loss(self, y_pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
Expand Down
27 changes: 20 additions & 7 deletions pytorch_forecasting/models/base_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -407,7 +407,7 @@ def __init__(
reduce_on_plateau_min_lr: float = 1e-5,
weight_decay: float = 0.0,
optimizer_params: Dict[str, Any] = None,
monotone_constaints: Dict[str, int] = {},
monotone_constaints: Dict[str, int] = None,
output_transformer: Callable = None,
optimizer=None,
):
Expand Down Expand Up @@ -446,6 +446,8 @@ def __init__(
`"ranger" <https://pytorch-optimizers.readthedocs.io/en/latest/optimizer_api.html#ranger21>`_,
if pytorch_optimizer is installed, otherwise "adam".
"""
if monotone_constaints is None:
monotone_constaints = {}
super().__init__()
# update hparams
frame = inspect.currentframe()
Expand Down Expand Up @@ -690,8 +692,8 @@ def create_log(
y: Tuple[torch.Tensor, torch.Tensor],
out: Dict[str, torch.Tensor],
batch_idx: int,
prediction_kwargs: Dict[str, Any] = {},
quantiles_kwargs: Dict[str, Any] = {},
prediction_kwargs: Optional[Dict[str, Any]] = None,
quantiles_kwargs: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
"""
Create the log used in the training and validation step.
Expand All @@ -709,6 +711,9 @@ def create_log(
Returns:
Dict[str, Any]: log dictionary to be returned by training and validation steps
"""

prediction_kwargs = {} if prediction_kwargs is None else deepcopy(prediction_kwargs)
quantiles_kwargs = {} if quantiles_kwargs is None else deepcopy(quantiles_kwargs)
# log
if isinstance(self.loss, DistributionLoss):
prediction_kwargs.setdefault("n_samples", 20)
Expand Down Expand Up @@ -1005,8 +1010,8 @@ def plot_prediction(
add_loss_to_title: Union[Metric, torch.Tensor, bool] = False,
show_future_observed: bool = True,
ax=None,
quantiles_kwargs: Dict[str, Any] = {},
prediction_kwargs: Dict[str, Any] = {},
quantiles_kwargs: Optional[Dict[str, Any]] = None,
prediction_kwargs: Optional[Dict[str, Any]] = None,
):
"""
Plot prediction of prediction vs actuals
Expand All @@ -1026,6 +1031,11 @@ def plot_prediction(
Returns:
matplotlib figure
"""
if quantiles_kwargs is None:
quantiles_kwargs = {}
if prediction_kwargs is None:
prediction_kwargs = {}

_check_matplotlib("plot_prediction")

from matplotlib import pyplot as plt
Expand Down Expand Up @@ -2293,8 +2303,8 @@ def plot_prediction(
add_loss_to_title: Union[Metric, torch.Tensor, bool] = False,
show_future_observed: bool = True,
ax=None,
quantiles_kwargs: Dict[str, Any] = {},
prediction_kwargs: Dict[str, Any] = {},
quantiles_kwargs: Optional[Dict[str, Any]] = None,
prediction_kwargs: Optional[Dict[str, Any]] = None,
):
"""
Plot prediction of prediction vs actuals
Expand All @@ -2315,6 +2325,9 @@ def plot_prediction(
matplotlib figure
"""

prediction_kwargs = {} if prediction_kwargs is None else deepcopy(prediction_kwargs)
quantiles_kwargs = {} if quantiles_kwargs is None else deepcopy(quantiles_kwargs)

# get predictions
if isinstance(self.loss, DistributionLoss):
prediction_kwargs.setdefault("use_metric", False)
Expand Down
Loading