From 579cf5affe70475e3e64654f9dc753908c094d78 Mon Sep 17 00:00:00 2001 From: paramthakkar123 Date: Sun, 7 Sep 2025 19:18:03 +0530 Subject: [PATCH 01/22] Refactor updates --- .../_beta/_beta_distribution_loss_pkg.py | 8 ++- .../_log_normal_distribution_loss_pkg.py | 8 ++- .../_mqf2/_mqf2_distribution_loss_pkg.py | 9 ++- ...ltivariate_normal_distribution_loss_pkg.py | 9 ++- ...negative_binomial_distribution_loss_pkg.py | 6 +- pytorch_forecasting/tests/_loss_mapping.py | 65 +++++++------------ 6 files changed, 58 insertions(+), 47 deletions(-) diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py index 1e146f678..92b1571de 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py @@ -4,7 +4,7 @@ from pytorch_forecasting.data import TorchNormalizer from pytorch_forecasting.metrics.base_metrics._base_object import _BasePtMetric - +from pytorch_forecasting.data.encoders import GroupNormalizer class BetaDistributionLoss_pkg(_BasePtMetric): """ @@ -16,6 +16,12 @@ class BetaDistributionLoss_pkg(_BasePtMetric): "distribution_type": "beta", "info:metric_name": "BetaDistributionLoss", "requires:data_type": "beta_distribution_forecast", + "clip_target": True, + "data_loader_kwargs": { + "target_normalizer": GroupNormalizer( + groups=["agency", "sku"], transformation="logit" + ) + }, } @classmethod diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py index a17d7f862..f7c54eedc 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py +++ 
b/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py @@ -6,7 +6,7 @@ from pytorch_forecasting.data import TorchNormalizer from pytorch_forecasting.metrics.base_metrics._base_object import _BasePtMetric - +from pytorch_forecasting.data.encoders import GroupNormalizer class LogNormalDistributionLoss_pkg(_BasePtMetric): """ @@ -18,6 +18,12 @@ class LogNormalDistributionLoss_pkg(_BasePtMetric): "distribution_type": "log_normal", "info:metric_name": "LogNormalDistributionLoss", "requires:data_type": "log_normal_distribution_forecast", + "clip_target": True, + "data_loader_kwargs": { + "target_normalizer": GroupNormalizer( + groups=["agency", "sku"], transformation="log1p" + ) + }, } @classmethod diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_mqf2/_mqf2_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_mqf2/_mqf2_distribution_loss_pkg.py index ed925ccfe..11e536c96 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_mqf2/_mqf2_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_mqf2/_mqf2_distribution_loss_pkg.py @@ -4,7 +4,7 @@ from pytorch_forecasting.data import TorchNormalizer from pytorch_forecasting.metrics.base_metrics._base_object import _BasePtMetric - +from pytorch_forecasting.data.encoders import GroupNormalizer class MQF2DistributionLoss_pkg(_BasePtMetric): """ @@ -18,6 +18,13 @@ class MQF2DistributionLoss_pkg(_BasePtMetric): "python_dependencies": ["cpflows"], "capability:quantile_generation": True, "requires:data_type": "mqf2_distribution_forecast", + "clip_target": True, + "data_loader_kwargs": { + "target_normalizer": GroupNormalizer( + groups=["agency", "sku"], center=False, transformation="log1p" + ) + }, + "trainer_kwargs": dict(accelerator="cpu"), } @classmethod diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py 
b/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py index 9e0db69f6..95537a81e 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py @@ -3,7 +3,7 @@ """ from pytorch_forecasting.metrics.base_metrics._base_object import _BasePtMetric - +from pytorch_forecasting.data.encoders import GroupNormalizer class MultivariateNormalDistributionLoss_pkg(_BasePtMetric): """ @@ -17,6 +17,11 @@ class MultivariateNormalDistributionLoss_pkg(_BasePtMetric): "distribution_type": "multivariate_normal", "info:metric_name": "MultivariateNormalDistributionLoss", "requires:data_type": "multivariate_normal_distribution_forecast", + "data_loader_kwargs": { + "target_normalizer": GroupNormalizer( + groups=["agency", "sku"], transformation="log1p" + ) + }, } @classmethod @@ -25,4 +30,4 @@ def get_cls(cls): MultivariateNormalDistributionLoss, ) - return MultivariateNormalDistributionLoss + return MultivariateNormalDistributionLoss \ No newline at end of file diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py index 12f2ef65a..de59c02e3 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py @@ -4,7 +4,7 @@ from pytorch_forecasting.data import TorchNormalizer from pytorch_forecasting.metrics.base_metrics._base_object import _BasePtMetric - +from pytorch_forecasting.data.encoders import GroupNormalizer class NegativeBinomialDistributionLoss_pkg(_BasePtMetric): """ @@ -16,6 +16,10 @@ 
class NegativeBinomialDistributionLoss_pkg(_BasePtMetric): "distribution_type": "negative_binomial", "info:metric_name": "NegativeBinomialDistributionLoss", "requires:data_type": "negative_binomial_distribution_forecast", + "clip_target": False, + "data_loader_kwargs": { + "target_normalizer": GroupNormalizer(groups=["agency", "sku"], center=False) + }, } @classmethod diff --git a/pytorch_forecasting/tests/_loss_mapping.py b/pytorch_forecasting/tests/_loss_mapping.py index d2b41fc3e..4131bd582 100644 --- a/pytorch_forecasting/tests/_loss_mapping.py +++ b/pytorch_forecasting/tests/_loss_mapping.py @@ -1,4 +1,3 @@ -from pytorch_forecasting.data.encoders import GroupNormalizer from pytorch_forecasting.metrics import ( MAE, MAPE, @@ -18,6 +17,16 @@ TweedieLoss, ) +from pytorch_forecasting.metrics._distributions_pkg import ( + BetaDistributionLoss_pkg, + ImplicitQuantileNetworkDistributionLoss_pkg, + LogNormalDistributionLoss_pkg, + MQF2DistributionLoss_pkg, + MultivariateNormalDistributionLoss_pkg, + NegativeBinomialDistributionLoss_pkg, + NormalDistributionLoss_pkg, +) + POINT_LOSSES_NUMERIC = [ MAE(), RMSE(), @@ -46,6 +55,15 @@ # todo: still need some debugging to add the MQF2DistributionLoss ] +METRIC_PKGS = [ + BetaDistributionLoss_pkg, + NegativeBinomialDistributionLoss_pkg, + MultivariateNormalDistributionLoss_pkg, + LogNormalDistributionLoss_pkg, + NormalDistributionLoss_pkg, + ImplicitQuantileNetworkDistributionLoss_pkg +] + LOSSES_BY_PRED_AND_Y_TYPE = { ("point", "numeric"): POINT_LOSSES_NUMERIC, ("point", "category"): POINT_LOSSES_CATEGORY, @@ -55,46 +73,11 @@ ("distr", "category"): [], } - LOSS_SPECIFIC_PARAMS = { - "BetaDistributionLoss": { - "clip_target": True, - "data_loader_kwargs": { - "target_normalizer": GroupNormalizer( - groups=["agency", "sku"], transformation="logit" - ) - }, - }, - "LogNormalDistributionLoss": { - "clip_target": True, - "data_loader_kwargs": { - "target_normalizer": GroupNormalizer( - groups=["agency", "sku"], 
transformation="log1p" - ) - }, - }, - "NegativeBinomialDistributionLoss": { - "clip_target": False, - "data_loader_kwargs": { - "target_normalizer": GroupNormalizer(groups=["agency", "sku"], center=False) - }, - }, - "MultivariateNormalDistributionLoss": { - "data_loader_kwargs": { - "target_normalizer": GroupNormalizer( - groups=["agency", "sku"], transformation="log1p" - ) - }, - }, - "MQF2DistributionLoss": { - "clip_target": True, - "data_loader_kwargs": { - "target_normalizer": GroupNormalizer( - groups=["agency", "sku"], center=False, transformation="log1p" - ) - }, - "trainer_kwargs": dict(accelerator="cpu"), - }, + pkg._tags.get("info:metric_name", pkg.__name__.replace("_pkg", "")): { + k: v for k, v in pkg._tags.items() if k not in ["metric_type", "distribution_type", "info:metric_name", "requires:data_type"] + } + for pkg in METRIC_PKGS } @@ -121,4 +104,4 @@ def get_compatible_losses(pred_types, y_types): if key in LOSSES_BY_PRED_AND_Y_TYPE: compatible_losses.extend(LOSSES_BY_PRED_AND_Y_TYPE[key]) - return compatible_losses + return compatible_losses \ No newline at end of file From 14512f984d677bbc2856ea68a3a7525664433a25 Mon Sep 17 00:00:00 2001 From: paramthakkar123 Date: Mon, 8 Sep 2025 18:34:50 +0530 Subject: [PATCH 02/22] Refactor --- .../_beta/_beta_distribution_loss_pkg.py | 22 +++++ ..._quantile_network_distribution_loss_pkg.py | 18 +++++ .../_log_normal_distribution_loss_pkg.py | 22 +++++ ...ltivariate_normal_distribution_loss_pkg.py | 21 ++++- ...negative_binomial_distribution_loss_pkg.py | 22 +++++ .../_normal/_normal_distribution_loss_pkg.py | 19 +++++ .../_cross_entropy/_cross_entropy_pkg.py | 19 +++++ .../metrics/_point_pkg/_mae/_mae_pkg.py | 19 +++++ .../metrics/_point_pkg/_mape/_mape_pkg.py | 19 +++++ .../metrics/_point_pkg/_mase/_mase_pkg.py | 19 +++++ .../_point_pkg/_poisson/_poisson_loss_pkg.py | 19 +++++ .../metrics/_point_pkg/_rmse/_rmse_pkg.py | 19 +++++ .../metrics/_point_pkg/_smape/_smape_pkg.py | 19 +++++ 
.../_point_pkg/_tweedie/_tweedie_loss_pkg.py | 19 +++++ .../_quantile_pkg/_quantile_loss_pkg.py | 19 +++++ pytorch_forecasting/tests/_loss_mapping.py | 80 ++++++------------- .../tests/test_all_estimators.py | 1 - 17 files changed, 318 insertions(+), 58 deletions(-) diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py index 92b1571de..50160bc0d 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py @@ -22,6 +22,9 @@ class BetaDistributionLoss_pkg(_BasePtMetric): groups=["agency", "sku"], transformation="logit" ) }, + "compatible_pred_types": ["distr"], + "compatible_y_types": ["numeric"], + "expected_loss_ndim": 2, } @classmethod @@ -36,3 +39,22 @@ def get_encoder(cls): Returns a TorchNormalizer instance for rescaling parameters. """ return TorchNormalizer(transformation="logit") + + @classmethod + def _get_test_dataloaders_from(cls, params=None): + """ + Return test dataloaders configured for BetaDistributionLoss. 
+ """ + from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + + if params is None: + params = {} + clip_target = cls._tags.get("clip_target", False) + data_loader_kwargs = cls._tags.get("data_loader_kwargs", {}).copy() + data_loader_kwargs.update(params.get("data_loader_kwargs", {})) + + data = data_with_covariates() + if clip_target: + data["target"] = data["target"].clip(1e-4, 1 - 1e-4) + dataloaders = make_dataloaders(data, **data_loader_kwargs) + return dataloaders diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_implicit_quantile_network/_implicit_quantile_network_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_implicit_quantile_network/_implicit_quantile_network_distribution_loss_pkg.py index d3589154b..61b8bc8d7 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_implicit_quantile_network/_implicit_quantile_network_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_implicit_quantile_network/_implicit_quantile_network_distribution_loss_pkg.py @@ -18,6 +18,8 @@ class ImplicitQuantileNetworkDistributionLoss_pkg(_BasePtMetric): "requires:data_type": "implicit_quantile_network_distribution_forecast", "capability:quantile_generation": True, "shape:adds_quantile_dimension": True, + "compatible_pred_types": ["distr"], + "compatible_y_types": ["numeric"], } @classmethod @@ -44,3 +46,19 @@ def get_metric_test_params(cls): fixture for testing the ImplicitQuantileNetworkDistributionLoss metric. """ return [{"input_size": 5}] + + @classmethod + def _get_test_dataloaders_from(cls, params=None): + """ + Returns test dataloaders configured for ImplicitQuantileNetworkDistributionLoss. 
+ """ + from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + + if params is None: + params = {} + data_loader_kwargs = cls._tags.get("data_loader_kwargs", {}).copy() + data_loader_kwargs.update(params.get("data_loader_kwargs", {})) + + data = data_with_covariates() + dataloaders = make_dataloaders(data, **data_loader_kwargs) + return dataloaders diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py index f7c54eedc..b3d072317 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py @@ -24,6 +24,9 @@ class LogNormalDistributionLoss_pkg(_BasePtMetric): groups=["agency", "sku"], transformation="log1p" ) }, + "compatible_pred_types": ["distr"], + "compatible_y_types": ["numeric"], + "expected_loss_ndim": 2, } @classmethod @@ -54,3 +57,22 @@ def prepare_test_inputs(cls, test_case): ) return y_pred, y + + @classmethod + def _get_test_dataloaders_from(cls, params=None): + """ + Returns test dataloaders configured for LogNormalDistributionLoss.
+ """ + from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + + if params is None: + params = {} + clip_target = cls._tags.get("clip_target", False) + data_loader_kwargs = cls._tags.get("data_loader_kwargs", {}).copy() + data_loader_kwargs.update(params.get("data_loader_kwargs", {})) + + data = data_with_covariates() + if clip_target: + data["target"] = data["target"].clip(1e-4, None) + dataloaders = make_dataloaders(data, **data_loader_kwargs) + return dataloaders diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py index 95537a81e..247edf0bd 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py @@ -22,6 +22,9 @@ class MultivariateNormalDistributionLoss_pkg(_BasePtMetric): groups=["agency", "sku"], transformation="log1p" ) }, + "compatible_pred_types": ["distr"], + "compatible_y_types": ["numeric"], + "expected_loss_ndim": 2, } @classmethod @@ -30,4 +33,20 @@ def get_cls(cls): MultivariateNormalDistributionLoss, ) - return MultivariateNormalDistributionLoss \ No newline at end of file + return MultivariateNormalDistributionLoss + + @classmethod + def _get_test_dataloaders_from(cls, params=None): + """ + Returns test dataloaders configured for MultivariateNormalDistributionLoss. 
+ """ + from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + + if params is None: + params = {} + data_loader_kwargs = cls._tags.get("data_loader_kwargs", {}).copy() + data_loader_kwargs.update(params.get("data_loader_kwargs", {})) + + data = data_with_covariates() + dataloaders = make_dataloaders(data, **data_loader_kwargs) + return dataloaders \ No newline at end of file diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py index de59c02e3..63625f6f5 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py @@ -20,6 +20,9 @@ class NegativeBinomialDistributionLoss_pkg(_BasePtMetric): "data_loader_kwargs": { "target_normalizer": GroupNormalizer(groups=["agency", "sku"], center=False) }, + "compatible_pred_types": ["distr"], + "compatible_y_types": ["numeric"], + "expected_loss_ndim": 2, } @classmethod @@ -36,3 +39,22 @@ def get_encoder(cls): Returns a TorchNormalizer instance for rescaling parameters. """ return TorchNormalizer(center=False) + + @classmethod + def _get_test_dataloaders_from(cls, params=None): + """ + Returns test dataloaders configured for NegativeBinomialDistributionLoss. 
+ """ + from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + + if params is None: + params = {} + clip_target = cls._tags.get("clip_target", False) + data_loader_kwargs = cls._tags.get("data_loader_kwargs", {}).copy() + data_loader_kwargs.update(params.get("data_loader_kwargs", {})) + + data = data_with_covariates() + if clip_target: + data["target"] = data["target"].clip(1e-4, None) + dataloaders = make_dataloaders(data, **data_loader_kwargs) + return dataloaders diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_normal/_normal_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_normal/_normal_distribution_loss_pkg.py index 653eddfdd..9b4133073 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_normal/_normal_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_normal/_normal_distribution_loss_pkg.py @@ -17,6 +17,9 @@ class NormalDistributionLoss_pkg(_BasePtMetric): "distribution_type": "normal", "info:metric_name": "NormalDistributionLoss", "requires:data_type": "normal_distribution_forecast", + "compatible_pred_types": ["distr"], + "compatible_y_types": ["numeric"], + "expected_loss_ndim": 2, } @classmethod @@ -24,3 +27,19 @@ def get_cls(cls): from pytorch_forecasting.metrics.distributions import NormalDistributionLoss return NormalDistributionLoss + + @classmethod + def _get_test_dataloaders_from(cls, params=None): + """ + Returns test dataloaders configured for NormalDistributionLoss. 
+ """ + from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + + if params is None: + params = {} + data_loader_kwargs = cls._tags.get("data_loader_kwargs", {}).copy() + data_loader_kwargs.update(params.get("data_loader_kwargs", {})) + + data = data_with_covariates() + dataloaders = make_dataloaders(data, **data_loader_kwargs) + return dataloaders diff --git a/pytorch_forecasting/metrics/_point_pkg/_cross_entropy/_cross_entropy_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_cross_entropy/_cross_entropy_pkg.py index 03ae9e647..5104d5c69 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_cross_entropy/_cross_entropy_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_cross_entropy/_cross_entropy_pkg.py @@ -18,6 +18,8 @@ class CrossEntropy_pkg(_BasePtMetric): "requires:data_type": "classification_forecast", "info:metric_name": "CrossEntropy", "no_rescaling": True, + "compatible_pred_types": ["point"], + "compatible_y_types": ["category"], } @classmethod @@ -25,3 +27,20 @@ def get_cls(cls): from pytorch_forecasting.metrics import CrossEntropy return CrossEntropy + + @classmethod + def _get_test_dataloaders_from(cls, params=None): + """ + Returns test dataloaders configured for CrossEntropy. 
+ """ + from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + + if params is None: + params = {} + data_loader_kwargs = params.get("data_loader_kwargs", {}) + # For classification, set target to a categorical column, e.g., "agency" + data_loader_kwargs.setdefault("target", "agency") + + data = data_with_covariates() + dataloaders = make_dataloaders(data, **data_loader_kwargs) + return dataloaders diff --git a/pytorch_forecasting/metrics/_point_pkg/_mae/_mae_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_mae/_mae_pkg.py index f2db78f80..a82111fd0 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_mae/_mae_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_mae/_mae_pkg.py @@ -16,6 +16,8 @@ class MAE_pkg(_BasePtMetric): "metric_type": "point", "requires:data_type": "point_forecast", "info:metric_name": "MAE", + "compatible_pred_types": ["point"], + "compatible_y_types": ["numeric"], } @classmethod @@ -23,3 +25,20 @@ def get_cls(cls): from pytorch_forecasting.metrics import MAE return MAE + + @classmethod + def _get_test_dataloaders_from(cls, params=None): + """ + Returns test dataloaders configured for MAE. 
+ """ + from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + + if params is None: + params = {} + data_loader_kwargs = params.get("data_loader_kwargs", {}) + # For point metrics, default target is "target" + data_loader_kwargs.setdefault("target", "target") + + data = data_with_covariates() + dataloaders = make_dataloaders(data, **data_loader_kwargs) + return dataloaders diff --git a/pytorch_forecasting/metrics/_point_pkg/_mape/_mape_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_mape/_mape_pkg.py index db9051c75..5cc673d38 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_mape/_mape_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_mape/_mape_pkg.py @@ -18,6 +18,8 @@ class MAPE_pkg(_BasePtMetric): "metric_type": "point", "info:metric_name": "MAPE", "requires:data_type": "point_forecast", + "compatible_pred_types": ["point"], + "compatible_y_types": ["numeric"], } @classmethod @@ -25,3 +27,20 @@ def get_cls(cls): from pytorch_forecasting.metrics.point import MAPE return MAPE + + @classmethod + def _get_test_dataloaders_from(cls, params=None): + """ + Returns test dataloaders configured for MAPE. 
+ """ + from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + + if params is None: + params = {} + data_loader_kwargs = params.get("data_loader_kwargs", {}) + # For point metrics, default target is "target" + data_loader_kwargs.setdefault("target", "target") + + data = data_with_covariates() + dataloaders = make_dataloaders(data, **data_loader_kwargs) + return dataloaders diff --git a/pytorch_forecasting/metrics/_point_pkg/_mase/_mase_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_mase/_mase_pkg.py index f128b125d..eb3f428eb 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_mase/_mase_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_mase/_mase_pkg.py @@ -14,6 +14,8 @@ class MASE_pkg(_BasePtMetric): "metric_type": "point", "info:metric_name": "MASE", "requires:data_type": "point_forecast", + "compatible_pred_types": ["point"], + "compatible_y_types": ["numeric"], } @classmethod @@ -21,3 +23,20 @@ def get_cls(cls): from pytorch_forecasting.metrics import MASE return MASE + + @classmethod + def _get_test_dataloaders_from(cls, params=None): + """ + Returns test dataloaders configured for MASE. 
+ """ + from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + + if params is None: + params = {} + data_loader_kwargs = params.get("data_loader_kwargs", {}) + # For point metrics, default target is "target" + data_loader_kwargs.setdefault("target", "target") + + data = data_with_covariates() + dataloaders = make_dataloaders(data, **data_loader_kwargs) + return dataloaders diff --git a/pytorch_forecasting/metrics/_point_pkg/_poisson/_poisson_loss_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_poisson/_poisson_loss_pkg.py index 6ac1c3338..61aab5af1 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_poisson/_poisson_loss_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_poisson/_poisson_loss_pkg.py @@ -18,6 +18,8 @@ class PoissonLoss_pkg(_BasePtMetric): "requires:data_type": "point_forecast", "capability:quantile_generation": True, "shape:adds_quantile_dimension": True, + "compatible_pred_types": ["point"], + "compatible_y_types": ["numeric"], } @classmethod @@ -25,3 +27,20 @@ def get_cls(cls): from pytorch_forecasting.metrics.point import PoissonLoss return PoissonLoss + + @classmethod + def _get_test_dataloaders_from(cls, params=None): + """ + Returns test dataloaders configured for PoissonLoss. 
+ """ + from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + + if params is None: + params = {} + data_loader_kwargs = params.get("data_loader_kwargs", {}) + # For point metrics, default target is "target" + data_loader_kwargs.setdefault("target", "target") + + data = data_with_covariates() + dataloaders = make_dataloaders(data, **data_loader_kwargs) + return dataloaders diff --git a/pytorch_forecasting/metrics/_point_pkg/_rmse/_rmse_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_rmse/_rmse_pkg.py index d9db301eb..43b5df235 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_rmse/_rmse_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_rmse/_rmse_pkg.py @@ -16,6 +16,8 @@ class RMSE_pkg(_BasePtMetric): "metric_type": "point", "info:metric_name": "RMSE", "requires:data_type": "point_forecast", + "compatible_pred_types": ["point"], + "compatible_y_types": ["numeric"], } # noqa: E501 @classmethod @@ -23,3 +25,20 @@ def get_cls(cls): from pytorch_forecasting.metrics.point import RMSE return RMSE + + @classmethod + def _get_test_dataloaders_from(cls, params=None): + """ + Returns test dataloaders configured for RMSE. 
+ """ + from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + + if params is None: + params = {} + data_loader_kwargs = params.get("data_loader_kwargs", {}) + # For point metrics, default target is "target" + data_loader_kwargs.setdefault("target", "target") + + data = data_with_covariates() + dataloaders = make_dataloaders(data, **data_loader_kwargs) + return dataloaders diff --git a/pytorch_forecasting/metrics/_point_pkg/_smape/_smape_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_smape/_smape_pkg.py index 00e0f3d13..f002c8037 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_smape/_smape_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_smape/_smape_pkg.py @@ -18,6 +18,8 @@ class SMAPE_pkg(_BasePtMetric): "metric_type": "point", "info:metric_name": "SMAPE", "requires:data_type": "point_forecast", + "compatible_pred_types": ["point"], + "compatible_y_types": ["numeric"], } # noqa: E501 @classmethod @@ -25,3 +27,20 @@ def get_cls(cls): from pytorch_forecasting.metrics.point import SMAPE return SMAPE + + @classmethod + def _get_test_dataloaders_from(cls, params=None): + """ + Returns test dataloaders configured for SMAPE. 
+ """ + from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + + if params is None: + params = {} + data_loader_kwargs = params.get("data_loader_kwargs", {}) + # For point metrics, default target is "target" + data_loader_kwargs.setdefault("target", "target") + + data = data_with_covariates() + dataloaders = make_dataloaders(data, **data_loader_kwargs) + return dataloaders diff --git a/pytorch_forecasting/metrics/_point_pkg/_tweedie/_tweedie_loss_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_tweedie/_tweedie_loss_pkg.py index 07250ff33..1f2207b10 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_tweedie/_tweedie_loss_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_tweedie/_tweedie_loss_pkg.py @@ -16,6 +16,8 @@ class TweedieLoss_pkg(_BasePtMetric): "metric_type": "point", "info:metric_name": "TweedieLoss", "requires:data_type": "point_forecast", + "compatible_pred_types": ["point"], + "compatible_y_types": ["numeric"], } # noqa: E501 @classmethod @@ -23,3 +25,20 @@ def get_cls(cls): from pytorch_forecasting.metrics.point import TweedieLoss return TweedieLoss + + @classmethod + def _get_test_dataloaders_from(cls, params=None): + """ + Returns test dataloaders configured for TweedieLoss. 
+ """ + from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + + if params is None: + params = {} + data_loader_kwargs = params.get("data_loader_kwargs", {}) + # For point metrics, default target is "target" + data_loader_kwargs.setdefault("target", "target") + + data = data_with_covariates() + dataloaders = make_dataloaders(data, **data_loader_kwargs) + return dataloaders diff --git a/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py b/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py index fd3e27c14..1c2a41054 100644 --- a/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py +++ b/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py @@ -16,6 +16,8 @@ class QuantileLoss_pkg(_BasePtMetric): "metric_type": "quantile", "info:metric_name": "QuantileLoss", "requires:data_type": "quantile_forecast", + "compatible_pred_types": ["quantile"], + "compatible_y_types": ["numeric"], } # noqa: E501 @classmethod @@ -34,3 +36,20 @@ def get_metric_test_params(cls): "quantiles": [0.2, 0.5], }, ] + + @classmethod + def _get_test_dataloaders_from(cls, params=None): + """ + Returns test dataloaders configured for QuantileLoss. 
+ """ + from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + + if params is None: + params = {} + data_loader_kwargs = params.get("data_loader_kwargs", {}) + # For quantile metrics, default target is "target" + data_loader_kwargs.setdefault("target", "target") + + data = data_with_covariates() + dataloaders = make_dataloaders(data, **data_loader_kwargs) + return dataloaders diff --git a/pytorch_forecasting/tests/_loss_mapping.py b/pytorch_forecasting/tests/_loss_mapping.py index 4131bd582..652aa28f0 100644 --- a/pytorch_forecasting/tests/_loss_mapping.py +++ b/pytorch_forecasting/tests/_loss_mapping.py @@ -27,33 +27,8 @@ NormalDistributionLoss_pkg, ) -POINT_LOSSES_NUMERIC = [ - MAE(), - RMSE(), - SMAPE(), - MAPE(), - PoissonLoss(), - MASE(), - TweedieLoss(), -] - -POINT_LOSSES_CATEGORY = [ - CrossEntropy(), -] - -QUANTILE_LOSSES_NUMERIC = [ - QuantileLoss(), -] - -DISTR_LOSSES_NUMERIC = [ - NormalDistributionLoss(), - NegativeBinomialDistributionLoss(), - MultivariateNormalDistributionLoss(), - LogNormalDistributionLoss(), - BetaDistributionLoss(), - ImplicitQuantileNetworkDistributionLoss(), - # todo: still need some debugging to add the MQF2DistributionLoss -] +# Remove legacy lists and mappings for losses by pred/y type and tensor shape checks. +# Use tags and _get_test_dataloaders_from for all compatibility and test setup. 
METRIC_PKGS = [ BetaDistributionLoss_pkg, @@ -64,15 +39,6 @@ ImplicitQuantileNetworkDistributionLoss_pkg ] -LOSSES_BY_PRED_AND_Y_TYPE = { - ("point", "numeric"): POINT_LOSSES_NUMERIC, - ("point", "category"): POINT_LOSSES_CATEGORY, - ("quantile", "numeric"): QUANTILE_LOSSES_NUMERIC, - ("quantile", "category"): [], - ("distr", "numeric"): DISTR_LOSSES_NUMERIC, - ("distr", "category"): [], -} - LOSS_SPECIFIC_PARAMS = { pkg._tags.get("info:metric_name", pkg.__name__.replace("_pkg", "")): { k: v for k, v in pkg._tags.items() if k not in ["metric_type", "distribution_type", "info:metric_name", "requires:data_type"] @@ -80,28 +46,30 @@ for pkg in METRIC_PKGS } - def get_compatible_losses(pred_types, y_types): - """Get compatible losses based on prediction types and target types. - - Parameters - ---------- - pred_types : list of str - Prediction types, e.g., ["point", "distr"] - y_types : list of str - Target types, e.g., ["numeric", "category"] - - Returns - ------- - list - List of compatible loss instances + """ + Get compatible losses based on prediction types and target types. """ compatible_losses = [] + for pkg in METRIC_PKGS: + pkg_pred_types = pkg._tags.get("compatible_pred_types", []) + pkg_y_types = pkg._tags.get("compatible_y_types", []) + if any(pt in pred_types for pt in pkg_pred_types) and any(yt in y_types for yt in pkg_y_types): + compatible_losses.append(pkg.get_cls()()) + return compatible_losses - for pred_type in pred_types: - for y_type in y_types: - key = (pred_type, y_type) - if key in LOSSES_BY_PRED_AND_Y_TYPE: - compatible_losses.extend(LOSSES_BY_PRED_AND_Y_TYPE[key]) +def get_test_dataloaders_for_loss(pkg, params=None): + """ + Get test dataloaders for a given loss package using its tags and method. 
+ """ + return pkg._get_test_dataloaders_from(params or {}) - return compatible_losses \ No newline at end of file +def check_loss_output_shape(pkg, y_pred, y_true): + """ + Check that the output shape of the loss matches the expected shape from tags. + """ + expected_ndim = pkg._tags.get("expected_loss_ndim", None) + loss_instance = pkg.get_cls()() + result = loss_instance(y_pred, y_true) + if expected_ndim is not None: + assert result.ndim == expected_ndim diff --git a/pytorch_forecasting/tests/test_all_estimators.py b/pytorch_forecasting/tests/test_all_estimators.py index d8eb7d81e..08a996c52 100644 --- a/pytorch_forecasting/tests/test_all_estimators.py +++ b/pytorch_forecasting/tests/test_all_estimators.py @@ -13,7 +13,6 @@ from pytorch_forecasting.tests._config import EXCLUDE_ESTIMATORS, EXCLUDED_TESTS from pytorch_forecasting.tests._loss_mapping import ( LOSS_SPECIFIC_PARAMS, - LOSSES_BY_PRED_AND_Y_TYPE, get_compatible_losses, ) From a0b814991e53a1a6ecf8f5bacdea9330bfec1efe Mon Sep 17 00:00:00 2001 From: paramthakkar123 Date: Mon, 8 Sep 2025 18:45:02 +0530 Subject: [PATCH 03/22] Updates --- docs/source/tutorials/ar.ipynb | 32 ++++++++++--- docs/source/tutorials/deepar.ipynb | 35 ++++++++++---- docs/source/tutorials/nhits.ipynb | 46 ++++++++++++++----- .../_beta/_beta_distribution_loss_pkg.py | 10 ++-- ..._quantile_network_distribution_loss_pkg.py | 7 ++- .../_log_normal_distribution_loss_pkg.py | 8 +++- .../_mqf2/_mqf2_distribution_loss_pkg.py | 3 +- ...ltivariate_normal_distribution_loss_pkg.py | 10 ++-- ...negative_binomial_distribution_loss_pkg.py | 8 +++- .../_normal/_normal_distribution_loss_pkg.py | 5 +- .../_cross_entropy/_cross_entropy_pkg.py | 5 +- .../metrics/_point_pkg/_mae/_mae_pkg.py | 5 +- .../metrics/_point_pkg/_mape/_mape_pkg.py | 5 +- .../metrics/_point_pkg/_mase/_mase_pkg.py | 5 +- .../_point_pkg/_poisson/_poisson_loss_pkg.py | 5 +- .../metrics/_point_pkg/_rmse/_rmse_pkg.py | 5 +- .../metrics/_point_pkg/_smape/_smape_pkg.py | 5 +- 
.../_point_pkg/_tweedie/_tweedie_loss_pkg.py | 5 +- .../_quantile_pkg/_quantile_loss_pkg.py | 5 +- pytorch_forecasting/tests/_loss_mapping.py | 22 +++++++-- 20 files changed, 177 insertions(+), 54 deletions(-) diff --git a/docs/source/tutorials/ar.ipynb b/docs/source/tutorials/ar.ipynb index 7cbf0fbc6..09bc1414f 100644 --- a/docs/source/tutorials/ar.ipynb +++ b/docs/source/tutorials/ar.ipynb @@ -187,10 +187,16 @@ " max_prediction_length=prediction_length,\n", ")\n", "\n", - "validation = TimeSeriesDataSet.from_dataset(training, data, min_prediction_idx=training_cutoff + 1)\n", + "validation = TimeSeriesDataSet.from_dataset(\n", + " training, data, min_prediction_idx=training_cutoff + 1\n", + ")\n", "batch_size = 128\n", - "train_dataloader = training.to_dataloader(train=True, batch_size=batch_size, num_workers=0)\n", - "val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size, num_workers=0)" + "train_dataloader = training.to_dataloader(\n", + " train=True, batch_size=batch_size, num_workers=0\n", + ")\n", + "val_dataloader = validation.to_dataloader(\n", + " train=False, batch_size=batch_size, num_workers=0\n", + ")" ] }, { @@ -269,7 +275,13 @@ "source": [ "pl.seed_everything(42)\n", "trainer = pl.Trainer(accelerator=\"auto\", gradient_clip_val=0.1)\n", - "net = NBeats.from_dataset(training, learning_rate=3e-2, weight_decay=1e-2, widths=[32, 512], backcast_loss_ratio=0.1)" + "net = NBeats.from_dataset(\n", + " training,\n", + " learning_rate=3e-2,\n", + " weight_decay=1e-2,\n", + " widths=[32, 512],\n", + " backcast_loss_ratio=0.1,\n", + ")" ] }, { @@ -323,7 +335,9 @@ "# find optimal learning rate\n", "from lightning.pytorch.tuner import Tuner\n", "\n", - "res = Tuner(trainer).lr_find(net, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader, min_lr=1e-5)\n", + "res = Tuner(trainer).lr_find(\n", + " net, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader, min_lr=1e-5\n", + ")\n", "print(f\"suggested learning rate: 
{res.suggestion()}\")\n", "fig = res.plot(show=True, suggest=True)\n", "fig.show()\n", @@ -443,7 +457,9 @@ } ], "source": [ - "early_stop_callback = EarlyStopping(monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\")\n", + "early_stop_callback = EarlyStopping(\n", + " monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\"\n", + ")\n", "trainer = pl.Trainer(\n", " max_epochs=3,\n", " accelerator=\"auto\",\n", @@ -645,7 +661,9 @@ ], "source": [ "for idx in range(10): # plot 10 examples\n", - " best_model.plot_prediction(raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True)" + " best_model.plot_prediction(\n", + " raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True\n", + " )" ] }, { diff --git a/docs/source/tutorials/deepar.ipynb b/docs/source/tutorials/deepar.ipynb index b6dd1fc1a..59b975fa9 100644 --- a/docs/source/tutorials/deepar.ipynb +++ b/docs/source/tutorials/deepar.ipynb @@ -184,7 +184,9 @@ " max_prediction_length=prediction_length,\n", ")\n", "\n", - "validation = TimeSeriesDataSet.from_dataset(training, data, min_prediction_idx=training_cutoff + 1)\n", + "validation = TimeSeriesDataSet.from_dataset(\n", + " training, data, min_prediction_idx=training_cutoff + 1\n", + ")\n", "batch_size = 128\n", "# synchronize samples in each batch over time - only necessary for DeepVAR, not for DeepAR\n", "train_dataloader = training.to_dataloader(\n", @@ -240,7 +242,9 @@ ], "source": [ "# calculate baseline absolute error\n", - "baseline_predictions = Baseline().predict(val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True)\n", + "baseline_predictions = Baseline().predict(\n", + " val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True\n", + ")\n", "SMAPE()(baseline_predictions.output, baseline_predictions.y)" ] }, @@ -845,7 +849,9 @@ } ], "source": [ - "early_stop_callback = EarlyStopping(monitor=\"val_loss\", min_delta=1e-4, patience=10, 
verbose=False, mode=\"min\")\n", + "early_stop_callback = EarlyStopping(\n", + " monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\"\n", + ")\n", "trainer = pl.Trainer(\n", " max_epochs=30,\n", " accelerator=\"cpu\",\n", @@ -913,7 +919,9 @@ ], "source": [ "# best_model = net\n", - "predictions = best_model.predict(val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True)\n", + "predictions = best_model.predict(\n", + " val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True\n", + ")\n", "MAE()(predictions.output, predictions.y)" ] }, @@ -935,7 +943,11 @@ ], "source": [ "raw_predictions = net.predict(\n", - " val_dataloader, mode=\"raw\", return_x=True, n_samples=100, trainer_kwargs=dict(accelerator=\"cpu\")\n", + " val_dataloader,\n", + " mode=\"raw\",\n", + " return_x=True,\n", + " n_samples=100,\n", + " trainer_kwargs=dict(accelerator=\"cpu\"),\n", ")" ] }, @@ -1148,7 +1160,9 @@ "source": [ "series = validation.x_to_index(raw_predictions.x)[\"series\"]\n", "for idx in range(20): # plot 10 examples\n", - " best_model.plot_prediction(raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True)\n", + " best_model.plot_prediction(\n", + " raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True\n", + " )\n", " plt.suptitle(f\"Series: {series.iloc[idx]}\")" ] }, @@ -1199,12 +1213,17 @@ "source": [ "cov_matrix = best_model.loss.map_x_to_distribution(\n", " best_model.predict(\n", - " val_dataloader, mode=(\"raw\", \"prediction\"), n_samples=None, trainer_kwargs=dict(accelerator=\"cpu\")\n", + " val_dataloader,\n", + " mode=(\"raw\", \"prediction\"),\n", + " n_samples=None,\n", + " trainer_kwargs=dict(accelerator=\"cpu\"),\n", " )\n", ").base_dist.covariance_matrix.mean(0)\n", "\n", "# normalize the covariance matrix diagnoal to 1.0\n", - "correlation_matrix = cov_matrix / torch.sqrt(torch.diag(cov_matrix)[None] * torch.diag(cov_matrix)[None].T)\n", + "correlation_matrix = 
cov_matrix / torch.sqrt(\n", + " torch.diag(cov_matrix)[None] * torch.diag(cov_matrix)[None].T\n", + ")\n", "\n", "fig, ax = plt.subplots(1, 1, figsize=(10, 10))\n", "ax.imshow(correlation_matrix, cmap=\"bwr\")" diff --git a/docs/source/tutorials/nhits.ipynb b/docs/source/tutorials/nhits.ipynb index bb79ae57a..2416c9ef5 100644 --- a/docs/source/tutorials/nhits.ipynb +++ b/docs/source/tutorials/nhits.ipynb @@ -189,10 +189,16 @@ " max_prediction_length=prediction_length,\n", ")\n", "\n", - "validation = TimeSeriesDataSet.from_dataset(training, data, min_prediction_idx=training_cutoff + 1)\n", + "validation = TimeSeriesDataSet.from_dataset(\n", + " training, data, min_prediction_idx=training_cutoff + 1\n", + ")\n", "batch_size = 128\n", - "train_dataloader = training.to_dataloader(train=True, batch_size=batch_size, num_workers=0)\n", - "val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size, num_workers=0)" + "train_dataloader = training.to_dataloader(\n", + " train=True, batch_size=batch_size, num_workers=0\n", + ")\n", + "val_dataloader = validation.to_dataloader(\n", + " train=False, batch_size=batch_size, num_workers=0\n", + ")" ] }, { @@ -240,7 +246,9 @@ ], "source": [ "# calculate baseline absolute error\n", - "baseline_predictions = Baseline().predict(val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True)\n", + "baseline_predictions = Baseline().predict(\n", + " val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True\n", + ")\n", "SMAPE()(baseline_predictions.output, baseline_predictions.y)" ] }, @@ -348,7 +356,11 @@ "from lightning.pytorch.tuner import Tuner\n", "\n", "res = Tuner(trainer).lr_find(\n", - " net, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader, min_lr=1e-5, max_lr=1e-1\n", + " net,\n", + " train_dataloaders=train_dataloader,\n", + " val_dataloaders=val_dataloader,\n", + " min_lr=1e-5,\n", + " max_lr=1e-1,\n", ")\n", "print(f\"suggested learning rate: 
{res.suggestion()}\")\n", "fig = res.plot(show=True, suggest=True)\n", @@ -498,7 +510,9 @@ } ], "source": [ - "early_stop_callback = EarlyStopping(monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\")\n", + "early_stop_callback = EarlyStopping(\n", + " monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\"\n", + ")\n", "trainer = pl.Trainer(\n", " max_epochs=5,\n", " accelerator=\"cpu\",\n", @@ -583,7 +597,9 @@ } ], "source": [ - "predictions = best_model.predict(val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True)\n", + "predictions = best_model.predict(\n", + " val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True\n", + ")\n", "MAE()(predictions.output, predictions.y)" ] }, @@ -612,7 +628,9 @@ } ], "source": [ - "raw_predictions = best_model.predict(val_dataloader, mode=\"raw\", return_x=True, trainer_kwargs=dict(accelerator=\"cpu\"))" + "raw_predictions = best_model.predict(\n", + " val_dataloader, mode=\"raw\", return_x=True, trainer_kwargs=dict(accelerator=\"cpu\")\n", + ")" ] }, { @@ -723,7 +741,9 @@ ], "source": [ "for idx in range(10): # plot 10 examples\n", - " best_model.plot_prediction(raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True)" + " best_model.plot_prediction(\n", + " raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True\n", + " )" ] }, { @@ -810,10 +830,14 @@ ], "source": [ "# sample 500 paths\n", - "samples = best_model.loss.sample(raw_predictions.output[\"prediction\"][[0]], n_samples=500)[0]\n", + "samples = best_model.loss.sample(\n", + " raw_predictions.output[\"prediction\"][[0]], n_samples=500\n", + ")[0]\n", "\n", "# plot prediction\n", - "fig = best_model.plot_prediction(raw_predictions.x, raw_predictions.output, idx=0, add_loss_to_title=True)\n", + "fig = best_model.plot_prediction(\n", + " raw_predictions.x, raw_predictions.output, idx=0, add_loss_to_title=True\n", + ")\n", "ax = fig.get_axes()[0]\n", 
"# plot first two sampled paths\n", "ax.plot(samples[:, 0], color=\"g\", label=\"Sample 1\")\n", diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py index 50160bc0d..70e17f5a6 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py @@ -3,8 +3,9 @@ """ from pytorch_forecasting.data import TorchNormalizer -from pytorch_forecasting.metrics.base_metrics._base_object import _BasePtMetric from pytorch_forecasting.data.encoders import GroupNormalizer +from pytorch_forecasting.metrics.base_metrics._base_object import _BasePtMetric + class BetaDistributionLoss_pkg(_BasePtMetric): """ @@ -39,13 +40,16 @@ def get_encoder(cls): Returns a TorchNormalizer instance for rescaling parameters. """ return TorchNormalizer(transformation="logit") - + @classmethod def _get_test_dataloaders_from(cls, params=None): """ Return test dataloaders configured for BetaDistributionLoss. 
""" - from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + from pytorch_forecasting.tests._data_scenarios import ( + data_with_covariates, + make_dataloaders, + ) if params is None: params = {} diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_implicit_quantile_network/_implicit_quantile_network_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_implicit_quantile_network/_implicit_quantile_network_distribution_loss_pkg.py index 61b8bc8d7..9b463a9b5 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_implicit_quantile_network/_implicit_quantile_network_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_implicit_quantile_network/_implicit_quantile_network_distribution_loss_pkg.py @@ -52,7 +52,10 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for ImplicitQuantileNetworkDistributionLoss. """ - from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + from pytorch_forecasting.tests._data_scenarios import ( + data_with_covariates, + make_dataloaders, + ) if params is None: params = {} @@ -60,5 +63,5 @@ def _get_test_dataloaders_from(cls, params=None): data_loader_kwargs.update(params.get("data_loader_kwargs", {})) data = data_with_covariates() - dataloaders = make_dataloaders(data **data_loader_kwargs) + dataloaders = make_dataloaders(data**data_loader_kwargs) return dataloaders diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py index b3d072317..f0806a898 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py @@ -5,8 +5,9 @@ import torch from pytorch_forecasting.data 
import TorchNormalizer -from pytorch_forecasting.metrics.base_metrics._base_object import _BasePtMetric from pytorch_forecasting.data.encoders import GroupNormalizer +from pytorch_forecasting.metrics.base_metrics._base_object import _BasePtMetric + class LogNormalDistributionLoss_pkg(_BasePtMetric): """ @@ -63,7 +64,10 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for LogNormalDistributionLoss. """ - from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + from pytorch_forecasting.tests._data_scenarios import ( + data_with_covariates, + make_dataloaders, + ) if params is None: params = {} diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_mqf2/_mqf2_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_mqf2/_mqf2_distribution_loss_pkg.py index 11e536c96..25182c279 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_mqf2/_mqf2_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_mqf2/_mqf2_distribution_loss_pkg.py @@ -3,8 +3,9 @@ """ from pytorch_forecasting.data import TorchNormalizer -from pytorch_forecasting.metrics.base_metrics._base_object import _BasePtMetric from pytorch_forecasting.data.encoders import GroupNormalizer +from pytorch_forecasting.metrics.base_metrics._base_object import _BasePtMetric + class MQF2DistributionLoss_pkg(_BasePtMetric): """ diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py index 247edf0bd..92aca1ade 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py @@ -2,8 +2,9 @@ Package container for multivariate 
normal distribution loss metric. """ -from pytorch_forecasting.metrics.base_metrics._base_object import _BasePtMetric from pytorch_forecasting.data.encoders import GroupNormalizer +from pytorch_forecasting.metrics.base_metrics._base_object import _BasePtMetric + class MultivariateNormalDistributionLoss_pkg(_BasePtMetric): """ @@ -40,7 +41,10 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for MultivariateNormalDistributionLoss. """ - from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + from pytorch_forecasting.tests._data_scenarios import ( + data_with_covariates, + make_dataloaders, + ) if params is None: params = {} @@ -49,4 +53,4 @@ def _get_test_dataloaders_from(cls, params=None): data = data_with_covariates() dataloaders = make_dataloaders(data, **data_loader_kwargs) - return dataloaders \ No newline at end of file + return dataloaders diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py index 63625f6f5..8b30945fe 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py @@ -3,8 +3,9 @@ """ from pytorch_forecasting.data import TorchNormalizer -from pytorch_forecasting.metrics.base_metrics._base_object import _BasePtMetric from pytorch_forecasting.data.encoders import GroupNormalizer +from pytorch_forecasting.metrics.base_metrics._base_object import _BasePtMetric + class NegativeBinomialDistributionLoss_pkg(_BasePtMetric): """ @@ -45,7 +46,10 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for NegativeBinomialDistributionLoss. 
""" - from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + from pytorch_forecasting.tests._data_scenarios import ( + data_with_covariates, + make_dataloaders, + ) if params is None: params = {} diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_normal/_normal_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_normal/_normal_distribution_loss_pkg.py index 9b4133073..84fbfeef2 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_normal/_normal_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_normal/_normal_distribution_loss_pkg.py @@ -33,7 +33,10 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for NormalDistributionLoss. """ - from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + from pytorch_forecasting.tests._data_scenarios import ( + data_with_covariates, + make_dataloaders, + ) if params is None: params = {} diff --git a/pytorch_forecasting/metrics/_point_pkg/_cross_entropy/_cross_entropy_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_cross_entropy/_cross_entropy_pkg.py index 5104d5c69..b3a12fa5e 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_cross_entropy/_cross_entropy_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_cross_entropy/_cross_entropy_pkg.py @@ -33,7 +33,10 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for CrossEntropy. 
""" - from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + from pytorch_forecasting.tests._data_scenarios import ( + data_with_covariates, + make_dataloaders, + ) if params is None: params = {} diff --git a/pytorch_forecasting/metrics/_point_pkg/_mae/_mae_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_mae/_mae_pkg.py index a82111fd0..29c3a5c89 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_mae/_mae_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_mae/_mae_pkg.py @@ -31,7 +31,10 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for MAE. """ - from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + from pytorch_forecasting.tests._data_scenarios import ( + data_with_covariates, + make_dataloaders, + ) if params is None: params = {} diff --git a/pytorch_forecasting/metrics/_point_pkg/_mape/_mape_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_mape/_mape_pkg.py index 5cc673d38..bec4cfa04 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_mape/_mape_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_mape/_mape_pkg.py @@ -33,7 +33,10 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for MAPE. """ - from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + from pytorch_forecasting.tests._data_scenarios import ( + data_with_covariates, + make_dataloaders, + ) if params is None: params = {} diff --git a/pytorch_forecasting/metrics/_point_pkg/_mase/_mase_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_mase/_mase_pkg.py index eb3f428eb..7916547e0 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_mase/_mase_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_mase/_mase_pkg.py @@ -29,7 +29,10 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for MASE. 
""" - from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + from pytorch_forecasting.tests._data_scenarios import ( + data_with_covariates, + make_dataloaders, + ) if params is None: params = {} diff --git a/pytorch_forecasting/metrics/_point_pkg/_poisson/_poisson_loss_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_poisson/_poisson_loss_pkg.py index 61aab5af1..3f4e6f091 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_poisson/_poisson_loss_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_poisson/_poisson_loss_pkg.py @@ -33,7 +33,10 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for PoissonLoss. """ - from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + from pytorch_forecasting.tests._data_scenarios import ( + data_with_covariates, + make_dataloaders, + ) if params is None: params = {} diff --git a/pytorch_forecasting/metrics/_point_pkg/_rmse/_rmse_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_rmse/_rmse_pkg.py index 43b5df235..c14e1f6ac 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_rmse/_rmse_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_rmse/_rmse_pkg.py @@ -31,7 +31,10 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for RMSE. 
""" - from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + from pytorch_forecasting.tests._data_scenarios import ( + data_with_covariates, + make_dataloaders, + ) if params is None: params = {} diff --git a/pytorch_forecasting/metrics/_point_pkg/_smape/_smape_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_smape/_smape_pkg.py index f002c8037..77c24dc10 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_smape/_smape_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_smape/_smape_pkg.py @@ -33,7 +33,10 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for SMAPE. """ - from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + from pytorch_forecasting.tests._data_scenarios import ( + data_with_covariates, + make_dataloaders, + ) if params is None: params = {} diff --git a/pytorch_forecasting/metrics/_point_pkg/_tweedie/_tweedie_loss_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_tweedie/_tweedie_loss_pkg.py index 1f2207b10..953e4ed4e 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_tweedie/_tweedie_loss_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_tweedie/_tweedie_loss_pkg.py @@ -31,7 +31,10 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for TweedieLoss. 
""" - from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + from pytorch_forecasting.tests._data_scenarios import ( + data_with_covariates, + make_dataloaders, + ) if params is None: params = {} diff --git a/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py b/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py index 1c2a41054..105b0f820 100644 --- a/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py +++ b/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py @@ -42,7 +42,10 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for QuantileLoss. """ - from pytorch_forecasting.tests._data_scenarios import data_with_covariates, make_dataloaders + from pytorch_forecasting.tests._data_scenarios import ( + data_with_covariates, + make_dataloaders, + ) if params is None: params = {} diff --git a/pytorch_forecasting/tests/_loss_mapping.py b/pytorch_forecasting/tests/_loss_mapping.py index 652aa28f0..351280497 100644 --- a/pytorch_forecasting/tests/_loss_mapping.py +++ b/pytorch_forecasting/tests/_loss_mapping.py @@ -16,7 +16,6 @@ QuantileLoss, TweedieLoss, ) - from pytorch_forecasting.metrics._distributions_pkg import ( BetaDistributionLoss_pkg, ImplicitQuantileNetworkDistributionLoss_pkg, @@ -34,18 +33,27 @@ BetaDistributionLoss_pkg, NegativeBinomialDistributionLoss_pkg, MultivariateNormalDistributionLoss_pkg, - LogNormalDistributionLoss_pkg, + LogNormalDistributionLoss_pkg, NormalDistributionLoss_pkg, - ImplicitQuantileNetworkDistributionLoss_pkg + ImplicitQuantileNetworkDistributionLoss_pkg, ] LOSS_SPECIFIC_PARAMS = { pkg._tags.get("info:metric_name", pkg.__name__.replace("_pkg", "")): { - k: v for k, v in pkg._tags.items() if k not in ["metric_type", "distribution_type", "info:metric_name", "requires:data_type"] + k: v + for k, v in pkg._tags.items() + if k + not in [ + "metric_type", + "distribution_type", + "info:metric_name", + 
"requires:data_type", + ] } for pkg in METRIC_PKGS } + def get_compatible_losses(pred_types, y_types): """ Get compatible losses based on prediction types and target types. @@ -54,16 +62,20 @@ def get_compatible_losses(pred_types, y_types): for pkg in METRIC_PKGS: pkg_pred_types = pkg._tags.get("compatible_pred_types", []) pkg_y_types = pkg._tags.get("compatible_y_types", []) - if any(pt in pred_types for pt in pkg_pred_types) and any(yt in y_types for yt in pkg_y_types): + if any(pt in pred_types for pt in pkg_pred_types) and any( + yt in y_types for yt in pkg_y_types + ): compatible_losses.append(pkg.get_cls()()) return compatible_losses + def get_test_dataloaders_for_loss(pkg, params=None): """ Get test dataloaders for a given loss package using its tags and method. """ return pkg._get_test_dataloaders_from(params or {}) + def check_loss_output_shape(pkg, y_pred, y_true): """ Check that the output shape of the loss matches the expected shape from tags. From ba5100da5e9b2560ef2e26955dd68689ec33aacf Mon Sep 17 00:00:00 2001 From: paramthakkar123 Date: Mon, 8 Sep 2025 18:57:45 +0530 Subject: [PATCH 04/22] Updates --- pytorch_forecasting/models/nbeats/_nbeats_pkg.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pytorch_forecasting/models/nbeats/_nbeats_pkg.py b/pytorch_forecasting/models/nbeats/_nbeats_pkg.py index daeab1c4e..a6f008771 100644 --- a/pytorch_forecasting/models/nbeats/_nbeats_pkg.py +++ b/pytorch_forecasting/models/nbeats/_nbeats_pkg.py @@ -40,6 +40,16 @@ def get_base_test_params(cls): """ return [{}, {"backcast_loss_ratio": 1.0}] + @classmethod + def get_test_train_params(cls): + """ + Return a list of parameter dictionaries for integration tests. + """ + return [ + {}, + {"backcast_loss_ratio": 1.0}, + ] + @classmethod def _get_test_dataloaders_from(cls, params): """Get dataloaders from parameters. 
From e7a7e6738816f8d2b6cc60c55566ba1d844afcbd Mon Sep 17 00:00:00 2001 From: paramthakkar123 Date: Wed, 10 Sep 2025 19:32:14 +0530 Subject: [PATCH 05/22] Updates --- docs/source/tutorials/ar.ipynb | 32 +++++---------------- docs/source/tutorials/deepar.ipynb | 35 ++++++----------------- docs/source/tutorials/nhits.ipynb | 46 +++++++----------------------- 3 files changed, 26 insertions(+), 87 deletions(-) diff --git a/docs/source/tutorials/ar.ipynb b/docs/source/tutorials/ar.ipynb index 09bc1414f..7cbf0fbc6 100644 --- a/docs/source/tutorials/ar.ipynb +++ b/docs/source/tutorials/ar.ipynb @@ -187,16 +187,10 @@ " max_prediction_length=prediction_length,\n", ")\n", "\n", - "validation = TimeSeriesDataSet.from_dataset(\n", - " training, data, min_prediction_idx=training_cutoff + 1\n", - ")\n", + "validation = TimeSeriesDataSet.from_dataset(training, data, min_prediction_idx=training_cutoff + 1)\n", "batch_size = 128\n", - "train_dataloader = training.to_dataloader(\n", - " train=True, batch_size=batch_size, num_workers=0\n", - ")\n", - "val_dataloader = validation.to_dataloader(\n", - " train=False, batch_size=batch_size, num_workers=0\n", - ")" + "train_dataloader = training.to_dataloader(train=True, batch_size=batch_size, num_workers=0)\n", + "val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size, num_workers=0)" ] }, { @@ -275,13 +269,7 @@ "source": [ "pl.seed_everything(42)\n", "trainer = pl.Trainer(accelerator=\"auto\", gradient_clip_val=0.1)\n", - "net = NBeats.from_dataset(\n", - " training,\n", - " learning_rate=3e-2,\n", - " weight_decay=1e-2,\n", - " widths=[32, 512],\n", - " backcast_loss_ratio=0.1,\n", - ")" + "net = NBeats.from_dataset(training, learning_rate=3e-2, weight_decay=1e-2, widths=[32, 512], backcast_loss_ratio=0.1)" ] }, { @@ -335,9 +323,7 @@ "# find optimal learning rate\n", "from lightning.pytorch.tuner import Tuner\n", "\n", - "res = Tuner(trainer).lr_find(\n", - " net, train_dataloaders=train_dataloader, 
val_dataloaders=val_dataloader, min_lr=1e-5\n", - ")\n", + "res = Tuner(trainer).lr_find(net, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader, min_lr=1e-5)\n", "print(f\"suggested learning rate: {res.suggestion()}\")\n", "fig = res.plot(show=True, suggest=True)\n", "fig.show()\n", @@ -457,9 +443,7 @@ } ], "source": [ - "early_stop_callback = EarlyStopping(\n", - " monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\"\n", - ")\n", + "early_stop_callback = EarlyStopping(monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\")\n", "trainer = pl.Trainer(\n", " max_epochs=3,\n", " accelerator=\"auto\",\n", @@ -661,9 +645,7 @@ ], "source": [ "for idx in range(10): # plot 10 examples\n", - " best_model.plot_prediction(\n", - " raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True\n", - " )" + " best_model.plot_prediction(raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True)" ] }, { diff --git a/docs/source/tutorials/deepar.ipynb b/docs/source/tutorials/deepar.ipynb index 59b975fa9..b6dd1fc1a 100644 --- a/docs/source/tutorials/deepar.ipynb +++ b/docs/source/tutorials/deepar.ipynb @@ -184,9 +184,7 @@ " max_prediction_length=prediction_length,\n", ")\n", "\n", - "validation = TimeSeriesDataSet.from_dataset(\n", - " training, data, min_prediction_idx=training_cutoff + 1\n", - ")\n", + "validation = TimeSeriesDataSet.from_dataset(training, data, min_prediction_idx=training_cutoff + 1)\n", "batch_size = 128\n", "# synchronize samples in each batch over time - only necessary for DeepVAR, not for DeepAR\n", "train_dataloader = training.to_dataloader(\n", @@ -242,9 +240,7 @@ ], "source": [ "# calculate baseline absolute error\n", - "baseline_predictions = Baseline().predict(\n", - " val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True\n", - ")\n", + "baseline_predictions = Baseline().predict(val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), 
return_y=True)\n", "SMAPE()(baseline_predictions.output, baseline_predictions.y)" ] }, @@ -849,9 +845,7 @@ } ], "source": [ - "early_stop_callback = EarlyStopping(\n", - " monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\"\n", - ")\n", + "early_stop_callback = EarlyStopping(monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\")\n", "trainer = pl.Trainer(\n", " max_epochs=30,\n", " accelerator=\"cpu\",\n", @@ -919,9 +913,7 @@ ], "source": [ "# best_model = net\n", - "predictions = best_model.predict(\n", - " val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True\n", - ")\n", + "predictions = best_model.predict(val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True)\n", "MAE()(predictions.output, predictions.y)" ] }, @@ -943,11 +935,7 @@ ], "source": [ "raw_predictions = net.predict(\n", - " val_dataloader,\n", - " mode=\"raw\",\n", - " return_x=True,\n", - " n_samples=100,\n", - " trainer_kwargs=dict(accelerator=\"cpu\"),\n", + " val_dataloader, mode=\"raw\", return_x=True, n_samples=100, trainer_kwargs=dict(accelerator=\"cpu\")\n", ")" ] }, @@ -1160,9 +1148,7 @@ "source": [ "series = validation.x_to_index(raw_predictions.x)[\"series\"]\n", "for idx in range(20): # plot 10 examples\n", - " best_model.plot_prediction(\n", - " raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True\n", - " )\n", + " best_model.plot_prediction(raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True)\n", " plt.suptitle(f\"Series: {series.iloc[idx]}\")" ] }, @@ -1213,17 +1199,12 @@ "source": [ "cov_matrix = best_model.loss.map_x_to_distribution(\n", " best_model.predict(\n", - " val_dataloader,\n", - " mode=(\"raw\", \"prediction\"),\n", - " n_samples=None,\n", - " trainer_kwargs=dict(accelerator=\"cpu\"),\n", + " val_dataloader, mode=(\"raw\", \"prediction\"), n_samples=None, trainer_kwargs=dict(accelerator=\"cpu\")\n", " )\n", 
").base_dist.covariance_matrix.mean(0)\n", "\n", "# normalize the covariance matrix diagnoal to 1.0\n", - "correlation_matrix = cov_matrix / torch.sqrt(\n", - " torch.diag(cov_matrix)[None] * torch.diag(cov_matrix)[None].T\n", - ")\n", + "correlation_matrix = cov_matrix / torch.sqrt(torch.diag(cov_matrix)[None] * torch.diag(cov_matrix)[None].T)\n", "\n", "fig, ax = plt.subplots(1, 1, figsize=(10, 10))\n", "ax.imshow(correlation_matrix, cmap=\"bwr\")" diff --git a/docs/source/tutorials/nhits.ipynb b/docs/source/tutorials/nhits.ipynb index 2416c9ef5..bb79ae57a 100644 --- a/docs/source/tutorials/nhits.ipynb +++ b/docs/source/tutorials/nhits.ipynb @@ -189,16 +189,10 @@ " max_prediction_length=prediction_length,\n", ")\n", "\n", - "validation = TimeSeriesDataSet.from_dataset(\n", - " training, data, min_prediction_idx=training_cutoff + 1\n", - ")\n", + "validation = TimeSeriesDataSet.from_dataset(training, data, min_prediction_idx=training_cutoff + 1)\n", "batch_size = 128\n", - "train_dataloader = training.to_dataloader(\n", - " train=True, batch_size=batch_size, num_workers=0\n", - ")\n", - "val_dataloader = validation.to_dataloader(\n", - " train=False, batch_size=batch_size, num_workers=0\n", - ")" + "train_dataloader = training.to_dataloader(train=True, batch_size=batch_size, num_workers=0)\n", + "val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size, num_workers=0)" ] }, { @@ -246,9 +240,7 @@ ], "source": [ "# calculate baseline absolute error\n", - "baseline_predictions = Baseline().predict(\n", - " val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True\n", - ")\n", + "baseline_predictions = Baseline().predict(val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True)\n", "SMAPE()(baseline_predictions.output, baseline_predictions.y)" ] }, @@ -356,11 +348,7 @@ "from lightning.pytorch.tuner import Tuner\n", "\n", "res = Tuner(trainer).lr_find(\n", - " net,\n", - " train_dataloaders=train_dataloader,\n", - " 
val_dataloaders=val_dataloader,\n", - " min_lr=1e-5,\n", - " max_lr=1e-1,\n", + " net, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader, min_lr=1e-5, max_lr=1e-1\n", ")\n", "print(f\"suggested learning rate: {res.suggestion()}\")\n", "fig = res.plot(show=True, suggest=True)\n", @@ -510,9 +498,7 @@ } ], "source": [ - "early_stop_callback = EarlyStopping(\n", - " monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\"\n", - ")\n", + "early_stop_callback = EarlyStopping(monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\")\n", "trainer = pl.Trainer(\n", " max_epochs=5,\n", " accelerator=\"cpu\",\n", @@ -597,9 +583,7 @@ } ], "source": [ - "predictions = best_model.predict(\n", - " val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True\n", - ")\n", + "predictions = best_model.predict(val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True)\n", "MAE()(predictions.output, predictions.y)" ] }, @@ -628,9 +612,7 @@ } ], "source": [ - "raw_predictions = best_model.predict(\n", - " val_dataloader, mode=\"raw\", return_x=True, trainer_kwargs=dict(accelerator=\"cpu\")\n", - ")" + "raw_predictions = best_model.predict(val_dataloader, mode=\"raw\", return_x=True, trainer_kwargs=dict(accelerator=\"cpu\"))" ] }, { @@ -741,9 +723,7 @@ ], "source": [ "for idx in range(10): # plot 10 examples\n", - " best_model.plot_prediction(\n", - " raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True\n", - " )" + " best_model.plot_prediction(raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True)" ] }, { @@ -830,14 +810,10 @@ ], "source": [ "# sample 500 paths\n", - "samples = best_model.loss.sample(\n", - " raw_predictions.output[\"prediction\"][[0]], n_samples=500\n", - ")[0]\n", + "samples = best_model.loss.sample(raw_predictions.output[\"prediction\"][[0]], n_samples=500)[0]\n", "\n", "# plot prediction\n", - "fig = best_model.plot_prediction(\n", - " 
raw_predictions.x, raw_predictions.output, idx=0, add_loss_to_title=True\n", - ")\n", + "fig = best_model.plot_prediction(raw_predictions.x, raw_predictions.output, idx=0, add_loss_to_title=True)\n", "ax = fig.get_axes()[0]\n", "# plot first two sampled paths\n", "ax.plot(samples[:, 0], color=\"g\", label=\"Sample 1\")\n", From 4a82f611c2512e4c07299a63ca84b9ae7eb10e8b Mon Sep 17 00:00:00 2001 From: paramthakkar123 Date: Wed, 10 Sep 2025 23:58:24 +0530 Subject: [PATCH 06/22] Updates --- .../_beta/_beta_distribution_loss_pkg.py | 4 +- ..._quantile_network_distribution_loss_pkg.py | 4 +- .../_log_normal_distribution_loss_pkg.py | 4 +- ...ltivariate_normal_distribution_loss_pkg.py | 4 +- ...negative_binomial_distribution_loss_pkg.py | 4 +- .../_normal/_normal_distribution_loss_pkg.py | 4 +- .../_cross_entropy/_cross_entropy_pkg.py | 4 +- .../metrics/_point_pkg/_mae/_mae_pkg.py | 4 +- .../metrics/_point_pkg/_mape/_mape_pkg.py | 4 +- .../metrics/_point_pkg/_mase/_mase_pkg.py | 4 +- .../_point_pkg/_poisson/_poisson_loss_pkg.py | 4 +- .../metrics/_point_pkg/_rmse/_rmse_pkg.py | 4 +- .../metrics/_point_pkg/_smape/_smape_pkg.py | 4 +- .../_point_pkg/_tweedie/_tweedie_loss_pkg.py | 4 +- .../_quantile_pkg/_quantile_loss_pkg.py | 4 +- pytorch_forecasting/tests/_loss_mapping.py | 45 ++++++++++--------- .../tests/test_all_estimators.py | 2 +- 17 files changed, 55 insertions(+), 52 deletions(-) diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py index 70e17f5a6..cc70d893e 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py @@ -23,8 +23,8 @@ class BetaDistributionLoss_pkg(_BasePtMetric): groups=["agency", "sku"], transformation="logit" ) }, - "compatible_pred_types": ["distr"], - "compatible_y_types": ["numeric"], + 
"info:pred_type": ["distr"], + "info:y_type": ["numeric"], "expected_loss_ndim": 2, } diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_implicit_quantile_network/_implicit_quantile_network_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_implicit_quantile_network/_implicit_quantile_network_distribution_loss_pkg.py index 9b463a9b5..56696c781 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_implicit_quantile_network/_implicit_quantile_network_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_implicit_quantile_network/_implicit_quantile_network_distribution_loss_pkg.py @@ -18,8 +18,8 @@ class ImplicitQuantileNetworkDistributionLoss_pkg(_BasePtMetric): "requires:data_type": "implicit_quantile_network_distribution_forecast", "capability:quantile_generation": True, "shape:adds_quantile_dimension": True, - "compatible_pred_types": ["distr"], - "compatible_y_types": ["numeric"], + "info:pred_type": ["distr"], + "info:y_type": ["numeric"], } @classmethod diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py index f0806a898..327b4f6e4 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py @@ -25,8 +25,8 @@ class LogNormalDistributionLoss_pkg(_BasePtMetric): groups=["agency", "sku"], transformation="log1p" ) }, - "compatible_pred_types": ["distr"], - "compatible_y_types": ["numeric"], + "info:pred_type": ["distr"], + "info:y_type": ["numeric"], "expected_loss_ndim": 2, } diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py 
b/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py index 92aca1ade..71fadc48c 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py @@ -23,8 +23,8 @@ class MultivariateNormalDistributionLoss_pkg(_BasePtMetric): groups=["agency", "sku"], transformation="log1p" ) }, - "compatible_pred_types": ["distr"], - "compatible_y_types": ["numeric"], + "info:pred_type": ["distr"], + "info:y_type": ["numeric"], "expected_loss_ndim": 2, } diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py index 8b30945fe..f4dca1c39 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py @@ -21,8 +21,8 @@ class NegativeBinomialDistributionLoss_pkg(_BasePtMetric): "data_loader_kwargs": { "target_normalizer": GroupNormalizer(groups=["agency", "sku"], center=False) }, - "compatible_pred_types": ["distr"], - "compatible_y_types": ["numeric"], + "info:pred_type": ["distr"], + "info:y_type": ["numeric"], "expected_loss_ndim": 2, } diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_normal/_normal_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_normal/_normal_distribution_loss_pkg.py index 84fbfeef2..dd4cb617d 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_normal/_normal_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_normal/_normal_distribution_loss_pkg.py @@ -17,8 +17,8 @@ class 
NormalDistributionLoss_pkg(_BasePtMetric): "distribution_type": "normal", "info:metric_name": "NormalDistributionLoss", "requires:data_type": "normal_distribution_forecast", - "compatible_pred_types": ["distr"], - "compatible_y_types": ["numeric"], + "info:pred_type": ["distr"], + "info:y_type": ["numeric"], "expected_loss_ndim": 2, } diff --git a/pytorch_forecasting/metrics/_point_pkg/_cross_entropy/_cross_entropy_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_cross_entropy/_cross_entropy_pkg.py index b3a12fa5e..f3be4d37e 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_cross_entropy/_cross_entropy_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_cross_entropy/_cross_entropy_pkg.py @@ -18,8 +18,8 @@ class CrossEntropy_pkg(_BasePtMetric): "requires:data_type": "classification_forecast", "info:metric_name": "CrossEntropy", "no_rescaling": True, - "compatible_pred_types": ["point"], - "compatible_y_types": ["category"], + "info:pred_type": ["point"], + "info:y_type": ["category"], } @classmethod diff --git a/pytorch_forecasting/metrics/_point_pkg/_mae/_mae_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_mae/_mae_pkg.py index 29c3a5c89..471d3e523 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_mae/_mae_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_mae/_mae_pkg.py @@ -16,8 +16,8 @@ class MAE_pkg(_BasePtMetric): "metric_type": "point", "requires:data_type": "point_forecast", "info:metric_name": "MAE", - "compatible_pred_types": ["point"], - "compatible_y_types": ["numeric"], + "info:pred_type": ["point"], + "info:y_type": ["numeric"], } @classmethod diff --git a/pytorch_forecasting/metrics/_point_pkg/_mape/_mape_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_mape/_mape_pkg.py index bec4cfa04..b28ff7768 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_mape/_mape_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_mape/_mape_pkg.py @@ -18,8 +18,8 @@ class MAPE_pkg(_BasePtMetric): "metric_type": "point", "info:metric_name": "MAPE", 
"requires:data_type": "point_forecast", - "compatible_pred_types": ["point"], - "compatible_y_types": ["numeric"], + "info:pred_type": ["point"], + "info:y_type": ["numeric"], } @classmethod diff --git a/pytorch_forecasting/metrics/_point_pkg/_mase/_mase_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_mase/_mase_pkg.py index 7916547e0..5fa110710 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_mase/_mase_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_mase/_mase_pkg.py @@ -14,8 +14,8 @@ class MASE_pkg(_BasePtMetric): "metric_type": "point", "info:metric_name": "MASE", "requires:data_type": "point_forecast", - "compatible_pred_types": ["point"], - "compatible_y_types": ["numeric"], + "info:pred_type": ["point"], + "info:y_type": ["numeric"], } @classmethod diff --git a/pytorch_forecasting/metrics/_point_pkg/_poisson/_poisson_loss_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_poisson/_poisson_loss_pkg.py index 3f4e6f091..fa9520b45 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_poisson/_poisson_loss_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_poisson/_poisson_loss_pkg.py @@ -18,8 +18,8 @@ class PoissonLoss_pkg(_BasePtMetric): "requires:data_type": "point_forecast", "capability:quantile_generation": True, "shape:adds_quantile_dimension": True, - "compatible_pred_types": ["point"], - "compatible_y_types": ["numeric"], + "info:pred_type": ["point"], + "info:y_type": ["numeric"], } @classmethod diff --git a/pytorch_forecasting/metrics/_point_pkg/_rmse/_rmse_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_rmse/_rmse_pkg.py index c14e1f6ac..facb2ebb5 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_rmse/_rmse_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_rmse/_rmse_pkg.py @@ -16,8 +16,8 @@ class RMSE_pkg(_BasePtMetric): "metric_type": "point", "info:metric_name": "RMSE", "requires:data_type": "point_forecast", - "compatible_pred_types": ["point"], - "compatible_y_types": ["numeric"], + "info:pred_type": ["point"], + "info:y_type": 
["numeric"], } # noqa: E501 @classmethod diff --git a/pytorch_forecasting/metrics/_point_pkg/_smape/_smape_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_smape/_smape_pkg.py index 77c24dc10..6dfca224f 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_smape/_smape_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_smape/_smape_pkg.py @@ -18,8 +18,8 @@ class SMAPE_pkg(_BasePtMetric): "metric_type": "point", "info:metric_name": "SMAPE", "requires:data_type": "point_forecast", - "compatible_pred_types": ["point"], - "compatible_y_types": ["numeric"], + "info:pred_type": ["point"], + "info:y_type": ["numeric"], } # noqa: E501 @classmethod diff --git a/pytorch_forecasting/metrics/_point_pkg/_tweedie/_tweedie_loss_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_tweedie/_tweedie_loss_pkg.py index 953e4ed4e..330164480 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_tweedie/_tweedie_loss_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_tweedie/_tweedie_loss_pkg.py @@ -16,8 +16,8 @@ class TweedieLoss_pkg(_BasePtMetric): "metric_type": "point", "info:metric_name": "TweedieLoss", "requires:data_type": "point_forecast", - "compatible_pred_types": ["point"], - "compatible_y_types": ["numeric"], + "info:pred_type": ["point"], + "info:y_types": ["numeric"], } # noqa: E501 @classmethod diff --git a/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py b/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py index 105b0f820..8de6e33b2 100644 --- a/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py +++ b/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py @@ -16,8 +16,8 @@ class QuantileLoss_pkg(_BasePtMetric): "metric_type": "quantile", "info:metric_name": "QuantileLoss", "requires:data_type": "quantile_forecast", - "compatible_pred_types": ["quantile"], - "compatible_y_types": ["numeric"], + "info:pred_type": ["quantile"], + "info:y_type": ["numeric"], } # noqa: E501 @classmethod diff --git 
a/pytorch_forecasting/tests/_loss_mapping.py b/pytorch_forecasting/tests/_loss_mapping.py index 351280497..8b3d0b61a 100644 --- a/pytorch_forecasting/tests/_loss_mapping.py +++ b/pytorch_forecasting/tests/_loss_mapping.py @@ -1,21 +1,3 @@ -from pytorch_forecasting.metrics import ( - MAE, - MAPE, - MASE, - RMSE, - SMAPE, - BetaDistributionLoss, - CrossEntropy, - ImplicitQuantileNetworkDistributionLoss, - LogNormalDistributionLoss, - MQF2DistributionLoss, - MultivariateNormalDistributionLoss, - NegativeBinomialDistributionLoss, - NormalDistributionLoss, - PoissonLoss, - QuantileLoss, - TweedieLoss, -) from pytorch_forecasting.metrics._distributions_pkg import ( BetaDistributionLoss_pkg, ImplicitQuantileNetworkDistributionLoss_pkg, @@ -26,6 +8,19 @@ NormalDistributionLoss_pkg, ) +from pytorch_forecasting.metrics._point_pkg import ( + MAE_pkg, + MAPE_pkg, + MASE_pkg, + RMSE_pkg, + SMAPE_pkg, + PoissonLoss_pkg, + CrossEntropy_pkg, + TweedieLoss_pkg, +) + +from pytorch_forecasting.metrics._quantile_pkg import QuantileLoss_pkg + # Remove legacy lists and mappings for losses by pred/y type and tensor shape checks. # Use tags and _get_test_dataloaders_from for all compatibility and test setup. @@ -36,6 +31,15 @@ LogNormalDistributionLoss_pkg, NormalDistributionLoss_pkg, ImplicitQuantileNetworkDistributionLoss_pkg, + MAE_pkg, + MAPE_pkg, + MASE_pkg, + RMSE_pkg, + SMAPE_pkg, + PoissonLoss_pkg, + TweedieLoss_pkg, + CrossEntropy_pkg, + QuantileLoss_pkg ] LOSS_SPECIFIC_PARAMS = { @@ -53,15 +57,14 @@ for pkg in METRIC_PKGS } - def get_compatible_losses(pred_types, y_types): """ Get compatible losses based on prediction types and target types. 
""" compatible_losses = [] for pkg in METRIC_PKGS: - pkg_pred_types = pkg._tags.get("compatible_pred_types", []) - pkg_y_types = pkg._tags.get("compatible_y_types", []) + pkg_pred_types = pkg._tags.get("info:pred_type", []) + pkg_y_types = pkg._tags.get("info:y_type", []) if any(pt in pred_types for pt in pkg_pred_types) and any( yt in y_types for yt in pkg_y_types ): diff --git a/pytorch_forecasting/tests/test_all_estimators.py b/pytorch_forecasting/tests/test_all_estimators.py index 08a996c52..ec7383136 100644 --- a/pytorch_forecasting/tests/test_all_estimators.py +++ b/pytorch_forecasting/tests/test_all_estimators.py @@ -413,4 +413,4 @@ def test_pkg_linkage(self, object_pkg, object_class): "The expected package name is " f"{object_class.__name__}_pkg." ) - assert object_pkg.__name__ == object_class.__name__ + "_pkg", msg + assert object_pkg.__name__ == object_class.__name__ + "_pkg", msg \ No newline at end of file From 1b76b451b11eeb72d23db45c4b970bc9c4ce94a2 Mon Sep 17 00:00:00 2001 From: paramthakkar123 Date: Thu, 11 Sep 2025 00:29:03 +0530 Subject: [PATCH 07/22] Updates --- docs/source/tutorials/ar.ipynb | 32 ++++++++++--- docs/source/tutorials/deepar.ipynb | 35 ++++++++++---- docs/source/tutorials/nhits.ipynb | 46 ++++++++++++++----- pytorch_forecasting/tests/_loss_mapping.py | 9 ++-- .../tests/test_all_estimators.py | 2 +- 5 files changed, 92 insertions(+), 32 deletions(-) diff --git a/docs/source/tutorials/ar.ipynb b/docs/source/tutorials/ar.ipynb index 7cbf0fbc6..09bc1414f 100644 --- a/docs/source/tutorials/ar.ipynb +++ b/docs/source/tutorials/ar.ipynb @@ -187,10 +187,16 @@ " max_prediction_length=prediction_length,\n", ")\n", "\n", - "validation = TimeSeriesDataSet.from_dataset(training, data, min_prediction_idx=training_cutoff + 1)\n", + "validation = TimeSeriesDataSet.from_dataset(\n", + " training, data, min_prediction_idx=training_cutoff + 1\n", + ")\n", "batch_size = 128\n", - "train_dataloader = training.to_dataloader(train=True, 
batch_size=batch_size, num_workers=0)\n", - "val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size, num_workers=0)" + "train_dataloader = training.to_dataloader(\n", + " train=True, batch_size=batch_size, num_workers=0\n", + ")\n", + "val_dataloader = validation.to_dataloader(\n", + " train=False, batch_size=batch_size, num_workers=0\n", + ")" ] }, { @@ -269,7 +275,13 @@ "source": [ "pl.seed_everything(42)\n", "trainer = pl.Trainer(accelerator=\"auto\", gradient_clip_val=0.1)\n", - "net = NBeats.from_dataset(training, learning_rate=3e-2, weight_decay=1e-2, widths=[32, 512], backcast_loss_ratio=0.1)" + "net = NBeats.from_dataset(\n", + " training,\n", + " learning_rate=3e-2,\n", + " weight_decay=1e-2,\n", + " widths=[32, 512],\n", + " backcast_loss_ratio=0.1,\n", + ")" ] }, { @@ -323,7 +335,9 @@ "# find optimal learning rate\n", "from lightning.pytorch.tuner import Tuner\n", "\n", - "res = Tuner(trainer).lr_find(net, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader, min_lr=1e-5)\n", + "res = Tuner(trainer).lr_find(\n", + " net, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader, min_lr=1e-5\n", + ")\n", "print(f\"suggested learning rate: {res.suggestion()}\")\n", "fig = res.plot(show=True, suggest=True)\n", "fig.show()\n", @@ -443,7 +457,9 @@ } ], "source": [ - "early_stop_callback = EarlyStopping(monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\")\n", + "early_stop_callback = EarlyStopping(\n", + " monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\"\n", + ")\n", "trainer = pl.Trainer(\n", " max_epochs=3,\n", " accelerator=\"auto\",\n", @@ -645,7 +661,9 @@ ], "source": [ "for idx in range(10): # plot 10 examples\n", - " best_model.plot_prediction(raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True)" + " best_model.plot_prediction(\n", + " raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True\n", + " )" ] }, { 
diff --git a/docs/source/tutorials/deepar.ipynb b/docs/source/tutorials/deepar.ipynb index b6dd1fc1a..59b975fa9 100644 --- a/docs/source/tutorials/deepar.ipynb +++ b/docs/source/tutorials/deepar.ipynb @@ -184,7 +184,9 @@ " max_prediction_length=prediction_length,\n", ")\n", "\n", - "validation = TimeSeriesDataSet.from_dataset(training, data, min_prediction_idx=training_cutoff + 1)\n", + "validation = TimeSeriesDataSet.from_dataset(\n", + " training, data, min_prediction_idx=training_cutoff + 1\n", + ")\n", "batch_size = 128\n", "# synchronize samples in each batch over time - only necessary for DeepVAR, not for DeepAR\n", "train_dataloader = training.to_dataloader(\n", @@ -240,7 +242,9 @@ ], "source": [ "# calculate baseline absolute error\n", - "baseline_predictions = Baseline().predict(val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True)\n", + "baseline_predictions = Baseline().predict(\n", + " val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True\n", + ")\n", "SMAPE()(baseline_predictions.output, baseline_predictions.y)" ] }, @@ -845,7 +849,9 @@ } ], "source": [ - "early_stop_callback = EarlyStopping(monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\")\n", + "early_stop_callback = EarlyStopping(\n", + " monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\"\n", + ")\n", "trainer = pl.Trainer(\n", " max_epochs=30,\n", " accelerator=\"cpu\",\n", @@ -913,7 +919,9 @@ ], "source": [ "# best_model = net\n", - "predictions = best_model.predict(val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True)\n", + "predictions = best_model.predict(\n", + " val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True\n", + ")\n", "MAE()(predictions.output, predictions.y)" ] }, @@ -935,7 +943,11 @@ ], "source": [ "raw_predictions = net.predict(\n", - " val_dataloader, mode=\"raw\", return_x=True, n_samples=100, trainer_kwargs=dict(accelerator=\"cpu\")\n", + " 
val_dataloader,\n", + " mode=\"raw\",\n", + " return_x=True,\n", + " n_samples=100,\n", + " trainer_kwargs=dict(accelerator=\"cpu\"),\n", ")" ] }, @@ -1148,7 +1160,9 @@ "source": [ "series = validation.x_to_index(raw_predictions.x)[\"series\"]\n", "for idx in range(20): # plot 10 examples\n", - " best_model.plot_prediction(raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True)\n", + " best_model.plot_prediction(\n", + " raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True\n", + " )\n", " plt.suptitle(f\"Series: {series.iloc[idx]}\")" ] }, @@ -1199,12 +1213,17 @@ "source": [ "cov_matrix = best_model.loss.map_x_to_distribution(\n", " best_model.predict(\n", - " val_dataloader, mode=(\"raw\", \"prediction\"), n_samples=None, trainer_kwargs=dict(accelerator=\"cpu\")\n", + " val_dataloader,\n", + " mode=(\"raw\", \"prediction\"),\n", + " n_samples=None,\n", + " trainer_kwargs=dict(accelerator=\"cpu\"),\n", " )\n", ").base_dist.covariance_matrix.mean(0)\n", "\n", "# normalize the covariance matrix diagnoal to 1.0\n", - "correlation_matrix = cov_matrix / torch.sqrt(torch.diag(cov_matrix)[None] * torch.diag(cov_matrix)[None].T)\n", + "correlation_matrix = cov_matrix / torch.sqrt(\n", + " torch.diag(cov_matrix)[None] * torch.diag(cov_matrix)[None].T\n", + ")\n", "\n", "fig, ax = plt.subplots(1, 1, figsize=(10, 10))\n", "ax.imshow(correlation_matrix, cmap=\"bwr\")" diff --git a/docs/source/tutorials/nhits.ipynb b/docs/source/tutorials/nhits.ipynb index bb79ae57a..2416c9ef5 100644 --- a/docs/source/tutorials/nhits.ipynb +++ b/docs/source/tutorials/nhits.ipynb @@ -189,10 +189,16 @@ " max_prediction_length=prediction_length,\n", ")\n", "\n", - "validation = TimeSeriesDataSet.from_dataset(training, data, min_prediction_idx=training_cutoff + 1)\n", + "validation = TimeSeriesDataSet.from_dataset(\n", + " training, data, min_prediction_idx=training_cutoff + 1\n", + ")\n", "batch_size = 128\n", - "train_dataloader = 
training.to_dataloader(train=True, batch_size=batch_size, num_workers=0)\n", - "val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size, num_workers=0)" + "train_dataloader = training.to_dataloader(\n", + " train=True, batch_size=batch_size, num_workers=0\n", + ")\n", + "val_dataloader = validation.to_dataloader(\n", + " train=False, batch_size=batch_size, num_workers=0\n", + ")" ] }, { @@ -240,7 +246,9 @@ ], "source": [ "# calculate baseline absolute error\n", - "baseline_predictions = Baseline().predict(val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True)\n", + "baseline_predictions = Baseline().predict(\n", + " val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True\n", + ")\n", "SMAPE()(baseline_predictions.output, baseline_predictions.y)" ] }, @@ -348,7 +356,11 @@ "from lightning.pytorch.tuner import Tuner\n", "\n", "res = Tuner(trainer).lr_find(\n", - " net, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader, min_lr=1e-5, max_lr=1e-1\n", + " net,\n", + " train_dataloaders=train_dataloader,\n", + " val_dataloaders=val_dataloader,\n", + " min_lr=1e-5,\n", + " max_lr=1e-1,\n", ")\n", "print(f\"suggested learning rate: {res.suggestion()}\")\n", "fig = res.plot(show=True, suggest=True)\n", @@ -498,7 +510,9 @@ } ], "source": [ - "early_stop_callback = EarlyStopping(monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\")\n", + "early_stop_callback = EarlyStopping(\n", + " monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\"\n", + ")\n", "trainer = pl.Trainer(\n", " max_epochs=5,\n", " accelerator=\"cpu\",\n", @@ -583,7 +597,9 @@ } ], "source": [ - "predictions = best_model.predict(val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True)\n", + "predictions = best_model.predict(\n", + " val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True\n", + ")\n", "MAE()(predictions.output, predictions.y)" ] }, @@ -612,7 
+628,9 @@ } ], "source": [ - "raw_predictions = best_model.predict(val_dataloader, mode=\"raw\", return_x=True, trainer_kwargs=dict(accelerator=\"cpu\"))" + "raw_predictions = best_model.predict(\n", + " val_dataloader, mode=\"raw\", return_x=True, trainer_kwargs=dict(accelerator=\"cpu\")\n", + ")" ] }, { @@ -723,7 +741,9 @@ ], "source": [ "for idx in range(10): # plot 10 examples\n", - " best_model.plot_prediction(raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True)" + " best_model.plot_prediction(\n", + " raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True\n", + " )" ] }, { @@ -810,10 +830,14 @@ ], "source": [ "# sample 500 paths\n", - "samples = best_model.loss.sample(raw_predictions.output[\"prediction\"][[0]], n_samples=500)[0]\n", + "samples = best_model.loss.sample(\n", + " raw_predictions.output[\"prediction\"][[0]], n_samples=500\n", + ")[0]\n", "\n", "# plot prediction\n", - "fig = best_model.plot_prediction(raw_predictions.x, raw_predictions.output, idx=0, add_loss_to_title=True)\n", + "fig = best_model.plot_prediction(\n", + " raw_predictions.x, raw_predictions.output, idx=0, add_loss_to_title=True\n", + ")\n", "ax = fig.get_axes()[0]\n", "# plot first two sampled paths\n", "ax.plot(samples[:, 0], color=\"g\", label=\"Sample 1\")\n", diff --git a/pytorch_forecasting/tests/_loss_mapping.py b/pytorch_forecasting/tests/_loss_mapping.py index 8b3d0b61a..96188e87b 100644 --- a/pytorch_forecasting/tests/_loss_mapping.py +++ b/pytorch_forecasting/tests/_loss_mapping.py @@ -7,18 +7,16 @@ NegativeBinomialDistributionLoss_pkg, NormalDistributionLoss_pkg, ) - from pytorch_forecasting.metrics._point_pkg import ( + CrossEntropy_pkg, MAE_pkg, MAPE_pkg, MASE_pkg, + PoissonLoss_pkg, RMSE_pkg, SMAPE_pkg, - PoissonLoss_pkg, - CrossEntropy_pkg, TweedieLoss_pkg, ) - from pytorch_forecasting.metrics._quantile_pkg import QuantileLoss_pkg # Remove legacy lists and mappings for losses by pred/y type and tensor shape checks. 
@@ -39,7 +37,7 @@ PoissonLoss_pkg, TweedieLoss_pkg, CrossEntropy_pkg, - QuantileLoss_pkg + QuantileLoss_pkg, ] LOSS_SPECIFIC_PARAMS = { @@ -57,6 +55,7 @@ for pkg in METRIC_PKGS } + def get_compatible_losses(pred_types, y_types): """ Get compatible losses based on prediction types and target types. diff --git a/pytorch_forecasting/tests/test_all_estimators.py b/pytorch_forecasting/tests/test_all_estimators.py index ec7383136..08a996c52 100644 --- a/pytorch_forecasting/tests/test_all_estimators.py +++ b/pytorch_forecasting/tests/test_all_estimators.py @@ -413,4 +413,4 @@ def test_pkg_linkage(self, object_pkg, object_class): "The expected package name is " f"{object_class.__name__}_pkg." ) - assert object_pkg.__name__ == object_class.__name__ + "_pkg", msg \ No newline at end of file + assert object_pkg.__name__ == object_class.__name__ + "_pkg", msg From 23b2821785574c8e02aac4ffb4951740cd6037de Mon Sep 17 00:00:00 2001 From: paramthakkar123 Date: Thu, 11 Sep 2025 01:55:32 +0530 Subject: [PATCH 08/22] Updates --- pytorch_forecasting/tests/_loss_mapping.py | 40 ++++++++++++++++++---- 1 file changed, 33 insertions(+), 7 deletions(-) diff --git a/pytorch_forecasting/tests/_loss_mapping.py b/pytorch_forecasting/tests/_loss_mapping.py index 96188e87b..704321fe3 100644 --- a/pytorch_forecasting/tests/_loss_mapping.py +++ b/pytorch_forecasting/tests/_loss_mapping.py @@ -7,6 +7,17 @@ NegativeBinomialDistributionLoss_pkg, NormalDistributionLoss_pkg, ) + +from pytorch_forecasting.metrics import ( + BetaDistributionLoss, + ImplicitQuantileNetworkDistributionLoss, + LogNormalDistributionLoss, + MQF2DistributionLoss, + MultivariateNormalDistributionLoss, + NegativeBinomialDistributionLoss, + NormalDistributionLoss, +) + from pytorch_forecasting.metrics._point_pkg import ( CrossEntropy_pkg, MAE_pkg, @@ -40,17 +51,32 @@ QuantileLoss_pkg, ] +DISTR_LOSSES_NUMERIC = [ + NormalDistributionLoss(), + NegativeBinomialDistributionLoss(), + MultivariateNormalDistributionLoss(), + 
LogNormalDistributionLoss(), + BetaDistributionLoss(), + ImplicitQuantileNetworkDistributionLoss(), + # todo: still need some debugging to add the MQF2DistributionLoss +] + LOSS_SPECIFIC_PARAMS = { pkg._tags.get("info:metric_name", pkg.__name__.replace("_pkg", "")): { k: v for k, v in pkg._tags.items() - if k - not in [ - "metric_type", - "distribution_type", - "info:metric_name", - "requires:data_type", - ] + if not ( + k.startswith("info:") or + k.startswith("capability:") or + k.startswith("shape:") or + k in [ + "metric_type", + "distribution_type", + "requires:data_type", + "no_rescaling", + "expected_loss_ndim", + ] + ) } for pkg in METRIC_PKGS } From 78e8712e07329201046e1683c86cf15762d82d9e Mon Sep 17 00:00:00 2001 From: paramthakkar123 Date: Thu, 11 Sep 2025 01:56:30 +0530 Subject: [PATCH 09/22] precommit --- pytorch_forecasting/tests/_loss_mapping.py | 29 +++++++++++----------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/pytorch_forecasting/tests/_loss_mapping.py b/pytorch_forecasting/tests/_loss_mapping.py index 704321fe3..755dfcfca 100644 --- a/pytorch_forecasting/tests/_loss_mapping.py +++ b/pytorch_forecasting/tests/_loss_mapping.py @@ -1,13 +1,3 @@ -from pytorch_forecasting.metrics._distributions_pkg import ( - BetaDistributionLoss_pkg, - ImplicitQuantileNetworkDistributionLoss_pkg, - LogNormalDistributionLoss_pkg, - MQF2DistributionLoss_pkg, - MultivariateNormalDistributionLoss_pkg, - NegativeBinomialDistributionLoss_pkg, - NormalDistributionLoss_pkg, -) - from pytorch_forecasting.metrics import ( BetaDistributionLoss, ImplicitQuantileNetworkDistributionLoss, @@ -17,7 +7,15 @@ NegativeBinomialDistributionLoss, NormalDistributionLoss, ) - +from pytorch_forecasting.metrics._distributions_pkg import ( + BetaDistributionLoss_pkg, + ImplicitQuantileNetworkDistributionLoss_pkg, + LogNormalDistributionLoss_pkg, + MQF2DistributionLoss_pkg, + MultivariateNormalDistributionLoss_pkg, + NegativeBinomialDistributionLoss_pkg, + 
NormalDistributionLoss_pkg, +) from pytorch_forecasting.metrics._point_pkg import ( CrossEntropy_pkg, MAE_pkg, @@ -66,10 +64,11 @@ k: v for k, v in pkg._tags.items() if not ( - k.startswith("info:") or - k.startswith("capability:") or - k.startswith("shape:") or - k in [ + k.startswith("info:") + or k.startswith("capability:") + or k.startswith("shape:") + or k + in [ "metric_type", "distribution_type", "requires:data_type", From e366916ad9d1e2a8e6d7975376c06a31605a67fc Mon Sep 17 00:00:00 2001 From: paramthakkar123 Date: Fri, 12 Sep 2025 19:00:31 +0530 Subject: [PATCH 10/22] Updates --- docs/source/tutorials/ar.ipynb | 32 +++---------- docs/source/tutorials/deepar.ipynb | 35 ++++---------- docs/source/tutorials/nhits.ipynb | 46 +++++-------------- .../_beta/_beta_distribution_loss_pkg.py | 21 ++------- ..._quantile_network_distribution_loss_pkg.py | 16 +------ .../_log_normal_distribution_loss_pkg.py | 19 +------- ...ltivariate_normal_distribution_loss_pkg.py | 14 +----- ...negative_binomial_distribution_loss_pkg.py | 17 +------ .../_normal/_normal_distribution_loss_pkg.py | 14 +----- .../_cross_entropy/_cross_entropy_pkg.py | 15 +----- .../metrics/_point_pkg/_mae/_mae_pkg.py | 15 +----- .../metrics/_point_pkg/_mape/_mape_pkg.py | 15 +----- .../metrics/_point_pkg/_mase/_mase_pkg.py | 15 +----- .../_point_pkg/_poisson/_poisson_loss_pkg.py | 15 +----- .../metrics/_point_pkg/_rmse/_rmse_pkg.py | 15 +----- .../metrics/_point_pkg/_smape/_smape_pkg.py | 15 +----- .../_point_pkg/_tweedie/_tweedie_loss_pkg.py | 15 +----- .../_quantile_pkg/_quantile_loss_pkg.py | 15 +----- .../metrics/base_metrics/_base_object.py | 23 ++++++++++ .../models/nhits/_nhits_pkg.py | 19 +++++++- pytorch_forecasting/tests/_loss_mapping.py | 19 -------- 21 files changed, 86 insertions(+), 324 deletions(-) diff --git a/docs/source/tutorials/ar.ipynb b/docs/source/tutorials/ar.ipynb index 09bc1414f..7cbf0fbc6 100644 --- a/docs/source/tutorials/ar.ipynb +++ b/docs/source/tutorials/ar.ipynb @@ -187,16 
+187,10 @@ " max_prediction_length=prediction_length,\n", ")\n", "\n", - "validation = TimeSeriesDataSet.from_dataset(\n", - " training, data, min_prediction_idx=training_cutoff + 1\n", - ")\n", + "validation = TimeSeriesDataSet.from_dataset(training, data, min_prediction_idx=training_cutoff + 1)\n", "batch_size = 128\n", - "train_dataloader = training.to_dataloader(\n", - " train=True, batch_size=batch_size, num_workers=0\n", - ")\n", - "val_dataloader = validation.to_dataloader(\n", - " train=False, batch_size=batch_size, num_workers=0\n", - ")" + "train_dataloader = training.to_dataloader(train=True, batch_size=batch_size, num_workers=0)\n", + "val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size, num_workers=0)" ] }, { @@ -275,13 +269,7 @@ "source": [ "pl.seed_everything(42)\n", "trainer = pl.Trainer(accelerator=\"auto\", gradient_clip_val=0.1)\n", - "net = NBeats.from_dataset(\n", - " training,\n", - " learning_rate=3e-2,\n", - " weight_decay=1e-2,\n", - " widths=[32, 512],\n", - " backcast_loss_ratio=0.1,\n", - ")" + "net = NBeats.from_dataset(training, learning_rate=3e-2, weight_decay=1e-2, widths=[32, 512], backcast_loss_ratio=0.1)" ] }, { @@ -335,9 +323,7 @@ "# find optimal learning rate\n", "from lightning.pytorch.tuner import Tuner\n", "\n", - "res = Tuner(trainer).lr_find(\n", - " net, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader, min_lr=1e-5\n", - ")\n", + "res = Tuner(trainer).lr_find(net, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader, min_lr=1e-5)\n", "print(f\"suggested learning rate: {res.suggestion()}\")\n", "fig = res.plot(show=True, suggest=True)\n", "fig.show()\n", @@ -457,9 +443,7 @@ } ], "source": [ - "early_stop_callback = EarlyStopping(\n", - " monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\"\n", - ")\n", + "early_stop_callback = EarlyStopping(monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\")\n", "trainer = 
pl.Trainer(\n", " max_epochs=3,\n", " accelerator=\"auto\",\n", @@ -661,9 +645,7 @@ ], "source": [ "for idx in range(10): # plot 10 examples\n", - " best_model.plot_prediction(\n", - " raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True\n", - " )" + " best_model.plot_prediction(raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True)" ] }, { diff --git a/docs/source/tutorials/deepar.ipynb b/docs/source/tutorials/deepar.ipynb index 59b975fa9..b6dd1fc1a 100644 --- a/docs/source/tutorials/deepar.ipynb +++ b/docs/source/tutorials/deepar.ipynb @@ -184,9 +184,7 @@ " max_prediction_length=prediction_length,\n", ")\n", "\n", - "validation = TimeSeriesDataSet.from_dataset(\n", - " training, data, min_prediction_idx=training_cutoff + 1\n", - ")\n", + "validation = TimeSeriesDataSet.from_dataset(training, data, min_prediction_idx=training_cutoff + 1)\n", "batch_size = 128\n", "# synchronize samples in each batch over time - only necessary for DeepVAR, not for DeepAR\n", "train_dataloader = training.to_dataloader(\n", @@ -242,9 +240,7 @@ ], "source": [ "# calculate baseline absolute error\n", - "baseline_predictions = Baseline().predict(\n", - " val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True\n", - ")\n", + "baseline_predictions = Baseline().predict(val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True)\n", "SMAPE()(baseline_predictions.output, baseline_predictions.y)" ] }, @@ -849,9 +845,7 @@ } ], "source": [ - "early_stop_callback = EarlyStopping(\n", - " monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\"\n", - ")\n", + "early_stop_callback = EarlyStopping(monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\")\n", "trainer = pl.Trainer(\n", " max_epochs=30,\n", " accelerator=\"cpu\",\n", @@ -919,9 +913,7 @@ ], "source": [ "# best_model = net\n", - "predictions = best_model.predict(\n", - " val_dataloader, 
trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True\n", - ")\n", + "predictions = best_model.predict(val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True)\n", "MAE()(predictions.output, predictions.y)" ] }, @@ -943,11 +935,7 @@ ], "source": [ "raw_predictions = net.predict(\n", - " val_dataloader,\n", - " mode=\"raw\",\n", - " return_x=True,\n", - " n_samples=100,\n", - " trainer_kwargs=dict(accelerator=\"cpu\"),\n", + " val_dataloader, mode=\"raw\", return_x=True, n_samples=100, trainer_kwargs=dict(accelerator=\"cpu\")\n", ")" ] }, @@ -1160,9 +1148,7 @@ "source": [ "series = validation.x_to_index(raw_predictions.x)[\"series\"]\n", "for idx in range(20): # plot 10 examples\n", - " best_model.plot_prediction(\n", - " raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True\n", - " )\n", + " best_model.plot_prediction(raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True)\n", " plt.suptitle(f\"Series: {series.iloc[idx]}\")" ] }, @@ -1213,17 +1199,12 @@ "source": [ "cov_matrix = best_model.loss.map_x_to_distribution(\n", " best_model.predict(\n", - " val_dataloader,\n", - " mode=(\"raw\", \"prediction\"),\n", - " n_samples=None,\n", - " trainer_kwargs=dict(accelerator=\"cpu\"),\n", + " val_dataloader, mode=(\"raw\", \"prediction\"), n_samples=None, trainer_kwargs=dict(accelerator=\"cpu\")\n", " )\n", ").base_dist.covariance_matrix.mean(0)\n", "\n", "# normalize the covariance matrix diagnoal to 1.0\n", - "correlation_matrix = cov_matrix / torch.sqrt(\n", - " torch.diag(cov_matrix)[None] * torch.diag(cov_matrix)[None].T\n", - ")\n", + "correlation_matrix = cov_matrix / torch.sqrt(torch.diag(cov_matrix)[None] * torch.diag(cov_matrix)[None].T)\n", "\n", "fig, ax = plt.subplots(1, 1, figsize=(10, 10))\n", "ax.imshow(correlation_matrix, cmap=\"bwr\")" diff --git a/docs/source/tutorials/nhits.ipynb b/docs/source/tutorials/nhits.ipynb index 2416c9ef5..bb79ae57a 100644 --- a/docs/source/tutorials/nhits.ipynb +++ 
b/docs/source/tutorials/nhits.ipynb @@ -189,16 +189,10 @@ " max_prediction_length=prediction_length,\n", ")\n", "\n", - "validation = TimeSeriesDataSet.from_dataset(\n", - " training, data, min_prediction_idx=training_cutoff + 1\n", - ")\n", + "validation = TimeSeriesDataSet.from_dataset(training, data, min_prediction_idx=training_cutoff + 1)\n", "batch_size = 128\n", - "train_dataloader = training.to_dataloader(\n", - " train=True, batch_size=batch_size, num_workers=0\n", - ")\n", - "val_dataloader = validation.to_dataloader(\n", - " train=False, batch_size=batch_size, num_workers=0\n", - ")" + "train_dataloader = training.to_dataloader(train=True, batch_size=batch_size, num_workers=0)\n", + "val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size, num_workers=0)" ] }, { @@ -246,9 +240,7 @@ ], "source": [ "# calculate baseline absolute error\n", - "baseline_predictions = Baseline().predict(\n", - " val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True\n", - ")\n", + "baseline_predictions = Baseline().predict(val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True)\n", "SMAPE()(baseline_predictions.output, baseline_predictions.y)" ] }, @@ -356,11 +348,7 @@ "from lightning.pytorch.tuner import Tuner\n", "\n", "res = Tuner(trainer).lr_find(\n", - " net,\n", - " train_dataloaders=train_dataloader,\n", - " val_dataloaders=val_dataloader,\n", - " min_lr=1e-5,\n", - " max_lr=1e-1,\n", + " net, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader, min_lr=1e-5, max_lr=1e-1\n", ")\n", "print(f\"suggested learning rate: {res.suggestion()}\")\n", "fig = res.plot(show=True, suggest=True)\n", @@ -510,9 +498,7 @@ } ], "source": [ - "early_stop_callback = EarlyStopping(\n", - " monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\"\n", - ")\n", + "early_stop_callback = EarlyStopping(monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\")\n", "trainer = 
pl.Trainer(\n", " max_epochs=5,\n", " accelerator=\"cpu\",\n", @@ -597,9 +583,7 @@ } ], "source": [ - "predictions = best_model.predict(\n", - " val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True\n", - ")\n", + "predictions = best_model.predict(val_dataloader, trainer_kwargs=dict(accelerator=\"cpu\"), return_y=True)\n", "MAE()(predictions.output, predictions.y)" ] }, @@ -628,9 +612,7 @@ } ], "source": [ - "raw_predictions = best_model.predict(\n", - " val_dataloader, mode=\"raw\", return_x=True, trainer_kwargs=dict(accelerator=\"cpu\")\n", - ")" + "raw_predictions = best_model.predict(val_dataloader, mode=\"raw\", return_x=True, trainer_kwargs=dict(accelerator=\"cpu\"))" ] }, { @@ -741,9 +723,7 @@ ], "source": [ "for idx in range(10): # plot 10 examples\n", - " best_model.plot_prediction(\n", - " raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True\n", - " )" + " best_model.plot_prediction(raw_predictions.x, raw_predictions.output, idx=idx, add_loss_to_title=True)" ] }, { @@ -830,14 +810,10 @@ ], "source": [ "# sample 500 paths\n", - "samples = best_model.loss.sample(\n", - " raw_predictions.output[\"prediction\"][[0]], n_samples=500\n", - ")[0]\n", + "samples = best_model.loss.sample(raw_predictions.output[\"prediction\"][[0]], n_samples=500)[0]\n", "\n", "# plot prediction\n", - "fig = best_model.plot_prediction(\n", - " raw_predictions.x, raw_predictions.output, idx=0, add_loss_to_title=True\n", - ")\n", + "fig = best_model.plot_prediction(raw_predictions.x, raw_predictions.output, idx=0, add_loss_to_title=True)\n", "ax = fig.get_axes()[0]\n", "# plot first two sampled paths\n", "ax.plot(samples[:, 0], color=\"g\", label=\"Sample 1\")\n", diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py index cc70d893e..0acb29aca 100644 --- 
a/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py @@ -42,23 +42,8 @@ def get_encoder(cls): return TorchNormalizer(transformation="logit") @classmethod - def _get_test_dataloaders_from(cls, params=None): + def _get_test_dataloaders(cls, params=None): """ - Return test dataloaders configured for BetaDistributionLoss. + Returns test dataloaders configured for BetaDistributionLoss. """ - from pytorch_forecasting.tests._data_scenarios import ( - data_with_covariates, - make_dataloaders, - ) - - if params is None: - params = {} - clip_target = cls._tags.get("clip_target", False) - data_loader_kwargs = cls._tags.get("data_loader_kwargs", {}).copy() - data_loader_kwargs.update(params.get("data_loader_kwargs", {})) - - data = data_with_covariates() - if clip_target: - data["target"] = data["target"].clip(1e-4, 1 - 1e-4) - dataloaders = make_dataloaders(data, **data_loader_kwargs) - return dataloaders + return super()._get_test_dataloaders_from(params, target="agency") diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_implicit_quantile_network/_implicit_quantile_network_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_implicit_quantile_network/_implicit_quantile_network_distribution_loss_pkg.py index 56696c781..ec8aa4798 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_implicit_quantile_network/_implicit_quantile_network_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_implicit_quantile_network/_implicit_quantile_network_distribution_loss_pkg.py @@ -48,20 +48,8 @@ def get_metric_test_params(cls): return [{"input_size": 5}] @classmethod - def _get_test_dataloaders_from(cls, params=None): + def _get_test_dataloaders(cls, params=None): """ Returns test dataloaders configured for ImplicitQuantileNetworkDistributionLoss. 
""" - from pytorch_forecasting.tests._data_scenarios import ( - data_with_covariates, - make_dataloaders, - ) - - if params is None: - params = {} - data_loader_kwargs = cls._tags.get("data_loader_kwargs", {}).copy() - data_loader_kwargs.update(params.get("data_loader_kwargs", {})) - - data = data_with_covariates() - dataloaders = make_dataloaders(data**data_loader_kwargs) - return dataloaders + return super()._get_test_dataloaders_from(params) diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py index 327b4f6e4..b1c1428c4 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py @@ -60,23 +60,8 @@ def prepare_test_inputs(cls, test_case): return y_pred, y @classmethod - def _get_test_dataloaders_from(cls, params=None): + def _get_test_dataloaders(cls, params=None): """ Returns test dataloaders configured for LogNormalDistributionLoss. 
""" - from pytorch_forecasting.tests._data_scenarios import ( - data_with_covariates, - make_dataloaders, - ) - - if params is None: - params = {} - clip_target = cls._tags.get("clip_target", False) - data_loader_kwargs = cls._tags.get("data_loader_kwargs", {}).copy() - data_loader_kwargs.update(params.get("data_loader_kwargs", {})) - - data = data_with_covariates() - if clip_target: - data["target"] = data["target"].clip(1e-4, None) - dataloaders = make_dataloaders(data, **data_loader_kwargs) - return dataloaders + super()._get_test_dataloaders_from(params=params, target="agency") diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py index 71fadc48c..1773412c5 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py @@ -41,16 +41,4 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for MultivariateNormalDistributionLoss. 
""" - from pytorch_forecasting.tests._data_scenarios import ( - data_with_covariates, - make_dataloaders, - ) - - if params is None: - params = {} - data_loader_kwargs = cls._tags.get("data_loader_kwargs", {}).copy() - data_loader_kwargs.update(params.get("data_loader_kwargs", {})) - - data = data_with_covariates() - dataloaders = make_dataloaders(data, **data_loader_kwargs) - return dataloaders + super()._get_test_dataloaders_from(params=params, target="agency") diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py index f4dca1c39..0e33f3883 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py @@ -46,19 +46,4 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for NegativeBinomialDistributionLoss. 
""" - from pytorch_forecasting.tests._data_scenarios import ( - data_with_covariates, - make_dataloaders, - ) - - if params is None: - params = {} - clip_target = cls._tags.get("clip_target", False) - data_loader_kwargs = cls._tags.get("data_loader_kwargs", {}).copy() - data_loader_kwargs.update(params.get("data_loader_kwargs", {})) - - data = data_with_covariates() - if clip_target: - data["target"] = data["target"].clip(1e-4, None) - dataloaders = make_dataloaders(data, **data_loader_kwargs) - return dataloaders + super()._get_test_dataloaders_from(params, target="agency") diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_normal/_normal_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_normal/_normal_distribution_loss_pkg.py index dd4cb617d..2617a6be5 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_normal/_normal_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_normal/_normal_distribution_loss_pkg.py @@ -33,16 +33,4 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for NormalDistributionLoss. 
""" - from pytorch_forecasting.tests._data_scenarios import ( - data_with_covariates, - make_dataloaders, - ) - - if params is None: - params = {} - data_loader_kwargs = cls._tags.get("data_loader_kwargs", {}).copy() - data_loader_kwargs.update(params.get("data_loader_kwargs", {})) - - data = data_with_covariates() - dataloaders = make_dataloaders(data, **data_loader_kwargs) - return dataloaders + super()._get_test_dataloaders_from(params=params, target="agency") diff --git a/pytorch_forecasting/metrics/_point_pkg/_cross_entropy/_cross_entropy_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_cross_entropy/_cross_entropy_pkg.py index f3be4d37e..07d30595a 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_cross_entropy/_cross_entropy_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_cross_entropy/_cross_entropy_pkg.py @@ -33,17 +33,4 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for CrossEntropy. """ - from pytorch_forecasting.tests._data_scenarios import ( - data_with_covariates, - make_dataloaders, - ) - - if params is None: - params = {} - data_loader_kwargs = params.get("data_loader_kwargs", {}) - # For classification, set target to a categorical column, e.g., "agency" - data_loader_kwargs.setdefault("target", "agency") - - data = data_with_covariates() - dataloaders = make_dataloaders(data, **data_loader_kwargs) - return dataloaders + super()._get_test_dataloaders_from(params=params, target="category") diff --git a/pytorch_forecasting/metrics/_point_pkg/_mae/_mae_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_mae/_mae_pkg.py index 471d3e523..47c6af031 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_mae/_mae_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_mae/_mae_pkg.py @@ -31,17 +31,4 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for MAE. 
""" - from pytorch_forecasting.tests._data_scenarios import ( - data_with_covariates, - make_dataloaders, - ) - - if params is None: - params = {} - data_loader_kwargs = params.get("data_loader_kwargs", {}) - # For point metrics, default target is "target" - data_loader_kwargs.setdefault("target", "target") - - data = data_with_covariates() - dataloaders = make_dataloaders(data, **data_loader_kwargs) - return dataloaders + return super()._get_test_dataloaders_from(params=params, target="agency") diff --git a/pytorch_forecasting/metrics/_point_pkg/_mape/_mape_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_mape/_mape_pkg.py index b28ff7768..f6e8520d7 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_mape/_mape_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_mape/_mape_pkg.py @@ -33,17 +33,4 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for MAPE. """ - from pytorch_forecasting.tests._data_scenarios import ( - data_with_covariates, - make_dataloaders, - ) - - if params is None: - params = {} - data_loader_kwargs = params.get("data_loader_kwargs", {}) - # For point metrics, default target is "target" - data_loader_kwargs.setdefault("target", "target") - - data = data_with_covariates() - dataloaders = make_dataloaders(data, **data_loader_kwargs) - return dataloaders + return super()._get_test_dataloaders_from(params=params, target="agency") diff --git a/pytorch_forecasting/metrics/_point_pkg/_mase/_mase_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_mase/_mase_pkg.py index 5fa110710..b20505d02 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_mase/_mase_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_mase/_mase_pkg.py @@ -29,17 +29,4 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for MASE. 
""" - from pytorch_forecasting.tests._data_scenarios import ( - data_with_covariates, - make_dataloaders, - ) - - if params is None: - params = {} - data_loader_kwargs = params.get("data_loader_kwargs", {}) - # For point metrics, default target is "target" - data_loader_kwargs.setdefault("target", "target") - - data = data_with_covariates() - dataloaders = make_dataloaders(data, **data_loader_kwargs) - return dataloaders + return super()._get_test_dataloaders_from(params=params, target="agency") diff --git a/pytorch_forecasting/metrics/_point_pkg/_poisson/_poisson_loss_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_poisson/_poisson_loss_pkg.py index fa9520b45..b8743d1c5 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_poisson/_poisson_loss_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_poisson/_poisson_loss_pkg.py @@ -33,17 +33,4 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for PoissonLoss. """ - from pytorch_forecasting.tests._data_scenarios import ( - data_with_covariates, - make_dataloaders, - ) - - if params is None: - params = {} - data_loader_kwargs = params.get("data_loader_kwargs", {}) - # For point metrics, default target is "target" - data_loader_kwargs.setdefault("target", "target") - - data = data_with_covariates() - dataloaders = make_dataloaders(data, **data_loader_kwargs) - return dataloaders + return super()._get_test_dataloaders_from(params=params, target="agency") diff --git a/pytorch_forecasting/metrics/_point_pkg/_rmse/_rmse_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_rmse/_rmse_pkg.py index facb2ebb5..7a2b25d2d 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_rmse/_rmse_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_rmse/_rmse_pkg.py @@ -31,17 +31,4 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for RMSE. 
""" - from pytorch_forecasting.tests._data_scenarios import ( - data_with_covariates, - make_dataloaders, - ) - - if params is None: - params = {} - data_loader_kwargs = params.get("data_loader_kwargs", {}) - # For point metrics, default target is "target" - data_loader_kwargs.setdefault("target", "target") - - data = data_with_covariates() - dataloaders = make_dataloaders(data, **data_loader_kwargs) - return dataloaders + return super()._get_test_dataloaders_from(params=params, target="agency") diff --git a/pytorch_forecasting/metrics/_point_pkg/_smape/_smape_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_smape/_smape_pkg.py index 6dfca224f..f65bb5e00 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_smape/_smape_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_smape/_smape_pkg.py @@ -33,17 +33,4 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for SMAPE. """ - from pytorch_forecasting.tests._data_scenarios import ( - data_with_covariates, - make_dataloaders, - ) - - if params is None: - params = {} - data_loader_kwargs = params.get("data_loader_kwargs", {}) - # For point metrics, default target is "target" - data_loader_kwargs.setdefault("target", "target") - - data = data_with_covariates() - dataloaders = make_dataloaders(data, **data_loader_kwargs) - return dataloaders + return super()._get_test_dataloaders_from(params=params, target="agency") diff --git a/pytorch_forecasting/metrics/_point_pkg/_tweedie/_tweedie_loss_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_tweedie/_tweedie_loss_pkg.py index 330164480..bc45e4d01 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_tweedie/_tweedie_loss_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_tweedie/_tweedie_loss_pkg.py @@ -31,17 +31,4 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for TweedieLoss. 
""" - from pytorch_forecasting.tests._data_scenarios import ( - data_with_covariates, - make_dataloaders, - ) - - if params is None: - params = {} - data_loader_kwargs = params.get("data_loader_kwargs", {}) - # For point metrics, default target is "target" - data_loader_kwargs.setdefault("target", "target") - - data = data_with_covariates() - dataloaders = make_dataloaders(data, **data_loader_kwargs) - return dataloaders + return super()._get_test_dataloaders_from(params, target="agency") diff --git a/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py b/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py index 8de6e33b2..261253ef4 100644 --- a/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py +++ b/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py @@ -42,17 +42,4 @@ def _get_test_dataloaders_from(cls, params=None): """ Returns test dataloaders configured for QuantileLoss. """ - from pytorch_forecasting.tests._data_scenarios import ( - data_with_covariates, - make_dataloaders, - ) - - if params is None: - params = {} - data_loader_kwargs = params.get("data_loader_kwargs", {}) - # For quantile metrics, default target is "target" - data_loader_kwargs.setdefault("target", "target") - - data = data_with_covariates() - dataloaders = make_dataloaders(data, **data_loader_kwargs) - return dataloaders + return super()._get_test_dataloaders_from(params, target="agency") diff --git a/pytorch_forecasting/metrics/base_metrics/_base_object.py b/pytorch_forecasting/metrics/base_metrics/_base_object.py index d695ed087..a987ebbf9 100644 --- a/pytorch_forecasting/metrics/base_metrics/_base_object.py +++ b/pytorch_forecasting/metrics/base_metrics/_base_object.py @@ -1,6 +1,10 @@ """Base object class for pytorch-forecasting metrics.""" from pytorch_forecasting.base._base_object import _BaseObject +from pytorch_forecasting.tests._data_scenarios import ( + data_with_covariates, + make_dataloaders, +) class _BasePtMetric(_BaseObject): 
@@ -78,3 +82,22 @@ def get_encoder(cls): from pytorch_forecasting.data import TorchNormalizer return TorchNormalizer() + + @classmethod + def _get_test_dataloaders_from(cls, params=None, **kwargs): + """ + Returns test dataloaders configured for the metric. + Child classes can override or pass kwargs for customization. + """ + if params is None: + params = {} + data_loader_kwargs = {} + data_loader_kwargs.update(cls._tags.get("data_loader_kwargs", {})) + data_loader_kwargs.update(params.get("data_loader_kwargs", {})) + data_loader_kwargs.update(kwargs) + clip_target = cls._tags.get("clip_target", False) + data = data_with_covariates() + if clip_target: + data["target"] = data["target"].clip(1e-4, 1 - 1e-4) + dataloaders = make_dataloaders(data, **data_loader_kwargs) + return dataloaders diff --git a/pytorch_forecasting/models/nhits/_nhits_pkg.py b/pytorch_forecasting/models/nhits/_nhits_pkg.py index 359b7f349..bc4c70439 100644 --- a/pytorch_forecasting/models/nhits/_nhits_pkg.py +++ b/pytorch_forecasting/models/nhits/_nhits_pkg.py @@ -1,7 +1,25 @@ """NHiTS package container.""" +from pytorch_forecasting.metrics.distributions import ( + BetaDistributionLoss, + ImplicitQuantileNetworkDistributionLoss, + LogNormalDistributionLoss, + MultivariateNormalDistributionLoss, + NegativeBinomialDistributionLoss, + NormalDistributionLoss, +) from pytorch_forecasting.models.base._base_object import _BasePtForecaster +DISTR_LOSSES_NUMERIC = [ + NormalDistributionLoss(), + NegativeBinomialDistributionLoss(), + MultivariateNormalDistributionLoss(), + LogNormalDistributionLoss(), + BetaDistributionLoss(), + ImplicitQuantileNetworkDistributionLoss(), + # todo: still need some debugging to add the MQF2DistributionLoss +] + class NHiTS_pkg(_BasePtForecaster): """NHiTS package container.""" @@ -84,7 +102,6 @@ def _get_test_dataloaders_from(cls, params): dataloaders_fixed_window_without_covariates, make_dataloaders, ) - from pytorch_forecasting.tests._loss_mapping import 
DISTR_LOSSES_NUMERIC distr_losses = tuple( type(l) diff --git a/pytorch_forecasting/tests/_loss_mapping.py b/pytorch_forecasting/tests/_loss_mapping.py index 755dfcfca..cea5096ac 100644 --- a/pytorch_forecasting/tests/_loss_mapping.py +++ b/pytorch_forecasting/tests/_loss_mapping.py @@ -1,12 +1,3 @@ -from pytorch_forecasting.metrics import ( - BetaDistributionLoss, - ImplicitQuantileNetworkDistributionLoss, - LogNormalDistributionLoss, - MQF2DistributionLoss, - MultivariateNormalDistributionLoss, - NegativeBinomialDistributionLoss, - NormalDistributionLoss, -) from pytorch_forecasting.metrics._distributions_pkg import ( BetaDistributionLoss_pkg, ImplicitQuantileNetworkDistributionLoss_pkg, @@ -49,16 +40,6 @@ QuantileLoss_pkg, ] -DISTR_LOSSES_NUMERIC = [ - NormalDistributionLoss(), - NegativeBinomialDistributionLoss(), - MultivariateNormalDistributionLoss(), - LogNormalDistributionLoss(), - BetaDistributionLoss(), - ImplicitQuantileNetworkDistributionLoss(), - # todo: still need some debugging to add the MQF2DistributionLoss -] - LOSS_SPECIFIC_PARAMS = { pkg._tags.get("info:metric_name", pkg.__name__.replace("_pkg", "")): { k: v From a7ca097188b75ce22a2cee525b8913d031ef50bc Mon Sep 17 00:00:00 2001 From: paramthakkar123 Date: Sat, 13 Sep 2025 00:04:41 +0530 Subject: [PATCH 11/22] Updates --- pytorch_forecasting/models/nbeats/_nbeats_pkg.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/pytorch_forecasting/models/nbeats/_nbeats_pkg.py b/pytorch_forecasting/models/nbeats/_nbeats_pkg.py index a6f008771..daeab1c4e 100644 --- a/pytorch_forecasting/models/nbeats/_nbeats_pkg.py +++ b/pytorch_forecasting/models/nbeats/_nbeats_pkg.py @@ -40,16 +40,6 @@ def get_base_test_params(cls): """ return [{}, {"backcast_loss_ratio": 1.0}] - @classmethod - def get_test_train_params(cls): - """ - Return a list of parameter dictionaries for integration tests. 
- """ - return [ - {}, - {"backcast_loss_ratio": 1.0}, - ] - @classmethod def _get_test_dataloaders_from(cls, params): """Get dataloaders from parameters. From 6727e1d844d784216b19b18e442d4f7505f6e1e5 Mon Sep 17 00:00:00 2001 From: paramthakkar123 Date: Sat, 13 Sep 2025 00:23:19 +0530 Subject: [PATCH 12/22] Updates --- .../models/nhits/_nhits_pkg.py | 31 ++++++++----------- 1 file changed, 13 insertions(+), 18 deletions(-) diff --git a/pytorch_forecasting/models/nhits/_nhits_pkg.py b/pytorch_forecasting/models/nhits/_nhits_pkg.py index bc4c70439..fe8f66777 100644 --- a/pytorch_forecasting/models/nhits/_nhits_pkg.py +++ b/pytorch_forecasting/models/nhits/_nhits_pkg.py @@ -1,25 +1,7 @@ """NHiTS package container.""" -from pytorch_forecasting.metrics.distributions import ( - BetaDistributionLoss, - ImplicitQuantileNetworkDistributionLoss, - LogNormalDistributionLoss, - MultivariateNormalDistributionLoss, - NegativeBinomialDistributionLoss, - NormalDistributionLoss, -) from pytorch_forecasting.models.base._base_object import _BasePtForecaster -DISTR_LOSSES_NUMERIC = [ - NormalDistributionLoss(), - NegativeBinomialDistributionLoss(), - MultivariateNormalDistributionLoss(), - LogNormalDistributionLoss(), - BetaDistributionLoss(), - ImplicitQuantileNetworkDistributionLoss(), - # todo: still need some debugging to add the MQF2DistributionLoss -] - class NHiTS_pkg(_BasePtForecaster): """NHiTS package container.""" @@ -95,6 +77,9 @@ def _get_test_dataloaders_from(cls, params): MQF2DistributionLoss, MultivariateNormalDistributionLoss, NegativeBinomialDistributionLoss, + NormalDistributionLoss, + BetaDistributionLoss, + ImplicitQuantileNetworkDistributionLoss, TweedieLoss, ) from pytorch_forecasting.tests._data_scenarios import ( @@ -103,6 +88,16 @@ def _get_test_dataloaders_from(cls, params): make_dataloaders, ) + DISTR_LOSSES_NUMERIC = [ + NormalDistributionLoss(), + NegativeBinomialDistributionLoss(), + MultivariateNormalDistributionLoss(), + LogNormalDistributionLoss(), + 
BetaDistributionLoss(), + ImplicitQuantileNetworkDistributionLoss(), + # todo: still need some debugging to add the MQF2DistributionLoss + ] + distr_losses = tuple( type(l) for l in DISTR_LOSSES_NUMERIC From d2b66e6df60b441a0454ecb63ccae4b54cf2f96a Mon Sep 17 00:00:00 2001 From: paramthakkar123 Date: Sat, 13 Sep 2025 00:35:20 +0530 Subject: [PATCH 13/22] Updates --- .../models/nhits/_nhits_pkg.py | 97 +++++-------------- 1 file changed, 22 insertions(+), 75 deletions(-) diff --git a/pytorch_forecasting/models/nhits/_nhits_pkg.py b/pytorch_forecasting/models/nhits/_nhits_pkg.py index fe8f66777..b3b7a6da1 100644 --- a/pytorch_forecasting/models/nhits/_nhits_pkg.py +++ b/pytorch_forecasting/models/nhits/_nhits_pkg.py @@ -51,36 +51,17 @@ def get_base_test_params(cls): @classmethod def _get_test_dataloaders_from(cls, params): - """Get dataloaders from parameters. - - Parameters - ---------- - params : dict - Parameters to create dataloaders. - One of the elements in the list returned by ``get_test_train_params``. - - Returns - ------- - dataloaders : dict with keys "train", "val", "test", values torch DataLoader - Dict of dataloaders created from the parameters. - Train, validation, and test dataloaders, in this order. """ - + Get dataloaders from parameters. 
+ """ loss = params.get("loss", None) data_loader_kwargs = params.get("data_loader_kwargs", {}) clip_target = params.get("clip_target", False) - import inspect - from pytorch_forecasting.metrics import ( LogNormalDistributionLoss, - MQF2DistributionLoss, MultivariateNormalDistributionLoss, NegativeBinomialDistributionLoss, - NormalDistributionLoss, - BetaDistributionLoss, - ImplicitQuantileNetworkDistributionLoss, - TweedieLoss, ) from pytorch_forecasting.tests._data_scenarios import ( data_with_covariates, @@ -88,60 +69,26 @@ def _get_test_dataloaders_from(cls, params): make_dataloaders, ) - DISTR_LOSSES_NUMERIC = [ - NormalDistributionLoss(), - NegativeBinomialDistributionLoss(), - MultivariateNormalDistributionLoss(), - LogNormalDistributionLoss(), - BetaDistributionLoss(), - ImplicitQuantileNetworkDistributionLoss(), - # todo: still need some debugging to add the MQF2DistributionLoss - ] - - distr_losses = tuple( - type(l) - for l in DISTR_LOSSES_NUMERIC - if not isinstance(l, MultivariateNormalDistributionLoss) - # use dataloaders without covariates as default settings of nhits - # (hidden_size = 512) is not compatible with - # MultivariateNormalDistributionLoss causing Cholesky - # decomposition to fail during loss computation. 
+ # Use fixed window dataloaders for MultivariateNormalDistributionLoss + if isinstance(loss, MultivariateNormalDistributionLoss): + return dataloaders_fixed_window_without_covariates() + + # For other distribution losses, use covariates and apply preprocessing + dwc = data_with_covariates() + if clip_target: + dwc["target"] = dwc["volume"].clip(1e-3, 1.0) + else: + dwc["target"] = dwc["volume"] + dl_default_kwargs = dict( + target="volume", + time_varying_unknown_reals=["volume"], + add_relative_time_idx=False, ) + dl_default_kwargs.update(data_loader_kwargs) - if isinstance(loss, distr_losses): - dwc = data_with_covariates() - if clip_target: - dwc["target"] = dwc["volume"].clip(1e-3, 1.0) - else: - dwc["target"] = dwc["volume"] - dl_default_kwargs = dict( - target="volume", - time_varying_unknown_reals=["volume"], - add_relative_time_idx=False, - ) - dl_default_kwargs.update(data_loader_kwargs) - - if isinstance(loss, NegativeBinomialDistributionLoss): - dwc = dwc.assign(volume=lambda x: x.volume.round()) - # todo: still need some debugging to add the MQF2DistributionLoss - # elif inspect.isclass(loss) and issubclass(loss, MQF2DistributionLoss): - # dwc = dwc.assign(volume=lambda x: x.volume.round()) - # data_loader_kwargs["target"] = "volume" - # data_loader_kwargs["time_varying_unknown_reals"] = ["volume"] - elif isinstance(loss, LogNormalDistributionLoss): - dwc["volume"] = dwc["volume"].clip(1e-3, 1.0) - dataloaders_with_covariates = make_dataloaders(dwc, **dl_default_kwargs) - return dataloaders_with_covariates - - if isinstance(loss, TweedieLoss): - dwc = data_with_covariates() - dl_default_kwargs = dict( - target="target", - time_varying_unknown_reals=["target"], - add_relative_time_idx=False, - ) - dl_default_kwargs.update(data_loader_kwargs) - dataloaders_with_covariates = make_dataloaders(dwc, **dl_default_kwargs) - return dataloaders_with_covariates + if isinstance(loss, NegativeBinomialDistributionLoss): + dwc = dwc.assign(volume=lambda x: 
x.volume.round()) + elif isinstance(loss, LogNormalDistributionLoss): + dwc["volume"] = dwc["volume"].clip(1e-3, 1.0) - return dataloaders_fixed_window_without_covariates() + return make_dataloaders(dwc, **dl_default_kwargs) From 000edb50dc15416ae2d544cd89b0839540c0253d Mon Sep 17 00:00:00 2001 From: paramthakkar123 Date: Sat, 13 Sep 2025 01:49:05 +0530 Subject: [PATCH 14/22] Updates --- pytorch_forecasting/models/nhits/_nhits_pkg.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/pytorch_forecasting/models/nhits/_nhits_pkg.py b/pytorch_forecasting/models/nhits/_nhits_pkg.py index b3b7a6da1..46f6ec7b3 100644 --- a/pytorch_forecasting/models/nhits/_nhits_pkg.py +++ b/pytorch_forecasting/models/nhits/_nhits_pkg.py @@ -62,6 +62,7 @@ def _get_test_dataloaders_from(cls, params): LogNormalDistributionLoss, MultivariateNormalDistributionLoss, NegativeBinomialDistributionLoss, + PoissonLoss, ) from pytorch_forecasting.tests._data_scenarios import ( data_with_covariates, @@ -69,10 +70,19 @@ def _get_test_dataloaders_from(cls, params): make_dataloaders, ) - # Use fixed window dataloaders for MultivariateNormalDistributionLoss if isinstance(loss, MultivariateNormalDistributionLoss): return dataloaders_fixed_window_without_covariates() + if isinstance(loss, PoissonLoss): + dl_default_kwargs = dict( + target="agency", + time_varying_unknown_reals=[], + add_relative_time_idx=False, + ) + dl_default_kwargs.update(data_loader_kwargs) + data = data_with_covariates() + return make_dataloaders(data, **dl_default_kwargs) + # For other distribution losses, use covariates and apply preprocessing dwc = data_with_covariates() if clip_target: From d61fe2aaf054b4a313fc246648e07dccc46f81a0 Mon Sep 17 00:00:00 2001 From: paramthakkar123 Date: Sat, 13 Sep 2025 11:56:12 +0530 Subject: [PATCH 15/22] Updates --- .../models/nhits/_nhits_pkg.py | 62 +++++++++++-------- 1 file changed, 36 insertions(+), 26 deletions(-) diff --git 
a/pytorch_forecasting/models/nhits/_nhits_pkg.py b/pytorch_forecasting/models/nhits/_nhits_pkg.py index 46f6ec7b3..17256a9b7 100644 --- a/pytorch_forecasting/models/nhits/_nhits_pkg.py +++ b/pytorch_forecasting/models/nhits/_nhits_pkg.py @@ -51,18 +51,17 @@ def get_base_test_params(cls): @classmethod def _get_test_dataloaders_from(cls, params): - """ - Get dataloaders from parameters. - """ loss = params.get("loss", None) data_loader_kwargs = params.get("data_loader_kwargs", {}) clip_target = params.get("clip_target", False) from pytorch_forecasting.metrics import ( + BetaDistributionLoss, LogNormalDistributionLoss, + MQF2DistributionLoss, MultivariateNormalDistributionLoss, NegativeBinomialDistributionLoss, - PoissonLoss, + TweedieLoss, ) from pytorch_forecasting.tests._data_scenarios import ( data_with_covariates, @@ -70,35 +69,46 @@ def _get_test_dataloaders_from(cls, params): make_dataloaders, ) + # Use fixed window dataloaders for MultivariateNormalDistributionLoss if isinstance(loss, MultivariateNormalDistributionLoss): return dataloaders_fixed_window_without_covariates() - if isinstance(loss, PoissonLoss): + # For other distribution losses, use covariates and apply preprocessing + if isinstance( + loss, + ( + LogNormalDistributionLoss, + NegativeBinomialDistributionLoss, + MQF2DistributionLoss, + BetaDistributionLoss, + ), + ): + dwc = data_with_covariates() + if clip_target: + dwc["target"] = dwc["volume"].clip(1e-3, 1.0) + else: + dwc["target"] = dwc["volume"] dl_default_kwargs = dict( - target="agency", - time_varying_unknown_reals=[], + target="volume", + time_varying_unknown_reals=["volume"], add_relative_time_idx=False, ) dl_default_kwargs.update(data_loader_kwargs) - data = data_with_covariates() - return make_dataloaders(data, **dl_default_kwargs) - # For other distribution losses, use covariates and apply preprocessing - dwc = data_with_covariates() - if clip_target: - dwc["target"] = dwc["volume"].clip(1e-3, 1.0) - else: - dwc["target"] = 
dwc["volume"] - dl_default_kwargs = dict( - target="volume", - time_varying_unknown_reals=["volume"], - add_relative_time_idx=False, - ) - dl_default_kwargs.update(data_loader_kwargs) + if isinstance(loss, NegativeBinomialDistributionLoss): + dwc = dwc.assign(volume=lambda x: x.volume.round()) + elif isinstance(loss, LogNormalDistributionLoss): + dwc["volume"] = dwc["volume"].clip(1e-3, 1.0) + return make_dataloaders(dwc, **dl_default_kwargs) - if isinstance(loss, NegativeBinomialDistributionLoss): - dwc = dwc.assign(volume=lambda x: x.volume.round()) - elif isinstance(loss, LogNormalDistributionLoss): - dwc["volume"] = dwc["volume"].clip(1e-3, 1.0) + if isinstance(loss, TweedieLoss): + dwc = data_with_covariates() + dl_default_kwargs = dict( + target="target", + time_varying_unknown_reals=["target"], + add_relative_time_idx=False, + ) + dl_default_kwargs.update(data_loader_kwargs) + return make_dataloaders(dwc, **dl_default_kwargs) - return make_dataloaders(dwc, **dl_default_kwargs) + return dataloaders_fixed_window_without_covariates() From 7fbfa71917ef39c76fce5a0910f0268446bdc557 Mon Sep 17 00:00:00 2001 From: paramthakkar123 Date: Sun, 14 Sep 2025 01:15:53 +0530 Subject: [PATCH 16/22] Further refactor --- .../_beta/_beta_distribution_loss_pkg.py | 21 +++++++++------- .../_log_normal_distribution_loss_pkg.py | 21 +++++++++------- ...ltivariate_normal_distribution_loss_pkg.py | 19 ++++++++------- ...negative_binomial_distribution_loss_pkg.py | 19 +++++++++------ .../_normal/_normal_distribution_loss_pkg.py | 4 ++-- .../_cross_entropy/_cross_entropy_pkg.py | 2 +- .../metrics/_point_pkg/_mae/_mae_pkg.py | 2 +- .../metrics/_point_pkg/_mape/_mape_pkg.py | 2 +- .../metrics/_point_pkg/_mase/_mase_pkg.py | 2 +- .../_point_pkg/_poisson/_poisson_loss_pkg.py | 2 +- .../metrics/_point_pkg/_rmse/_rmse_pkg.py | 2 +- .../metrics/_point_pkg/_smape/_smape_pkg.py | 2 +- .../_point_pkg/_tweedie/_tweedie_loss_pkg.py | 2 +- .../_quantile_pkg/_quantile_loss_pkg.py | 2 +- 
.../metrics/base_metrics/_base_object.py | 6 ++--- pytorch_forecasting/tests/_loss_mapping.py | 24 ++++++------------- 16 files changed, 70 insertions(+), 62 deletions(-) diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py index 0acb29aca..ec117e68f 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py @@ -17,15 +17,16 @@ class BetaDistributionLoss_pkg(_BasePtMetric): "distribution_type": "beta", "info:metric_name": "BetaDistributionLoss", "requires:data_type": "beta_distribution_forecast", - "clip_target": True, - "data_loader_kwargs": { - "target_normalizer": GroupNormalizer( - groups=["agency", "sku"], transformation="logit" - ) - }, "info:pred_type": ["distr"], "info:y_type": ["numeric"], - "expected_loss_ndim": 2, + "loss_ndim": 2, + } + + clip_target = True + data_loader_kwargs = { + "target_normalizer": GroupNormalizer( + groups=["agency", "sku"], transformation="logit" + ) } @classmethod @@ -46,4 +47,8 @@ def _get_test_dataloaders(cls, params=None): """ Returns test dataloaders configured for BetaDistributionLoss. 
""" - return super()._get_test_dataloaders_from(params, target="agency") + kwargs = dict(target="agency") + kwargs.update(cls.data_loader_kwargs) + return super()._get_test_dataloaders_from( + params, clip_target=cls.clip_target, **kwargs + ) diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py index b1c1428c4..41358a791 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py @@ -19,15 +19,16 @@ class LogNormalDistributionLoss_pkg(_BasePtMetric): "distribution_type": "log_normal", "info:metric_name": "LogNormalDistributionLoss", "requires:data_type": "log_normal_distribution_forecast", - "clip_target": True, - "data_loader_kwargs": { - "target_normalizer": GroupNormalizer( - groups=["agency", "sku"], transformation="log1p" - ) - }, "info:pred_type": ["distr"], "info:y_type": ["numeric"], - "expected_loss_ndim": 2, + "loss_ndim": 2, + } + + clip_target = True + data_loader_kwargs = { + "target_normalizer": GroupNormalizer( + groups=["agency", "sku"], transformation="log1p" + ) } @classmethod @@ -64,4 +65,8 @@ def _get_test_dataloaders(cls, params=None): """ Returns test dataloaders configured for LogNormalDistributionLoss. 
""" - super()._get_test_dataloaders_from(params=params, target="agency") + kwargs = dict(target="agency") + kwargs.update(cls.data_loader_kwargs) + return super()._get_test_dataloaders_from( + params, clip_target=cls.clip_target, **kwargs + ) diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py index 1773412c5..92c83962f 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py @@ -18,14 +18,15 @@ class MultivariateNormalDistributionLoss_pkg(_BasePtMetric): "distribution_type": "multivariate_normal", "info:metric_name": "MultivariateNormalDistributionLoss", "requires:data_type": "multivariate_normal_distribution_forecast", - "data_loader_kwargs": { - "target_normalizer": GroupNormalizer( - groups=["agency", "sku"], transformation="log1p" - ) - }, "info:pred_type": ["distr"], "info:y_type": ["numeric"], - "expected_loss_ndim": 2, + "loss_ndim": 2, + } + + data_loader_kwargs = { + "target_normalizer": GroupNormalizer( + groups=["agency", "sku"], transformation="log1p" + ) } @classmethod @@ -37,8 +38,10 @@ def get_cls(cls): return MultivariateNormalDistributionLoss @classmethod - def _get_test_dataloaders_from(cls, params=None): + def _get_test_dataloaders(cls, params=None): """ Returns test dataloaders configured for MultivariateNormalDistributionLoss. 
""" - super()._get_test_dataloaders_from(params=params, target="agency") + kwargs = dict(target="agency") + kwargs.update(cls.data_loader_kwargs) + return super()._get_test_dataloaders_from(params, **kwargs) diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py index 0e33f3883..c91e404ee 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py @@ -17,13 +17,14 @@ class NegativeBinomialDistributionLoss_pkg(_BasePtMetric): "distribution_type": "negative_binomial", "info:metric_name": "NegativeBinomialDistributionLoss", "requires:data_type": "negative_binomial_distribution_forecast", - "clip_target": False, - "data_loader_kwargs": { - "target_normalizer": GroupNormalizer(groups=["agency", "sku"], center=False) - }, "info:pred_type": ["distr"], "info:y_type": ["numeric"], - "expected_loss_ndim": 2, + "loss_ndim": 2, + } + + clip_target = False + data_loader_kwargs = { + "target_normalizer": GroupNormalizer(groups=["agency", "sku"], center=False) } @classmethod @@ -42,8 +43,12 @@ def get_encoder(cls): return TorchNormalizer(center=False) @classmethod - def _get_test_dataloaders_from(cls, params=None): + def _get_test_dataloaders(cls, params=None): """ Returns test dataloaders configured for NegativeBinomialDistributionLoss. 
""" - super()._get_test_dataloaders_from(params, target="agency") + kwargs = dict(target="agency") + kwargs.update(cls.data_loader_kwargs) + return super()._get_test_dataloaders_from( + params, clip_target=cls.clip_target, **kwargs + ) diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_normal/_normal_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_normal/_normal_distribution_loss_pkg.py index 2617a6be5..afff845ac 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_normal/_normal_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_normal/_normal_distribution_loss_pkg.py @@ -19,7 +19,7 @@ class NormalDistributionLoss_pkg(_BasePtMetric): "requires:data_type": "normal_distribution_forecast", "info:pred_type": ["distr"], "info:y_type": ["numeric"], - "expected_loss_ndim": 2, + "loss_ndim": 2, } @classmethod @@ -29,7 +29,7 @@ def get_cls(cls): return NormalDistributionLoss @classmethod - def _get_test_dataloaders_from(cls, params=None): + def _get_test_dataloaders(cls, params=None): """ Returns test dataloaders configured for NormalDistributionLoss. """ diff --git a/pytorch_forecasting/metrics/_point_pkg/_cross_entropy/_cross_entropy_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_cross_entropy/_cross_entropy_pkg.py index 07d30595a..23f5f90bf 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_cross_entropy/_cross_entropy_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_cross_entropy/_cross_entropy_pkg.py @@ -29,7 +29,7 @@ def get_cls(cls): return CrossEntropy @classmethod - def _get_test_dataloaders_from(cls, params=None): + def _get_test_dataloaders(cls, params=None): """ Returns test dataloaders configured for CrossEntropy. 
""" diff --git a/pytorch_forecasting/metrics/_point_pkg/_mae/_mae_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_mae/_mae_pkg.py index 47c6af031..f1cbb413a 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_mae/_mae_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_mae/_mae_pkg.py @@ -27,7 +27,7 @@ def get_cls(cls): return MAE @classmethod - def _get_test_dataloaders_from(cls, params=None): + def _get_test_dataloaders(cls, params=None): """ Returns test dataloaders configured for MAE. """ diff --git a/pytorch_forecasting/metrics/_point_pkg/_mape/_mape_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_mape/_mape_pkg.py index f6e8520d7..c2dc963b8 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_mape/_mape_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_mape/_mape_pkg.py @@ -29,7 +29,7 @@ def get_cls(cls): return MAPE @classmethod - def _get_test_dataloaders_from(cls, params=None): + def _get_test_dataloaders(cls, params=None): """ Returns test dataloaders configured for MAPE. """ diff --git a/pytorch_forecasting/metrics/_point_pkg/_mase/_mase_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_mase/_mase_pkg.py index b20505d02..8c7ba2a09 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_mase/_mase_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_mase/_mase_pkg.py @@ -25,7 +25,7 @@ def get_cls(cls): return MASE @classmethod - def _get_test_dataloaders_from(cls, params=None): + def _get_test_dataloaders(cls, params=None): """ Returns test dataloaders configured for MASE. 
""" diff --git a/pytorch_forecasting/metrics/_point_pkg/_poisson/_poisson_loss_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_poisson/_poisson_loss_pkg.py index b8743d1c5..2dcbf5be4 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_poisson/_poisson_loss_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_poisson/_poisson_loss_pkg.py @@ -29,7 +29,7 @@ def get_cls(cls): return PoissonLoss @classmethod - def _get_test_dataloaders_from(cls, params=None): + def _get_test_dataloaders(cls, params=None): """ Returns test dataloaders configured for PoissonLoss. """ diff --git a/pytorch_forecasting/metrics/_point_pkg/_rmse/_rmse_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_rmse/_rmse_pkg.py index 7a2b25d2d..9bec3eb76 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_rmse/_rmse_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_rmse/_rmse_pkg.py @@ -27,7 +27,7 @@ def get_cls(cls): return RMSE @classmethod - def _get_test_dataloaders_from(cls, params=None): + def _get_test_dataloaders(cls, params=None): """ Returns test dataloaders configured for RMSE. """ diff --git a/pytorch_forecasting/metrics/_point_pkg/_smape/_smape_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_smape/_smape_pkg.py index f65bb5e00..1aace449d 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_smape/_smape_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_smape/_smape_pkg.py @@ -29,7 +29,7 @@ def get_cls(cls): return SMAPE @classmethod - def _get_test_dataloaders_from(cls, params=None): + def _get_test_dataloaders(cls, params=None): """ Returns test dataloaders configured for SMAPE. 
""" diff --git a/pytorch_forecasting/metrics/_point_pkg/_tweedie/_tweedie_loss_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_tweedie/_tweedie_loss_pkg.py index bc45e4d01..6bd9fc7b3 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_tweedie/_tweedie_loss_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_tweedie/_tweedie_loss_pkg.py @@ -27,7 +27,7 @@ def get_cls(cls): return TweedieLoss @classmethod - def _get_test_dataloaders_from(cls, params=None): + def _get_test_dataloaders(cls, params=None): """ Returns test dataloaders configured for TweedieLoss. """ diff --git a/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py b/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py index 261253ef4..05d3a5e85 100644 --- a/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py +++ b/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py @@ -38,7 +38,7 @@ def get_metric_test_params(cls): ] @classmethod - def _get_test_dataloaders_from(cls, params=None): + def _get_test_dataloaders(cls, params=None): """ Returns test dataloaders configured for QuantileLoss. """ diff --git a/pytorch_forecasting/metrics/base_metrics/_base_object.py b/pytorch_forecasting/metrics/base_metrics/_base_object.py index a987ebbf9..b6447da55 100644 --- a/pytorch_forecasting/metrics/base_metrics/_base_object.py +++ b/pytorch_forecasting/metrics/base_metrics/_base_object.py @@ -84,7 +84,7 @@ def get_encoder(cls): return TorchNormalizer() @classmethod - def _get_test_dataloaders_from(cls, params=None, **kwargs): + def _get_test_dataloaders_from(cls, params, **kwargs): """ Returns test dataloaders configured for the metric. Child classes can override or pass kwargs for customization. 
@@ -92,10 +92,10 @@ def _get_test_dataloaders_from(cls, params=None, **kwargs): if params is None: params = {} data_loader_kwargs = {} - data_loader_kwargs.update(cls._tags.get("data_loader_kwargs", {})) data_loader_kwargs.update(params.get("data_loader_kwargs", {})) data_loader_kwargs.update(kwargs) - clip_target = cls._tags.get("clip_target", False) + clip_target = params.get("clip_target", False) + data = data_with_covariates() if clip_target: data["target"] = data["target"].clip(1e-4, 1 - 1e-4) diff --git a/pytorch_forecasting/tests/_loss_mapping.py b/pytorch_forecasting/tests/_loss_mapping.py index cea5096ac..1c7282bb7 100644 --- a/pytorch_forecasting/tests/_loss_mapping.py +++ b/pytorch_forecasting/tests/_loss_mapping.py @@ -40,27 +40,17 @@ QuantileLoss_pkg, ] +# Only extract relevant static metadata from _tags, not runtime config like clip_target or data_loader_kwargs LOSS_SPECIFIC_PARAMS = { pkg._tags.get("info:metric_name", pkg.__name__.replace("_pkg", "")): { - k: v - for k, v in pkg._tags.items() - if not ( - k.startswith("info:") - or k.startswith("capability:") - or k.startswith("shape:") - or k - in [ - "metric_type", - "distribution_type", - "requires:data_type", - "no_rescaling", - "expected_loss_ndim", - ] - ) + "clip_target": getattr(pkg, "clip_target", None), + "data_loader_kwargs": getattr(pkg, "data_loader_kwargs", {}), } for pkg in METRIC_PKGS } +print(LOSS_SPECIFIC_PARAMS) + def get_compatible_losses(pred_types, y_types): """ @@ -79,7 +69,7 @@ def get_compatible_losses(pred_types, y_types): def get_test_dataloaders_for_loss(pkg, params=None): """ - Get test dataloaders for a given loss package using its tags and method. + Get test dataloaders for a given loss package using its method. """ return pkg._get_test_dataloaders_from(params or {}) @@ -88,7 +78,7 @@ def check_loss_output_shape(pkg, y_pred, y_true): """ Check that the output shape of the loss matches the expected shape from tags. 
""" - expected_ndim = pkg._tags.get("expected_loss_ndim", None) + expected_ndim = pkg._tags.get("loss_ndim", None) loss_instance = pkg.get_cls()() result = loss_instance(y_pred, y_true) if expected_ndim is not None: From c27ae8bc8c8034a77f601c62daf345bcb0ee1e63 Mon Sep 17 00:00:00 2001 From: paramthakkar123 Date: Sun, 14 Sep 2025 01:18:02 +0530 Subject: [PATCH 17/22] Updates --- pytorch_forecasting/tests/_loss_mapping.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pytorch_forecasting/tests/_loss_mapping.py b/pytorch_forecasting/tests/_loss_mapping.py index 1c7282bb7..d96ec546c 100644 --- a/pytorch_forecasting/tests/_loss_mapping.py +++ b/pytorch_forecasting/tests/_loss_mapping.py @@ -40,7 +40,6 @@ QuantileLoss_pkg, ] -# Only extract relevant static metadata from _tags, not runtime config like clip_target or data_loader_kwargs LOSS_SPECIFIC_PARAMS = { pkg._tags.get("info:metric_name", pkg.__name__.replace("_pkg", "")): { "clip_target": getattr(pkg, "clip_target", None), From 6b2a926d207371fe2251bc7bdac76f379c41c194 Mon Sep 17 00:00:00 2001 From: paramthakkar123 Date: Tue, 16 Sep 2025 21:34:50 +0530 Subject: [PATCH 18/22] Updates: added properties for clip_target and data_loader_kwargs --- .../_beta/_beta_distribution_loss_pkg.py | 17 +++++++++++------ .../_log_normal_distribution_loss_pkg.py | 17 +++++++++++------ ...multivariate_normal_distribution_loss_pkg.py | 16 +++++++++++----- .../_negative_binomial_distribution_loss_pkg.py | 13 +++++++++---- pytorch_forecasting/tests/_loss_mapping.py | 14 ++++++-------- 5 files changed, 48 insertions(+), 29 deletions(-) diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py index ec117e68f..3abf36659 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_beta/_beta_distribution_loss_pkg.py @@ -22,12 
+22,17 @@ class BetaDistributionLoss_pkg(_BasePtMetric): "loss_ndim": 2, } - clip_target = True - data_loader_kwargs = { - "target_normalizer": GroupNormalizer( - groups=["agency", "sku"], transformation="logit" - ) - } + @property + def clip_target(cls): + return True + + @property + def data_loader_kwargs(cls): + return { + "target_normalizer": GroupNormalizer( + groups=["agency", "sku"], transformation="logit" + ) + } @classmethod def get_cls(cls): diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py index 41358a791..b48e6b809 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_log_normal/_log_normal_distribution_loss_pkg.py @@ -24,12 +24,17 @@ class LogNormalDistributionLoss_pkg(_BasePtMetric): "loss_ndim": 2, } - clip_target = True - data_loader_kwargs = { - "target_normalizer": GroupNormalizer( - groups=["agency", "sku"], transformation="log1p" - ) - } + @property + def clip_target(self): + return True + + @property + def data_loader_kwargs(self): + return { + "target_normalizer": GroupNormalizer( + groups=["agency", "sku"], transformation="log1p" + ) + } @classmethod def get_cls(cls): diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py index 92c83962f..85625ae92 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_multivariate_normal/_multivariate_normal_distribution_loss_pkg.py @@ -23,11 +23,17 @@ class MultivariateNormalDistributionLoss_pkg(_BasePtMetric): "loss_ndim": 2, } - 
data_loader_kwargs = { - "target_normalizer": GroupNormalizer( - groups=["agency", "sku"], transformation="log1p" - ) - } + @property + def clip_target(self): + return False + + @property + def data_loader_kwargs(self): + return { + "target_normalizer": GroupNormalizer( + groups=["agency", "sku"], transformation="log1p" + ) + } @classmethod def get_cls(cls): diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py index c91e404ee..e417d7560 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py +++ b/pytorch_forecasting/metrics/_distributions_pkg/_negative_binomial/_negative_binomial_distribution_loss_pkg.py @@ -22,10 +22,15 @@ class NegativeBinomialDistributionLoss_pkg(_BasePtMetric): "loss_ndim": 2, } - clip_target = False - data_loader_kwargs = { - "target_normalizer": GroupNormalizer(groups=["agency", "sku"], center=False) - } + @property + def clip_target(self): + return False + + @property + def data_loader_kwargs(self): + return { + "target_normalizer": GroupNormalizer(groups=["agency", "sku"], center=False) + } @classmethod def get_cls(cls): diff --git a/pytorch_forecasting/tests/_loss_mapping.py b/pytorch_forecasting/tests/_loss_mapping.py index d96ec546c..b676eb4dc 100644 --- a/pytorch_forecasting/tests/_loss_mapping.py +++ b/pytorch_forecasting/tests/_loss_mapping.py @@ -41,15 +41,13 @@ ] LOSS_SPECIFIC_PARAMS = { - pkg._tags.get("info:metric_name", pkg.__name__.replace("_pkg", "")): { - "clip_target": getattr(pkg, "clip_target", None), - "data_loader_kwargs": getattr(pkg, "data_loader_kwargs", {}), + pkg.get_class_tag("info:metric_name", pkg.__name__.replace("_pkg", "")): { + "clip_target": getattr(pkg(), "clip_target", False), + "data_loader_kwargs": getattr(pkg(), "data_loader_kwargs", {}), } for pkg in 
METRIC_PKGS } -print(LOSS_SPECIFIC_PARAMS) - def get_compatible_losses(pred_types, y_types): """ @@ -57,8 +55,8 @@ def get_compatible_losses(pred_types, y_types): """ compatible_losses = [] for pkg in METRIC_PKGS: - pkg_pred_types = pkg._tags.get("info:pred_type", []) - pkg_y_types = pkg._tags.get("info:y_type", []) + pkg_pred_types = pkg.get_class_tag("info:pred_type", []) + pkg_y_types = pkg.get_class_tag("info:y_type", []) if any(pt in pred_types for pt in pkg_pred_types) and any( yt in y_types for yt in pkg_y_types ): @@ -77,7 +75,7 @@ def check_loss_output_shape(pkg, y_pred, y_true): """ Check that the output shape of the loss matches the expected shape from tags. """ - expected_ndim = pkg._tags.get("loss_ndim", None) + expected_ndim = pkg.get_class_tag("loss_ndim", None) loss_instance = pkg.get_cls()() result = loss_instance(y_pred, y_true) if expected_ndim is not None: From 70cc6050d098e56d461248ebb6b234570d9ecbe8 Mon Sep 17 00:00:00 2001 From: paramthakkar123 Date: Fri, 19 Sep 2025 10:59:02 +0530 Subject: [PATCH 19/22] Updates --- .../_mqf2/_mqf2_distribution_loss_pkg.py | 19 ++++++++++++++----- .../_cross_entropy/_cross_entropy_pkg.py | 1 + .../metrics/_point_pkg/_mae/_mae_pkg.py | 1 + .../metrics/_point_pkg/_mape/_mape_pkg.py | 1 + .../metrics/_point_pkg/_mase/_mase_pkg.py | 1 + .../_point_pkg/_poisson/_poisson_loss_pkg.py | 1 + .../metrics/_point_pkg/_rmse/_rmse_pkg.py | 1 + .../metrics/_point_pkg/_smape/_smape_pkg.py | 1 + .../_point_pkg/_tweedie/_tweedie_loss_pkg.py | 1 + .../_quantile_pkg/_quantile_loss_pkg.py | 1 + .../models/nhits/_nhits_pkg.py | 15 +++++++++++++++ 11 files changed, 38 insertions(+), 5 deletions(-) diff --git a/pytorch_forecasting/metrics/_distributions_pkg/_mqf2/_mqf2_distribution_loss_pkg.py b/pytorch_forecasting/metrics/_distributions_pkg/_mqf2/_mqf2_distribution_loss_pkg.py index 25182c279..23b921475 100644 --- a/pytorch_forecasting/metrics/_distributions_pkg/_mqf2/_mqf2_distribution_loss_pkg.py +++ 
b/pytorch_forecasting/metrics/_distributions_pkg/_mqf2/_mqf2_distribution_loss_pkg.py @@ -19,14 +19,23 @@ class MQF2DistributionLoss_pkg(_BasePtMetric): "python_dependencies": ["cpflows"], "capability:quantile_generation": True, "requires:data_type": "mqf2_distribution_forecast", - "clip_target": True, - "data_loader_kwargs": { + } + + @property + def clip_target(self): + return True + + @property + def data_loader_kwargs(self): + return { "target_normalizer": GroupNormalizer( groups=["agency", "sku"], center=False, transformation="log1p" ) - }, - "trainer_kwargs": dict(accelerator="cpu"), - } + } + + @property + def trainer_kwargs(self): + return dict(accelerator="cpu") @classmethod def get_cls(cls): diff --git a/pytorch_forecasting/metrics/_point_pkg/_cross_entropy/_cross_entropy_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_cross_entropy/_cross_entropy_pkg.py index 23f5f90bf..60d9f84f2 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_cross_entropy/_cross_entropy_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_cross_entropy/_cross_entropy_pkg.py @@ -20,6 +20,7 @@ class CrossEntropy_pkg(_BasePtMetric): "no_rescaling": True, "info:pred_type": ["point"], "info:y_type": ["category"], + "loss_ndim": 1, } @classmethod diff --git a/pytorch_forecasting/metrics/_point_pkg/_mae/_mae_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_mae/_mae_pkg.py index f1cbb413a..15632ce60 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_mae/_mae_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_mae/_mae_pkg.py @@ -18,6 +18,7 @@ class MAE_pkg(_BasePtMetric): "info:metric_name": "MAE", "info:pred_type": ["point"], "info:y_type": ["numeric"], + "loss_ndim": 1, } @classmethod diff --git a/pytorch_forecasting/metrics/_point_pkg/_mape/_mape_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_mape/_mape_pkg.py index c2dc963b8..1db954132 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_mape/_mape_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_mape/_mape_pkg.py @@ -20,6 
+20,7 @@ class MAPE_pkg(_BasePtMetric): "requires:data_type": "point_forecast", "info:pred_type": ["point"], "info:y_type": ["numeric"], + "loss_ndim": 1, } @classmethod diff --git a/pytorch_forecasting/metrics/_point_pkg/_mase/_mase_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_mase/_mase_pkg.py index 8c7ba2a09..ea7afe8e6 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_mase/_mase_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_mase/_mase_pkg.py @@ -16,6 +16,7 @@ class MASE_pkg(_BasePtMetric): "requires:data_type": "point_forecast", "info:pred_type": ["point"], "info:y_type": ["numeric"], + "loss_ndim": 1, } @classmethod diff --git a/pytorch_forecasting/metrics/_point_pkg/_poisson/_poisson_loss_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_poisson/_poisson_loss_pkg.py index 2dcbf5be4..366c30ba6 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_poisson/_poisson_loss_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_poisson/_poisson_loss_pkg.py @@ -20,6 +20,7 @@ class PoissonLoss_pkg(_BasePtMetric): "shape:adds_quantile_dimension": True, "info:pred_type": ["point"], "info:y_type": ["numeric"], + "loss_ndim": 1, } @classmethod diff --git a/pytorch_forecasting/metrics/_point_pkg/_rmse/_rmse_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_rmse/_rmse_pkg.py index 9bec3eb76..a7c7509b9 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_rmse/_rmse_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_rmse/_rmse_pkg.py @@ -18,6 +18,7 @@ class RMSE_pkg(_BasePtMetric): "requires:data_type": "point_forecast", "info:pred_type": ["point"], "info:y_type": ["numeric"], + "loss_ndim": 1, } # noqa: E501 @classmethod diff --git a/pytorch_forecasting/metrics/_point_pkg/_smape/_smape_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_smape/_smape_pkg.py index 1aace449d..b192b74c7 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_smape/_smape_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_smape/_smape_pkg.py @@ -20,6 +20,7 @@ class SMAPE_pkg(_BasePtMetric): 
"requires:data_type": "point_forecast", "info:pred_type": ["point"], "info:y_type": ["numeric"], + "loss_ndim": 1, } # noqa: E501 @classmethod diff --git a/pytorch_forecasting/metrics/_point_pkg/_tweedie/_tweedie_loss_pkg.py b/pytorch_forecasting/metrics/_point_pkg/_tweedie/_tweedie_loss_pkg.py index 6bd9fc7b3..d6ccbed70 100644 --- a/pytorch_forecasting/metrics/_point_pkg/_tweedie/_tweedie_loss_pkg.py +++ b/pytorch_forecasting/metrics/_point_pkg/_tweedie/_tweedie_loss_pkg.py @@ -18,6 +18,7 @@ class TweedieLoss_pkg(_BasePtMetric): "requires:data_type": "point_forecast", "info:pred_type": ["point"], "info:y_types": ["numeric"], + "loss_ndim": 1, } # noqa: E501 @classmethod diff --git a/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py b/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py index 05d3a5e85..65fda0314 100644 --- a/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py +++ b/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py @@ -18,6 +18,7 @@ class QuantileLoss_pkg(_BasePtMetric): "requires:data_type": "quantile_forecast", "info:pred_type": ["quantile"], "info:y_type": ["numeric"], + "loss_ndim": 2, } # noqa: E501 @classmethod diff --git a/pytorch_forecasting/models/nhits/_nhits_pkg.py b/pytorch_forecasting/models/nhits/_nhits_pkg.py index 17256a9b7..685f52c17 100644 --- a/pytorch_forecasting/models/nhits/_nhits_pkg.py +++ b/pytorch_forecasting/models/nhits/_nhits_pkg.py @@ -51,6 +51,20 @@ def get_base_test_params(cls): @classmethod def _get_test_dataloaders_from(cls, params): + """Get dataloaders from parameters. + + Parameters + ---------- + params : dict + Parameters to create dataloaders. + One of the elements in the list returned by ``get_test_train_params``. + + Returns + ------- + dataloaders : dict with keys "train", "val", "test", values torch DataLoader + Dict of dataloaders created from the parameters. + Train, validation, and test dataloaders, in this order. 
+ """ loss = params.get("loss", None) data_loader_kwargs = params.get("data_loader_kwargs", {}) clip_target = params.get("clip_target", False) @@ -97,6 +111,7 @@ def _get_test_dataloaders_from(cls, params): if isinstance(loss, NegativeBinomialDistributionLoss): dwc = dwc.assign(volume=lambda x: x.volume.round()) + # todo: still need some debugging to add the MQF2DistributionLoss elif isinstance(loss, LogNormalDistributionLoss): dwc["volume"] = dwc["volume"].clip(1e-3, 1.0) return make_dataloaders(dwc, **dl_default_kwargs) From f5b79b7f340ccba3ebc4ac18756b6e090484e6a2 Mon Sep 17 00:00:00 2001 From: paramthakkar123 Date: Sat, 20 Sep 2025 01:03:25 +0530 Subject: [PATCH 20/22] Changed loss_ndim to num_quantiles --- pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py b/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py index 65fda0314..86c1d1e89 100644 --- a/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py +++ b/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py @@ -18,7 +18,7 @@ class QuantileLoss_pkg(_BasePtMetric): "requires:data_type": "quantile_forecast", "info:pred_type": ["quantile"], "info:y_type": ["numeric"], - "loss_ndim": 2, + "num_quantiles": 2, } # noqa: E501 @classmethod From cd87f4172a320b25107d0cb021ec564cf478af5a Mon Sep 17 00:00:00 2001 From: paramthakkar123 Date: Sat, 20 Sep 2025 01:21:46 +0530 Subject: [PATCH 21/22] Added loss_ndim: num_quantiles --- pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py b/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py index 86c1d1e89..50751daf9 100644 --- a/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py +++ 
b/pytorch_forecasting/metrics/_quantile_pkg/_quantile_loss_pkg.py @@ -18,7 +18,7 @@ class QuantileLoss_pkg(_BasePtMetric): "requires:data_type": "quantile_forecast", "info:pred_type": ["quantile"], "info:y_type": ["numeric"], - "num_quantiles": 2, + "loss_ndim": "num_quantiles", } # noqa: E501 @classmethod From d5765cadf1f7752ddb14668165827f9fd79c84a4 Mon Sep 17 00:00:00 2001 From: paramthakkar123 Date: Sun, 21 Sep 2025 01:09:35 +0530 Subject: [PATCH 22/22] Updates --- .../models/nhits/_nhits_pkg.py | 31 ++++----- pytorch_forecasting/tests/_loss_mapping.py | 68 ++----------------- .../tests/test_all_estimators.py | 21 +++--- 3 files changed, 27 insertions(+), 93 deletions(-) diff --git a/pytorch_forecasting/models/nhits/_nhits_pkg.py b/pytorch_forecasting/models/nhits/_nhits_pkg.py index 685f52c17..710be45a2 100644 --- a/pytorch_forecasting/models/nhits/_nhits_pkg.py +++ b/pytorch_forecasting/models/nhits/_nhits_pkg.py @@ -70,12 +70,7 @@ def _get_test_dataloaders_from(cls, params): clip_target = params.get("clip_target", False) from pytorch_forecasting.metrics import ( - BetaDistributionLoss, - LogNormalDistributionLoss, MQF2DistributionLoss, - MultivariateNormalDistributionLoss, - NegativeBinomialDistributionLoss, - TweedieLoss, ) from pytorch_forecasting.tests._data_scenarios import ( data_with_covariates, @@ -84,18 +79,16 @@ def _get_test_dataloaders_from(cls, params): ) # Use fixed window dataloaders for MultivariateNormalDistributionLoss - if isinstance(loss, MultivariateNormalDistributionLoss): + if hasattr( + loss, "get_class_tag" + ) and "multivariate_normal" in loss.get_class_tag("distribution_type", ""): return dataloaders_fixed_window_without_covariates() # For other distribution losses, use covariates and apply preprocessing - if isinstance( - loss, - ( - LogNormalDistributionLoss, - NegativeBinomialDistributionLoss, - MQF2DistributionLoss, - BetaDistributionLoss, - ), + distr_types = {"log_normal", "negative_binomial", "mqf2", "beta"} + if ( + 
hasattr(loss, "get_class_tag") + and loss.get_class_tag("distribution_type", "") in distr_types ): dwc = data_with_covariates() if clip_target: @@ -109,14 +102,16 @@ def _get_test_dataloaders_from(cls, params): ) dl_default_kwargs.update(data_loader_kwargs) - if isinstance(loss, NegativeBinomialDistributionLoss): + if loss.get_class_tag("distribution_type", "") == "negative_binomial": dwc = dwc.assign(volume=lambda x: x.volume.round()) - # todo: still need some debugging to add the MQF2DistributionLoss - elif isinstance(loss, LogNormalDistributionLoss): + elif loss.get_class_tag("distribution_type", "") == "log_normal": dwc["volume"] = dwc["volume"].clip(1e-3, 1.0) return make_dataloaders(dwc, **dl_default_kwargs) - if isinstance(loss, TweedieLoss): + if ( + hasattr(loss, "get_class_tag") + and loss.get_class_tag("info:metric_name", "") == "TweedieLoss" + ): dwc = data_with_covariates() dl_default_kwargs = dict( target="target", diff --git a/pytorch_forecasting/tests/_loss_mapping.py b/pytorch_forecasting/tests/_loss_mapping.py index b676eb4dc..4664f40b4 100644 --- a/pytorch_forecasting/tests/_loss_mapping.py +++ b/pytorch_forecasting/tests/_loss_mapping.py @@ -1,57 +1,15 @@ -from pytorch_forecasting.metrics._distributions_pkg import ( - BetaDistributionLoss_pkg, - ImplicitQuantileNetworkDistributionLoss_pkg, - LogNormalDistributionLoss_pkg, - MQF2DistributionLoss_pkg, - MultivariateNormalDistributionLoss_pkg, - NegativeBinomialDistributionLoss_pkg, - NormalDistributionLoss_pkg, -) -from pytorch_forecasting.metrics._point_pkg import ( - CrossEntropy_pkg, - MAE_pkg, - MAPE_pkg, - MASE_pkg, - PoissonLoss_pkg, - RMSE_pkg, - SMAPE_pkg, - TweedieLoss_pkg, -) -from pytorch_forecasting.metrics._quantile_pkg import QuantileLoss_pkg +from pytorch_forecasting._registry import all_objects # Remove legacy lists and mappings for losses by pred/y type and tensor shape checks. # Use tags and _get_test_dataloaders_from for all compatibility and test setup. 
-METRIC_PKGS = [ - BetaDistributionLoss_pkg, - NegativeBinomialDistributionLoss_pkg, - MultivariateNormalDistributionLoss_pkg, - LogNormalDistributionLoss_pkg, - NormalDistributionLoss_pkg, - ImplicitQuantileNetworkDistributionLoss_pkg, - MAE_pkg, - MAPE_pkg, - MASE_pkg, - RMSE_pkg, - SMAPE_pkg, - PoissonLoss_pkg, - TweedieLoss_pkg, - CrossEntropy_pkg, - QuantileLoss_pkg, -] - -LOSS_SPECIFIC_PARAMS = { - pkg.get_class_tag("info:metric_name", pkg.__name__.replace("_pkg", "")): { - "clip_target": getattr(pkg(), "clip_target", False), - "data_loader_kwargs": getattr(pkg(), "data_loader_kwargs", {}), - } - for pkg in METRIC_PKGS -} +METRIC_PKGS = all_objects(object_types="metric", return_names=False) def get_compatible_losses(pred_types, y_types): """ Get compatible losses based on prediction types and target types. + Returns a list of (pkg, loss_instance) tuples. """ compatible_losses = [] for pkg in METRIC_PKGS: @@ -60,23 +18,5 @@ def get_compatible_losses(pred_types, y_types): if any(pt in pred_types for pt in pkg_pred_types) and any( yt in y_types for yt in pkg_y_types ): - compatible_losses.append(pkg.get_cls()()) + compatible_losses.append((pkg, pkg.get_cls()())) return compatible_losses - - -def get_test_dataloaders_for_loss(pkg, params=None): - """ - Get test dataloaders for a given loss package using its method. - """ - return pkg._get_test_dataloaders_from(params or {}) - - -def check_loss_output_shape(pkg, y_pred, y_true): - """ - Check that the output shape of the loss matches the expected shape from tags. 
- """ - expected_ndim = pkg.get_class_tag("loss_ndim", None) - loss_instance = pkg.get_cls()() - result = loss_instance(y_pred, y_true) - if expected_ndim is not None: - assert result.ndim == expected_ndim diff --git a/pytorch_forecasting/tests/test_all_estimators.py b/pytorch_forecasting/tests/test_all_estimators.py index 08a996c52..a6d0e811e 100644 --- a/pytorch_forecasting/tests/test_all_estimators.py +++ b/pytorch_forecasting/tests/test_all_estimators.py @@ -12,7 +12,6 @@ from pytorch_forecasting.tests._base._fixture_generator import BaseFixtureGenerator from pytorch_forecasting.tests._config import EXCLUDE_ESTIMATORS, EXCLUDED_TESTS from pytorch_forecasting.tests._loss_mapping import ( - LOSS_SPECIFIC_PARAMS, get_compatible_losses, ) @@ -195,16 +194,16 @@ def _generate_final_param_list(self, compatible_losses, base_params_list): """ all_train_kwargs = [] train_kwargs_names = [] - for loss_item in compatible_losses: - if inspect.isclass(loss_item): - loss_name = loss_item.__name__ - loss = loss_item - else: - loss_name = loss_item.__class__.__name__ - loss = loss_item - loss_params = deepcopy(LOSS_SPECIFIC_PARAMS.get(loss_name, {})) - loss_params["loss"] = loss - + for pkg_cls, loss in compatible_losses: + loss_name = loss.__class__.__name__ + pkg_instance = pkg_cls() + clip_target = getattr(pkg_instance, "clip_target", False) + data_loader_kwargs = getattr(pkg_instance, "data_loader_kwargs", {}) + loss_params = { + "clip_target": clip_target, + "data_loader_kwargs": data_loader_kwargs, + "loss": loss, + } for i, base_params in enumerate(base_params_list): final_params = _nested_update(base_params, loss_params) all_train_kwargs.append(final_params)