
Commit 5e2b8dc

Merge remote-tracking branch 'origin/main' into 467-update-docs

2 parents: a6af6df + 5075324

File tree: 6 files changed (+2, -159 lines)

autoemulate/experimental/data/preprocessors.py

Lines changed: 0 additions & 42 deletions
This file was deleted.
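For context, this deleted module defined the hook that the rest of the commit unwires. Below is a minimal sketch of the removed interface, reconstructed from its call sites elsewhere in this diff (the `Preprocessor` and `Standardizer` names and the error message appear in the hunks below; the implementation details are assumptions):

```python
# Sketch of the removed preprocessors module, reconstructed from its
# usages in this commit. Implementation details are assumptions.
import torch


class Preprocessor:
    """Interface: transform an input tensor before it reaches the model."""

    def preprocess(self, x: torch.Tensor) -> torch.Tensor:
        raise NotImplementedError


class Standardizer(Preprocessor):
    """Affine standardization x -> (x - mean) / std."""

    def __init__(self, mean: torch.Tensor, std: torch.Tensor):
        self.mean = mean
        self.std = std

    def preprocess(self, x: torch.Tensor) -> torch.Tensor:
        if x.ndim != 2:
            # Error message taken from the deleted test further down.
            msg = f"Expected 2D TensorLike, actual shape dim {x.ndim}"
            raise ValueError(msg)
        return (x - self.mean) / self.std
```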

autoemulate/experimental/emulators/base.py

Lines changed: 1 addition & 12 deletions
@@ -9,7 +9,6 @@
 from torch.distributions import TransformedDistribution
 from torch.optim.lr_scheduler import ExponentialLR, LRScheduler
 
-from autoemulate.experimental.data.preprocessors import Preprocessor
 from autoemulate.experimental.data.utils import ConversionMixin, ValidationMixin
 from autoemulate.experimental.device import TorchDeviceMixin
 from autoemulate.experimental.transforms.standardize import StandardizeTransform
@@ -392,7 +391,7 @@ def predict(self, x: TensorLike, with_grad: bool = False) -> GaussianLike:
         return pred
 
 
-class PyTorchBackend(nn.Module, Emulator, Preprocessor):
+class PyTorchBackend(nn.Module, Emulator):
     """
     `PyTorchBackend` provides a backend for PyTorch models.
@@ -410,19 +409,13 @@ class PyTorchBackend(nn.Module, Emulator, Preprocessor):
     epochs: int = 10
     loss_history: ClassVar[list[float]] = []
     verbose: bool = False
-    preprocessor: Preprocessor | None = None
     loss_fn: nn.Module = nn.MSELoss()
     optimizer_cls: type[optim.Optimizer] = optim.Adam
     optimizer: optim.Optimizer
     supports_grad: bool = True
     lr: float = 1e-1
     scheduler_cls: type[LRScheduler] | None = None
 
-    def preprocess(self, x: TensorLike) -> TensorLike:  # noqa: D102
-        if self.preprocessor is None:
-            return x
-        return self.preprocessor.preprocess(x)
-
     def loss_func(self, y_pred, y_true):
         """Loss function to be used for training the model."""
         return nn.MSELoss()(y_pred, y_true)
@@ -455,9 +448,6 @@ def _fit(
             batches = 0
 
             for x_batch, y_batch in dataloader:
-                # Preprocess x_batch
-                x = self.preprocess(x_batch)
-
                 # Forward pass
                 y_pred = self.forward(x_batch)
                 loss = self.loss_func(y_pred, y_batch)
@@ -535,7 +525,6 @@ def _initialize_weights(
     def _predict(self, x: TensorLike, with_grad: bool) -> OutputLike:
         self.eval()
         with torch.set_grad_enabled(with_grad):
-            x = self.preprocess(x)
             return self(x)
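With the hook gone, `PyTorchBackend` no longer rescales inputs internally; the `StandardizeTransform` import survives the first hunk, which suggests input scaling is meant to live in the transforms layer instead. A minimal sketch of the caller-side migration, assuming scaling is applied before data reaches the emulator (the `fit`/`predict` calls in the comments are assumptions, not the library's confirmed API):

```python
import torch

# Previously (removed): the model standardized each batch itself via
#     model.preprocessor = Standardizer(mean, std)
# Now: apply the same affine map once, before data reaches the emulator.
x = torch.randn(32, 4)
mean = x.mean(dim=0, keepdim=True)
std = x.std(dim=0, keepdim=True)
x_std = (x - mean) / std  # what Standardizer.preprocess used to do per batch

# Hypothetical usage; the fit/predict names are assumptions:
# emulator.fit(x_std, y)
# emulator.predict((x_new - mean) / std)
```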

autoemulate/experimental/emulators/ensemble.py

Lines changed: 1 addition & 4 deletions
@@ -268,10 +268,7 @@ def _predict(self, x: Tensor, with_grad: bool) -> GaussianLike:
         samples = []
         with torch.set_grad_enabled(with_grad):
             for _ in range(self.n_samples):
-                # apply any preprocessing the model expects
-                x_proc = self.model.preprocess(x)
-
-                out = self.model.forward(x_proc)
+                out = self.model.forward(x)
                 # out: Tensor of shape (batch_size, output_dim)
                 samples.append(out)
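The ensemble loop now forwards `x` untouched. For orientation, here is a sketch of the sample-then-summarize pattern this `_predict` implements; stacking repeated forward passes and taking moments is an illustration of how a `GaussianLike` could be summarized, not the library's confirmed construction:

```python
import torch

def sample_forward(model, x: torch.Tensor, n_samples: int) -> torch.Tensor:
    """Stack repeated forward passes: shape (n_samples, batch, output_dim)."""
    with torch.no_grad():
        return torch.stack([model(x) for _ in range(n_samples)], dim=0)

# Illustrative summary: per-point mean and variance across the samples.
# samples = sample_forward(model, x, n_samples=16)
# mean, var = samples.mean(dim=0), samples.var(dim=0)
```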

autoemulate/experimental/emulators/radial_basis_functions.py

Lines changed: 0 additions & 1 deletion
@@ -89,7 +89,6 @@ def _predict(self, x: TensorLike, with_grad: bool) -> OutputLike:
             msg = "Gradient calculation is not supported."
             raise ValueError(msg)
         self.eval()
-        x = self.preprocess(x)
         return self(x)
 
     @staticmethod
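The unchanged guard above is the general pattern for emulators with no autograd path: fail loudly when gradients are requested. A standalone sketch of that check (the function name is illustrative):

```python
import torch

def predict_without_grad(model, x: torch.Tensor, with_grad: bool = False) -> torch.Tensor:
    # RBF interpolation exposes no gradients here, so reject the request
    # explicitly rather than returning silently wrong derivatives.
    if with_grad:
        msg = "Gradient calculation is not supported."
        raise ValueError(msg)
    with torch.no_grad():
        return model(x)
```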

tests/experimental/test_experimental_base.py

Lines changed: 0 additions & 22 deletions
@@ -1,7 +1,6 @@
 import numpy as np
 import pytest
 import torch
-from autoemulate.experimental.data.preprocessors import Standardizer
 from autoemulate.experimental.data.utils import set_random_seed
 from autoemulate.experimental.device import TorchDeviceMixin
 from autoemulate.experimental.emulators.base import PyTorchBackend
@@ -32,9 +31,6 @@ def __init__(self, x=None, y=None, random_seed=None, device=None, **kwargs):
         self.scheduler_setup(kwargs)
         self.epochs = kwargs.get("epochs", 10)
         self.batch_size = kwargs.get("batch_size", 16)
-        self.preprocessor = Standardizer(
-            torch.Tensor([[-0.5]]), torch.Tensor([[0.5]])
-        )
 
     def forward(self, x):
         return self.linear(x)
@@ -110,24 +106,6 @@ def test_tune_xy(self):
         tuner = Tuner(x_train, y_train, n_iter=10)
         tuner.run(self.DummyModel)
 
-    def test_standardizer(self):
-        x_train = torch.Tensor(
-            [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [5.0, 5.0]]
-        )
-        x_train_preprocessed = self.model.preprocess(x_train)
-        assert isinstance(x_train_preprocessed, torch.Tensor)
-        assert torch.allclose(
-            x_train_preprocessed,
-            torch.Tensor([[3.0], [5.0], [7.0], [9.0], [11.0]]),
-        )
-
-    def test_standardizer_fail(self):
-        x_train = torch.Tensor([0.1, 2.0, 6.0, 0.2])
-        with pytest.raises(
-            ValueError, match="Expected 2D TensorLike, actual shape dim 1"
-        ):
-            self.model.preprocess(x_train)
-
     def test_tune_dataset(self):
         """
         Test that Tuner accepts a single Dataset input.
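The deleted `test_standardizer` pinned down the affine map numerically: with mean -0.5 and std 0.5, standardization is (x + 0.5) / 0.5 = 2x + 1, so inputs 1 through 5 map to 3, 5, 7, 9, 11, matching the expected tensor above. The same check in isolation:

```python
import torch

x = torch.tensor([[1.0], [2.0], [3.0], [4.0], [5.0]])
mean, std = torch.tensor([[-0.5]]), torch.tensor([[0.5]])
expected = torch.tensor([[3.0], [5.0], [7.0], [9.0], [11.0]])
assert torch.allclose((x - mean) / std, expected)  # 2x + 1
```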

tests/experimental/test_experimental_preprocessors.py

Lines changed: 0 additions & 78 deletions
This file was deleted.
