add available device to regression tests #3335 (PR #3394)


Closed

Changes from 14 commits (31 commits total)
66755ad
add available device to test_canberra_metric.py
BanzaiTokyo Apr 24, 2025
9229e3b
add _double_dtype as dtype when transferring errors to device
BanzaiTokyo Apr 24, 2025
2f6320a
available devices in test_fractional_absolute_error.py, test_fraction…
BanzaiTokyo Apr 24, 2025
557f549
when transferring to device use dtype
BanzaiTokyo Apr 24, 2025
0130773
add available device to tests
BanzaiTokyo Apr 24, 2025
94a002b
use self._double_dtype instead of torch.double
BanzaiTokyo Apr 24, 2025
2631377
use self._double_dtype when moving to device in epoch_metric.py
BanzaiTokyo Apr 24, 2025
d5b9e5a
removes unnecessary tests
BanzaiTokyo Apr 24, 2025
f99b643
rollbacks changes in epoch_metric.py
BanzaiTokyo Apr 24, 2025
e24ce01
redo test_integration
BanzaiTokyo Apr 24, 2025
3dbbe1e
redo test_integration
BanzaiTokyo Apr 24, 2025
1cf59fa
casting of eps in _update
BanzaiTokyo Apr 24, 2025
6f0599d
more conversions to torch
BanzaiTokyo Apr 24, 2025
35527d5
in _torch_median move output to cpu if mps (torch.kthvalue is not sup…
BanzaiTokyo Apr 25, 2025
c13837e
fixing test_degenerated_sample
BanzaiTokyo Apr 25, 2025
c85dab1
fixing test_degenerated_sample
BanzaiTokyo Apr 25, 2025
c662c44
rename upper case variables
BanzaiTokyo Apr 25, 2025
e471064
change range to 3
BanzaiTokyo Apr 25, 2025
37a0469
rewrite test_compute
BanzaiTokyo Apr 25, 2025
71af57e
rewrite test_fractional_bias
BanzaiTokyo Apr 25, 2025
d59cb6f
remove prints
BanzaiTokyo Apr 25, 2025
da2e75d
rollback eps in canberra_metric.py
BanzaiTokyo Apr 25, 2025
0a2f6d4
rollback test_epoch_metric.py because the changes are moved to a sepa…
BanzaiTokyo Apr 25, 2025
d1ef2d4
Merge branch 'master' into regression_tests_add_available_device
BanzaiTokyo Apr 25, 2025
667332d
set sum_of_errors as _double_dtype
BanzaiTokyo Apr 28, 2025
713aab9
Merge branch 'master' into regression_tests_add_available_device
BanzaiTokyo Apr 28, 2025
579d035
use torch instead of numpy where possible in test_canberra_metric.py
BanzaiTokyo Apr 28, 2025
cab29ca
Merge branch 'master' into regression_tests_add_available_device
BanzaiTokyo Apr 29, 2025
e6c96de
remove double_dtype from metrics
BanzaiTokyo Apr 29, 2025
346e0e1
takes into account PR comments
BanzaiTokyo May 2, 2025
ded98cf
refactor integration tests for fractional bias and fractional absolut…
BanzaiTokyo May 2, 2025
3 changes: 3 additions & 0 deletions ignite/metrics/regression/_base.py
@@ -30,6 +30,9 @@ def _check_output_types(output: Tuple[torch.Tensor, torch.Tensor]) -> None:


def _torch_median(output: torch.Tensor) -> float:
+    if output.device.type == "mps":
+        output = output.cpu()
+
    output = output.view(-1)
    len_ = len(output)

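Note for reviewers: torch.kthvalue, which _torch_median builds on, has no MPS implementation at the time of writing, hence the CPU fallback before the median is taken. A minimal self-contained sketch of the resulting behavior (median_with_mps_fallback is a hypothetical name, not ignite API):

    import torch

    def median_with_mps_fallback(t: torch.Tensor) -> float:
        # Assumption: torch.kthvalue is unsupported on "mps", so fall back
        # to CPU before computing order statistics.
        if t.device.type == "mps":
            t = t.cpu()
        t = t.view(-1)
        n = len(t)
        if n % 2 == 1:
            return float(t.kthvalue(n // 2 + 1).values)
        # even length: average the two middle order statistics
        return float((t.kthvalue(n // 2).values + t.kthvalue(n // 2 + 1).values) / 2.0)
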
5 changes: 3 additions & 2 deletions ignite/metrics/regression/canberra_metric.py
@@ -73,8 +73,9 @@ def reset(self) -> None:

    def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
        y_pred, y = output[0].detach(), output[1].detach()
-        errors = torch.abs(y - y_pred) / (torch.abs(y_pred) + torch.abs(y) + 1e-15)
-        self._sum_of_errors += torch.sum(errors).to(self._device)
+        eps = torch.tensor(1e-15, dtype=self._double_dtype, device=y.device)
+        errors = torch.abs(y - y_pred) / (torch.abs(y_pred) + torch.abs(y) + eps)
+        self._sum_of_errors += torch.sum(errors).to(dtype=self._double_dtype, device=self._device)

    @sync_all_reduce("_sum_of_errors")
    def compute(self) -> float:

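Why eps is materialized as a tensor: a Python float added to a float32 tensor does not change the tensor's dtype, whereas a tensor carrying the metric's double dtype promotes the whole expression to it. Since ignite's _double_dtype is float64 except on MPS (which lacks float64 support and gets float32), the division runs at the widest precision each backend supports. A hedged illustration, with double_dtype_for as an assumed stand-in for that selection logic:

    import torch

    def double_dtype_for(device: torch.device) -> torch.dtype:
        # Assumption: mirrors ignite's per-device choice of "double" dtype.
        return torch.float32 if device.type == "mps" else torch.float64

    device = torch.device("cpu")
    eps = torch.tensor(1e-15, dtype=double_dtype_for(device), device=device)
    y_pred, y = torch.rand(4, device=device), torch.rand(4, device=device)
    errors = torch.abs(y - y_pred) / (torch.abs(y_pred) + torch.abs(y) + eps)
    print(errors.dtype)  # torch.float64 here; would be torch.float32 on MPS
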
2 changes: 1 addition & 1 deletion ignite/metrics/regression/fractional_absolute_error.py
@@ -70,7 +70,7 @@ def reset(self) -> None:
    def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
        y_pred, y = output[0].detach(), output[1].detach()
        errors = 2 * torch.abs(y.view_as(y_pred) - y_pred) / (torch.abs(y_pred) + torch.abs(y.view_as(y_pred)))
-        self._sum_of_errors += torch.sum(errors).to(self._device)
+        self._sum_of_errors += torch.sum(errors).to(dtype=self._double_dtype, device=self._device)
        self._num_examples += y.shape[0]

    @sync_all_reduce("_num_examples", "_sum_of_errors")

4 changes: 2 additions & 2 deletions ignite/metrics/regression/fractional_bias.py
@@ -64,13 +64,13 @@ class FractionalBias(_BaseRegression):

    @reinit__is_reduced
    def reset(self) -> None:
-        self._sum_of_errors = torch.tensor(0.0, dtype=torch.double, device=self._device)
+        self._sum_of_errors = torch.tensor(0.0, dtype=self._double_dtype, device=self._device)
        self._num_examples = 0

    def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
        y_pred, y = output[0].detach(), output[1].detach()
        errors = 2 * (y.view_as(y_pred) - y_pred) / (y_pred + y.view_as(y_pred) + 1e-30)
-        self._sum_of_errors += torch.sum(errors).to(self._device)
+        self._sum_of_errors += torch.sum(errors).to(dtype=self._double_dtype, device=self._device)
        self._num_examples += y.shape[0]

    @sync_all_reduce("_sum_of_errors", "_num_examples")

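The reset change is the companion fix: a hard-coded torch.double accumulator cannot even be allocated on MPS, while self._double_dtype degrades to float32 there. A quick check, hedged because the exact exception text varies across torch versions:

    import torch

    # Assumption: the MPS backend rejects float64 tensors outright.
    if torch.backends.mps.is_available():
        try:
            torch.tensor(0.0, dtype=torch.double, device="mps")
        except TypeError as exc:
            print(f"float64 on MPS rejected: {exc}")
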
2 changes: 1 addition & 1 deletion ignite/metrics/regression/geometric_mean_absolute_error.py
@@ -70,7 +70,7 @@ def reset(self) -> None:
    def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
        y_pred, y = output[0].detach(), output[1].detach()
        errors = torch.log(torch.abs(y.view_as(y_pred) - y_pred))
-        self._sum_of_errors += torch.sum(errors).to(self._device)
+        self._sum_of_errors += torch.sum(errors).to(dtype=self._double_dtype, device=self._device)
        self._num_examples += y.shape[0]

    @sync_all_reduce("_sum_of_errors", "_num_examples")

@@ -80,8 +80,8 @@ def reset(self) -> None:
    def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
        y_pred, y = output[0].detach(), output[1].detach()

-        y_pred = y_pred.clone().to(self._device)
-        y = y.clone().to(self._device)
+        y_pred = y_pred.clone().to(dtype=self._double_dtype, device=self._device)
+        y = y.clone().to(dtype=self._double_dtype, device=self._device)

        self._predictions.append(y_pred)
        self._targets.append(y)

2 changes: 1 addition & 1 deletion ignite/metrics/regression/manhattan_distance.py
@@ -70,7 +70,7 @@ def reset(self) -> None:
    def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
        y_pred, y = output
        errors = torch.abs(y - y_pred)
-        self._sum_of_errors += torch.sum(errors).to(self._device)
+        self._sum_of_errors += torch.sum(errors).to(dtype=self._double_dtype, device=self._device)

    @sync_all_reduce("_sum_of_errors")
    def compute(self) -> float:

4 changes: 3 additions & 1 deletion ignite/metrics/regression/mean_absolute_relative_error.py
@@ -72,7 +72,9 @@ def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
        if (y == 0).any():
            raise NotComputableError("The ground truth has 0.")
        absolute_error = torch.abs(y_pred - y.view_as(y_pred)) / torch.abs(y.view_as(y_pred))
-        self._sum_of_absolute_relative_errors += torch.sum(absolute_error).to(self._device)
+        self._sum_of_absolute_relative_errors += torch.sum(absolute_error).to(
+            dtype=self._double_dtype, device=self._device
+        )
        self._num_samples += y.size()[0]

    @sync_all_reduce("_sum_of_absolute_relative_errors", "_num_samples")

2 changes: 1 addition & 1 deletion ignite/metrics/regression/mean_normalized_bias.py
@@ -74,7 +74,7 @@ def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
            raise NotComputableError("The ground truth has 0.")

        errors = (y.view_as(y_pred) - y_pred) / y
-        self._sum_of_errors += torch.sum(errors).to(self._device)
+        self._sum_of_errors += torch.sum(errors).to(dtype=self._double_dtype, device=self._device)
        self._num_examples += y.shape[0]

    @sync_all_reduce("_sum_of_errors", "_num_examples")

10 changes: 5 additions & 5 deletions ignite/metrics/regression/pearson_correlation.py
@@ -87,11 +87,11 @@ def reset(self) -> None:

    def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
        y_pred, y = output[0].detach(), output[1].detach()
-        self._sum_of_y_preds += y_pred.sum().to(self._device)
-        self._sum_of_ys += y.sum().to(self._device)
-        self._sum_of_y_pred_squares += y_pred.square().sum().to(self._device)
-        self._sum_of_y_squares += y.square().sum().to(self._device)
-        self._sum_of_products += (y_pred * y).sum().to(self._device)
+        self._sum_of_y_preds += y_pred.sum().to(dtype=self._double_dtype, device=self._device)
+        self._sum_of_ys += y.sum().to(dtype=self._double_dtype, device=self._device)
+        self._sum_of_y_pred_squares += y_pred.square().sum().to(dtype=self._double_dtype, device=self._device)
+        self._sum_of_y_squares += y.square().sum().to(dtype=self._double_dtype, device=self._device)
+        self._sum_of_products += (y_pred * y).sum().to(dtype=self._double_dtype, device=self._device)
        self._num_examples += y.shape[0]

    @sync_all_reduce(

6 changes: 3 additions & 3 deletions ignite/metrics/regression/r2_score.py
@@ -70,10 +70,10 @@ def reset(self) -> None:
    def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
        y_pred, y = output
        self._num_examples += y.shape[0]
-        self._sum_of_errors += torch.sum(torch.pow(y_pred - y, 2)).to(self._device)
+        self._sum_of_errors += torch.sum(torch.pow(y_pred - y, 2)).to(dtype=self._double_dtype, device=self._device)

-        self._y_sum += torch.sum(y).to(self._device)
-        self._y_sq_sum += torch.sum(torch.pow(y, 2)).to(self._device)
+        self._y_sum += torch.sum(y).to(dtype=self._double_dtype, device=self._device)
+        self._y_sq_sum += torch.sum(torch.pow(y, 2)).to(dtype=self._double_dtype, device=self._device)

    @sync_all_reduce("_num_examples", "_sum_of_errors", "_y_sq_sum", "_y_sum")
    def compute(self) -> float:

2 changes: 1 addition & 1 deletion ignite/metrics/regression/wave_hedges_distance.py
@@ -68,7 +68,7 @@ def reset(self) -> None:
    def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
        y_pred, y = output[0].detach(), output[1].detach()
        errors = torch.abs(y.view_as(y_pred) - y_pred) / (torch.max(y_pred, y.view_as(y_pred)) + 1e-30)
-        self._sum_of_errors += torch.sum(errors).to(self._device)
+        self._sum_of_errors += torch.sum(errors).to(dtype=self._double_dtype, device=self._device)

    @sync_all_reduce("_sum_of_errors")
    def compute(self) -> float:

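All of the metric hunks above share one pattern: each batch statistic is cast with .to(dtype=self._double_dtype, device=self._device) before the in-place add, so the running total is kept in the widest dtype the device supports. A small sketch of why accumulator width matters:

    import torch

    # float32 stops resolving +1.0 at 2**24; float64 does not.
    acc32 = torch.tensor(16_777_216.0, dtype=torch.float32)
    acc64 = acc32.to(torch.float64)
    print(acc32 + 1.0 == acc32)  # tensor(True): the increment is lost
    print(acc64 + 1.0 == acc64)  # tensor(False): float64 still resolves it
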
68 changes: 36 additions & 32 deletions tests/ignite/metrics/regression/test_canberra_metric.py
@@ -20,14 +20,15 @@ def test_wrong_input_shapes():
        m.update((torch.rand(4, 1), torch.rand(4)))


-def test_compute():
+def test_compute(available_device):
    a = np.random.randn(4)
    b = np.random.randn(4)
    c = np.random.randn(4)
    d = np.random.randn(4)
    ground_truth = np.random.randn(4)

-    m = CanberraMetric()
+    m = CanberraMetric(device=available_device)
+    assert m._device == torch.device(available_device)

    canberra = DistanceMetric.get_metric("canberra")

@@ -58,45 +59,48 @@ def test_compute():
    assert canberra.pairwise([v1, v2])[0][1] == pytest.approx(np_sum)


-def test_integration():
-    def _test(y_pred, y, batch_size):
-        def update_fn(engine, batch):
-            idx = (engine.state.iteration - 1) * batch_size
-            y_true_batch = np_y[idx : idx + batch_size]
-            y_pred_batch = np_y_pred[idx : idx + batch_size]
-            return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
-
-        engine = Engine(update_fn)
-
-        m = CanberraMetric()
-        m.attach(engine, "cm")
-
-        np_y = y.numpy().ravel()
-        np_y_pred = y_pred.numpy().ravel()
-
-        canberra = DistanceMetric.get_metric("canberra")
-
-        data = list(range(y_pred.shape[0] // batch_size))
-        cm = engine.run(data, max_epochs=1).metrics["cm"]
-
-        assert canberra.pairwise([np_y_pred, np_y])[0][1] == pytest.approx(cm)
-
-    def get_test_cases():
-        test_cases = [
-            (torch.rand(size=(100,)), torch.rand(size=(100,)), 10),
-            (torch.rand(size=(100, 1)), torch.rand(size=(100, 1)), 20),
-        ]
-        return test_cases
-
-    for _ in range(5):
-        # check multiple random inputs as random exact occurencies are rare
-        test_cases = get_test_cases()
-        for y_pred, y, batch_size in test_cases:
-            _test(y_pred, y, batch_size)
+@pytest.mark.parametrize("n_times", range(5))
+@pytest.mark.parametrize(
+    "test_cases",
+    [
+        (torch.rand(size=(100,)), torch.rand(size=(100,)), 10),
+        (torch.rand(size=(100, 1)), torch.rand(size=(100, 1)), 20),
+    ],
+)
+def test_integration(n_times, test_cases, available_device):
+    y_pred, y, batch_size = test_cases
+    assert y_pred.dtype == torch.float32
+    assert y.dtype == torch.float32
+
+    def update_fn(engine, batch):
+        idx = (engine.state.iteration - 1) * batch_size
+        y_true_batch = y[idx : idx + batch_size].to(dtype=torch.float32)
+        y_pred_batch = y_pred[idx : idx + batch_size].to(dtype=torch.float32)
+        return y_pred_batch, y_true_batch
+
+    engine = Engine(update_fn)
+
+    m = CanberraMetric(device=available_device)
+    print(f"m's dtype: {m._double_dtype}")
+    assert m._device == torch.device(available_device)
+
+    m.attach(engine, "cm")
+    print(f"m's dtype again: {m._double_dtype}")
+
+    canberra = DistanceMetric.get_metric("canberra")
+
+    data = list(range(y_pred.shape[0] // batch_size))
+    cm = engine.run(data, max_epochs=1).metrics["cm"]
+
+    X = y_pred.cpu().numpy().reshape(len(y_pred), -1)
+    Y = y.cpu().numpy().reshape(len(y), -1)
+    expected = np.sum(canberra.pairwise(X, Y).diagonal())
+    assert expected == pytest.approx(cm)


-def test_error_is_not_nan():
-    m = CanberraMetric()
+def test_error_is_not_nan(available_device):
+    m = CanberraMetric(device=available_device)
+    assert m._device == torch.device(available_device)
    m.update((torch.zeros(4), torch.zeros(4)))
    assert not (torch.isnan(m._sum_of_errors).any() or torch.isinf(m._sum_of_errors).any()), m._sum_of_errors

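Every rewritten test takes an available_device pytest fixture, which lives in the suite's conftest rather than in this diff; the print(...) calls visible here belong to the 14-commit view and were dropped by the later "remove prints" commit (d59cb6f). A minimal sketch of such a fixture, written as an assumption about its behavior rather than a copy of ignite's implementation:

    import pytest
    import torch

    @pytest.fixture
    def available_device():
        # Hypothetical stand-in: prefer the most capable local backend.
        if torch.cuda.is_available():
            return "cuda"
        if torch.backends.mps.is_available():
            return "mps"
        return "cpu"
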
tests/ignite/metrics/regression/test_fractional_absolute_error.py
@@ -28,14 +28,15 @@ def test_wrong_input_shapes():
        m.update((torch.rand(4, 1), torch.rand(4)))


-def test_compute():
+def test_compute(available_device):
    a = np.random.randn(4)
    b = np.random.randn(4)
    c = np.random.randn(4)
    d = np.random.randn(4)
    ground_truth = np.random.randn(4)

-    m = FractionalAbsoluteError()
+    m = FractionalAbsoluteError(device=available_device)
+    assert m._device == torch.device(available_device)

    m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
    np_sum = (2 * np.abs((a - ground_truth)) / (np.abs(a) + np.abs(ground_truth))).sum()
@@ -62,8 +63,8 @@ def test_compute():
    assert m.compute() == pytest.approx(np_ans)


-def test_integration():
-    def _test(y_pred, y, batch_size):
+def test_integration(available_device):
+    def _test(y_pred, y, batch_size, device="cpu"):
        def update_fn(engine, batch):
            idx = (engine.state.iteration - 1) * batch_size
            y_true_batch = np_y[idx : idx + batch_size]
@@ -72,7 +73,8 @@ def update_fn(engine, batch):

        engine = Engine(update_fn)

-        m = FractionalAbsoluteError()
+        m = FractionalAbsoluteError(device=device)
+        assert m._device == torch.device(device)
        m.attach(engine, "fab")

        np_y = y.numpy().ravel()
@@ -98,7 +100,7 @@ def get_test_cases():
        # check multiple random inputs as random exact occurencies are rare
        test_cases = get_test_cases()
        for y_pred, y, batch_size in test_cases:
-            _test(y_pred, y, batch_size)
+            _test(y_pred, y, batch_size, device=available_device)


def _test_distrib_compute(device):

20 changes: 12 additions & 8 deletions tests/ignite/metrics/regression/test_fractional_bias.py
@@ -28,14 +28,15 @@ def test_wrong_input_shapes():
        m.update((torch.rand(4, 1), torch.rand(4)))


-def test_fractional_bias():
+def test_fractional_bias(available_device):
    a = np.random.randn(4)
    b = np.random.randn(4)
    c = np.random.randn(4)
    d = np.random.randn(4)
    ground_truth = np.random.randn(4)

-    m = FractionalBias()
+    m = FractionalBias(device=available_device)
+    assert m._device == torch.device(available_device)

    m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
    np_sum = (2 * (ground_truth - a) / (a + ground_truth)).sum()
@@ -62,8 +63,8 @@ def test_fractional_bias():
    assert m.compute() == pytest.approx(np_ans)


-def test_integration():
-    def _test(y_pred, y, batch_size):
+def test_integration(available_device):
+    def _test(y_pred, y, batch_size, device="cpu"):
        def update_fn(engine, batch):
            idx = (engine.state.iteration - 1) * batch_size
            y_true_batch = np_y[idx : idx + batch_size]
@@ -72,7 +73,9 @@ def update_fn(engine, batch):

        engine = Engine(update_fn)

-        m = FractionalBias()
+        m = FractionalBias(device=device)
+        assert m._device == torch.device(device)
+
        m.attach(engine, "fb")

        np_y = y.double().numpy().ravel()
@@ -98,11 +101,12 @@ def get_test_cases():
        # check multiple random inputs as random exact occurencies are rare
        test_cases = get_test_cases()
        for y_pred, y, batch_size in test_cases:
-            _test(y_pred, y, batch_size)
+            _test(y_pred, y, batch_size, device=available_device)


-def test_error_is_not_nan():
-    m = FractionalBias()
+def test_error_is_not_nan(available_device):
+    m = FractionalBias(device=available_device)
+    assert m._device == torch.device(available_device)
    m.update((torch.zeros(4), torch.zeros(4)))
    assert not (torch.isnan(m._sum_of_errors).any() or torch.isinf(m._sum_of_errors).any()), m._sum_of_errors

tests/ignite/metrics/regression/test_geometric_mean_absolute_error.py
@@ -28,15 +28,16 @@ def test_wrong_input_shapes():
        m.update((torch.rand(4, 1), torch.rand(4)))


-def test_compute():
+def test_compute(available_device):
    a = np.random.randn(4)
    b = np.random.randn(4)
    c = np.random.randn(4)
    d = np.random.randn(4)
    ground_truth = np.random.randn(4)
    np_prod = 1.0

-    m = GeometricMeanAbsoluteError()
+    m = GeometricMeanAbsoluteError(device=available_device)
+    assert m._device == torch.device(available_device)
    m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))

    errors = np.abs(ground_truth - a)
@@ -67,8 +68,8 @@ def test_compute():
    assert m.compute() == pytest.approx(np_ans)


-def test_integration():
-    def _test(y_pred, y, batch_size):
+def test_integration(available_device):
+    def _test(y_pred, y, batch_size, device="cpu"):
        def update_fn(engine, batch):
            idx = (engine.state.iteration - 1) * batch_size
            y_true_batch = np_y[idx : idx + batch_size]
@@ -77,7 +78,9 @@ def update_fn(engine, batch):

        engine = Engine(update_fn)

-        m = GeometricMeanAbsoluteError()
+        m = GeometricMeanAbsoluteError(device=device)
+        assert m._device == torch.device(device)
+
        m.attach(engine, "gmae")

        np_y = y.numpy().ravel()