test_funcs.py
import torch
import copy
from tqdm import tqdm
import numpy as np


def test_autoencoder_dataloader(device, model, dataloader_test, shapedata, mm_constant=1000):
    model.eval()
    l1_loss = 0
    l2_loss = 0
    # Per-vertex normalization statistics, used to map normalized coordinates
    # back to the original scale.
    shapedata_mean = torch.Tensor(shapedata.mean).to(device)
    shapedata_std = torch.Tensor(shapedata.std).to(device)
    with torch.no_grad():
        for i, sample_dict in enumerate(tqdm(dataloader_test)):
            tx = sample_dict['points'].to(device)
            prediction = model(tx)  # torch.Size([batch_size, num_vertices, 3])
            # Collect reconstructions and ground-truth vertices (dummy node dropped)
            # across batches so they can be returned for inspection.
            if i == 0:
                predictions = copy.deepcopy(prediction)
                testset = copy.deepcopy(tx[:, :-1])
            else:
                predictions = torch.cat([predictions, prediction], 0)
                testset = torch.cat([testset, tx[:, :-1]], 0)
            # Strip the dummy node before computing the errors.
            if dataloader_test.dataset.dummy_node:  # this branch is taken here
                x_recon = prediction[:, :-1]
                x = tx[:, :-1]
            else:
                x_recon = prediction
                x = tx
            # L1 error on normalized coordinates, weighted by batch size so the sum
            # over batches equals the mean over the whole test set.
            l1_loss += torch.mean(torch.abs(x_recon - x)) * x.shape[0] / float(len(dataloader_test.dataset))
            # Un-normalize and convert to millimetres before the Euclidean error.
            x_recon = (x_recon * shapedata_std + shapedata_mean) * mm_constant
            x = (x * shapedata_std + shapedata_mean) * mm_constant
            # Mean per-vertex Euclidean distance, weighted the same way as the L1 term.
            l2_loss += torch.mean(torch.sqrt(torch.sum((x_recon - x) ** 2, dim=2))) * x.shape[0] / float(len(dataloader_test.dataset))

        # Un-normalize the collected ground truth and move results to the CPU.
        testset = (testset * shapedata_std + shapedata_mean) * mm_constant
        predictions = predictions.cpu().numpy()
        l1_loss = l1_loss.item()
        l2_loss = l2_loss.item()

    return predictions, testset.cpu().numpy(), l1_loss, l2_loss
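
Below is a minimal, hypothetical usage sketch, not part of this repository: the toy dataset, identity model, and ShapeStats class are stand-ins invented here so the call is runnable end to end; only the call to test_autoencoder_dataloader itself mirrors the function above, and the real project's dataset, trained model, and shapedata object would replace these placeholders.

# usage_example.py - hypothetical driver for test_funcs.py (stand-in objects only)
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader

from test_funcs import test_autoencoder_dataloader


class ToyMeshDataset(Dataset):
    """Yields {'points': [num_vertices + 1, 3]} samples; the last row is a dummy node."""
    dummy_node = True

    def __init__(self, num_samples=8, num_vertices=10):
        self.points = torch.randn(num_samples, num_vertices + 1, 3)

    def __len__(self):
        return self.points.shape[0]

    def __getitem__(self, idx):
        return {'points': self.points[idx]}


class ShapeStats:
    """Stand-in for the expected shapedata object with per-vertex mean/std arrays."""

    def __init__(self, num_vertices=10):
        self.mean = np.zeros((num_vertices, 3), dtype=np.float32)
        self.std = np.ones((num_vertices, 3), dtype=np.float32)


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = torch.nn.Identity().to(device)  # stand-in for a trained mesh autoencoder
dataloader_test = DataLoader(ToyMeshDataset(), batch_size=4, shuffle=False)

predictions, gt_vertices, l1_err, l2_err = test_autoencoder_dataloader(
    device, model, dataloader_test, ShapeStats(), mm_constant=1000)

print(predictions.shape, gt_vertices.shape, l1_err, l2_err)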