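"""Dump intermediate representations from trained theory-of-mind (ToM) models.

For every game listed in config/dataset_splits.json and for each player point
of view, a trained ToM model is run with intermediate=True and the resulting
features are saved next to the game data as
intermediate_ToM{6,7,8}_<game directory>_player{1,2}.npz, skipping files that
already exist.
"""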
import argparse
import json
import os

import numpy as np
import torch

from src.data.game_parser import GameParser
from src.models.dialogue_act_classification_model import Model as DActModel
from src.models.model_with_dialogue_moves import Model as ToMModel

DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

MODEL_TYPES = {
    'GRU': 0,
    'LSTM': 1,
    'Transformer': 2,
}

def main(args):
    # Extract intermediate representations from the experiment-6 ToM model
    # (Transformer, no dialogue moves, dialogue text enabled, first-person POV).
    # model_file = "models/ToM_models/dialogue_No_pov_First_Transformer_6_a.torch"
    model_file = "models/gt_dialogue_moves_bootstrap_DlgMoveFirst/gt_dialogue_moves_Transformer_dlgMove_No_dlg_Yes_pov_First_exp6_seed_2_VidFirst.torch"
    use_dialogue = "Yes"  # model_file.split("_dlg_")[-1].split("_")[0]
    model_type_name = "Transformer"  # model_file.split("_")[-3]
    model_type = MODEL_TYPES[model_type_name]
    model = ToMModel(model_type).to(DEVICE)
    model.load_state_dict(torch.load(model_file, map_location=DEVICE))
    dataset_splits = json.load(open('config/dataset_splits.json'))
    for split_paths in dataset_splits.values():
        for path in split_paths:
            for pov in [1, 2]:
                out_file = f'{path}/intermediate_ToM6_{path.split("/")[-1]}_player{pov}.npz'
                if os.path.isfile(out_file):
                    continue  # skip games that have already been processed
                game = GameParser(path, use_dialogue == 'Yes', pov, 0, True)
                feats = model(game, global_plan=False, player_plan=True, intermediate=True).cpu().data.numpy()
                np.savez_compressed(open(out_file, 'wb'), data=feats)
                print(out_file, feats.shape, model_type_name, use_dialogue, use_dialogue == 'Yes')
                # break
            # break
        # break
# model_file = "models/ToM_models/dialogue_No_pov_First_Transformer_7_i.torch"
model_file = "models/gt_dialogue_moves_bootstrap_DlgMoveFirst/gt_dialogue_moves_Transformer_dlgMove_Yes_dlg_No_pov_None_exp7_seed_5.torch"
use_dialogue = "No"#model_file.split("_")[-6]
model_type_name = 'Transformer'#model_file.split("_")[-3]
model_type = MODEL_TYPES[model_type_name]
model = ToMModel(model_type).to(DEVICE)
model.load_state_dict(torch.load(model_file))
dataset_splits = json.load(open('config/dataset_splits.json'))
for set in dataset_splits.values():
for path in set:
for pov in [1, 2]:
out_file = f'{path}/intermediate_ToM7_{path.split("/")[-1]}_player{pov}.npz'
if os.path.isfile(out_file):
continue
game = GameParser(path,use_dialogue=='Yes',4,0,True)
l = model(game, global_plan=False, player_plan=True,intermediate=True).cpu().data.numpy()
np.savez_compressed(open(out_file,'wb'), data=l)
print(out_file,l.shape,model_type_name,use_dialogue,use_dialogue=='Yes')
# break
# break
# break
# model_file = "models/ToM_models/dialogue_Yes_pov_First_LSTM_8_j.torch"
model_file = "models/gt_dialogue_moves_bootstrap_DlgMoveFirst/gt_dialogue_moves_Transformer_dlgMove_Yes_dlg_No_pov_None_exp8_seed_5.torch"
use_dialogue = "No"#model_file.split("_")[-6]
model_type_name = 'Transformer'#model_file.split("_")[-3]
model_type = MODEL_TYPES[model_type_name]
model = ToMModel(model_type).to(DEVICE)
model.load_state_dict(torch.load(model_file))
dataset_splits = json.load(open('config/dataset_splits.json'))
for set in dataset_splits.values():
for path in set:
for pov in [1, 2]:
out_file = f'{path}/intermediate_ToM8_{path.split("/")[-1]}_player{pov}.npz'
if os.path.isfile(out_file):
continue
game = GameParser(path,use_dialogue=='Yes',4,True)
l = model(game, global_plan=False, player_plan=True,intermediate=True).cpu().data.numpy()
np.savez_compressed(open(out_file,'wb'), data=l)
print(out_file,l.shape,model_type_name,use_dialogue,use_dialogue=='Yes')
# break
# break
# break
# model_file = "models/20211230/dialogue_act_First_LSTM.torch"
# use_dialogue = True
# model_type_name = model_file.split("_")[-1].split('.')[0]
# model_type = MODEL_TYPES[model_type_name]
# model = DActModel(model_type).to(DEVICE)
# model.load_state_dict(torch.load(model_file))
# dataset_splits = json.load(open('config/dataset_splits.json'))
# for set in dataset_splits.values():
# for path in set:
# for pov in [1, 2]:
# out_file = f'{path}/intermediate_DAct_{path.split("/")[-1]}_player{pov}.npz'
# if os.path.isfile(out_file):
# continue
# game = GameParser(path,use_dialogue=='Yes',pov)
# l = model(game, global_plan=False, player_plan=True,intermediate=True).cpu().data.numpy()
# np.savez_compressed(open(out_file,'wb'), data=l)
# print(out_file,l.shape,model_type_name,use_dialogue)
# # break
# # break
# # break
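
# A minimal sketch (not part of the original pipeline) of how the arrays
# written by main() could be read back by downstream code; `npz_path` is any
# of the intermediate_ToM*.npz files produced above.
def load_intermediate(npz_path):
    return np.load(npz_path)['data']
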
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Extract intermediate representations from trained ToM models.')
    parser.add_argument('--pov', type=str,
                        help='point of view [None, First, Third]')
    parser.add_argument('--use_dialogue', type=str,
                        help='Use dialogue [Yes, No]')
    parser.add_argument('--plans', type=str,
                        help='Use plans [Yes, No]')
    parser.add_argument('--seq_model', type=str,
                        help='sequence model [GRU, LSTM, Transformer, None]')
    parser.add_argument('--experiment', type=int,
                        help='experiment [0:AggQ1, 1:AggQ2, 2:AggQ3, 3:P0Q1, 4:P0Q2, 5:P0Q3, 6:P1Q1, 7:P1Q2, 8:P1Q3]')
    parser.add_argument('--save_path', type=str,
                        help='path where to save the model')
    parser.add_argument('--seed', type=str,
                        help='Use random or fixed seed [Random, Fixed]')
    main(parser.parse_args())