transformer_helpers.py
import torch.nn

from layers.transformer import Transformer
from layers.transformer.transformer import TransformerDecoderWithLayer
from models import TransformerEncDecModel, TransformerDecModel
from interfaces import (
    TransformerEncDecInterface,
    TransformerDecOnlyInterface,
    TransformerLMInterface,
)
from models.transformer_lm import TransformerLM


def create_lm(in_vocab_size, vec_dim, n_heads, encoder_n_layers) -> torch.nn.Module:
    """Build a Transformer language model with Xavier embedding init and OpenNMT scaling."""
    args = dict(embedding_init="xavier", scale_mode="opennmt")
    return TransformerLM(
        in_vocab_size, vec_dim, n_heads, num_encoder_layers=encoder_n_layers, **args
    )


def create_model(
    in_vocab_size,
    out_vocab_size,
    vec_dim,
    n_heads,
    encoder_n_layers,
    decoder_n_layers,
    is_null_encoder=False,
    mode="enc_dec",
) -> torch.nn.Module:
    """Build a seq2seq Transformer, or a decoder-only model when is_null_encoder is set."""
    args = dict(embedding_init="xavier", scale_mode="opennmt", mode=mode)
    if is_null_encoder:
        # Decoder-only model, used when the encoder is disabled.
        return TransformerDecModel(
            in_vocab_size,
            out_vocab_size,
            vec_dim,
            n_heads,
            num_encoder_layers=encoder_n_layers,
            num_decoder_layers=decoder_n_layers,
            tied_embedding=True,
            **args
        )
    else:
        # Standard encoder-decoder Transformer with tied input/output embeddings.
        return TransformerEncDecModel(
            in_vocab_size,
            out_vocab_size,
            vec_dim,
            n_heads,
            num_encoder_layers=encoder_n_layers,
            num_decoder_layers=decoder_n_layers,
            tied_embedding=True,
            **args
        )


def create_model_interface(
    model, label_smoothing=0.0, is_null_encoder=False, is_lm=False
):
    """Wrap a model in the training/evaluation interface matching its architecture."""
    if is_null_encoder:
        return TransformerDecOnlyInterface(model, label_smoothing=label_smoothing)
    elif is_lm:
        return TransformerLMInterface(model, label_smoothing=label_smoothing)
    else:
        return TransformerEncDecInterface(model, label_smoothing=label_smoothing)


#### Similar interfaces for pretrained models...
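

# --- Usage sketch (illustrative, not part of the original file) --------------
# A minimal example of how these helpers might be wired together. The vocabulary
# sizes and hyperparameter values below are placeholder assumptions, not values
# taken from this repository's configs.
if __name__ == "__main__":
    # Encoder-decoder model (hypothetical hyperparameters) and its interface.
    model = create_model(
        in_vocab_size=100,
        out_vocab_size=100,
        vec_dim=512,
        n_heads=8,
        encoder_n_layers=4,
        decoder_n_layers=4,
    )
    interface = create_model_interface(model, label_smoothing=0.1)

    # Decoder-only language model and its interface.
    lm = create_lm(in_vocab_size=100, vec_dim=512, n_heads=8, encoder_n_layers=4)
    lm_interface = create_model_interface(lm, is_lm=True)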