model.py
import torch
import torch.nn as nn
import torch.nn.functional as F


class CharCNN(nn.Module):
    """Character-level CNN classifier: six convolutional layers followed by three affine layers."""

    def __init__(self, config):
        super().__init__()
        self.is_cuda_enabled = config.cuda
        num_conv_filters = config.num_conv_filters
        output_channel = config.output_channel
        num_affine_neurons = config.num_affine_neurons
        target_class = config.target_class
        input_channel = 68  # size of the character alphabet (depth of the one-hot input)

        self.conv1 = nn.Conv1d(input_channel, num_conv_filters, kernel_size=7)
        self.conv2 = nn.Conv1d(num_conv_filters, num_conv_filters, kernel_size=7)
        self.conv3 = nn.Conv1d(num_conv_filters, num_conv_filters, kernel_size=3)
        self.conv4 = nn.Conv1d(num_conv_filters, num_conv_filters, kernel_size=3)
        self.conv5 = nn.Conv1d(num_conv_filters, num_conv_filters, kernel_size=3)
        self.conv6 = nn.Conv1d(num_conv_filters, output_channel, kernel_size=3)

        self.dropout = nn.Dropout(config.dropout)
        self.fc1 = nn.Linear(output_channel, num_affine_neurons)
        self.fc2 = nn.Linear(num_affine_neurons, num_affine_neurons)
        self.fc3 = nn.Linear(num_affine_neurons, target_class)

    def forward(self, x, **kwargs):
        # Input arrives as (batch, sequence_length, input_channel); Conv1d expects
        # (batch, input_channel, sequence_length), so swap the last two dimensions.
        if torch.cuda.is_available() and self.is_cuda_enabled:
            x = x.transpose(1, 2).type(torch.cuda.FloatTensor)
        else:
            x = x.transpose(1, 2).type(torch.FloatTensor)

        # Two conv + max-pool blocks, then four convolutions without pooling.
        x = F.max_pool1d(F.relu(self.conv1(x)), 3)
        x = F.max_pool1d(F.relu(self.conv2(x)), 3)
        x = F.relu(self.conv3(x))
        x = F.relu(self.conv4(x))
        x = F.relu(self.conv5(x))
        x = F.relu(self.conv6(x))

        # Global max pooling over the remaining time dimension, then the classifier head.
        x = F.max_pool1d(x, x.size(2)).squeeze(2)
        x = F.relu(self.fc1(x.view(x.size(0), -1)))
        x = self.dropout(x)
        x = F.relu(self.fc2(x))
        x = self.dropout(x)
        return self.fc3(x)
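

# --- Minimal usage sketch (illustrative only, not part of the original model) ---
# The config attribute names below are exactly those read in CharCNN.__init__;
# the concrete values and the sequence length of 1014 are assumptions chosen so
# the conv/pool stack leaves a positive-length feature map before global pooling.
if __name__ == "__main__":
    from types import SimpleNamespace

    config = SimpleNamespace(
        cuda=False,
        num_conv_filters=256,
        output_channel=256,
        num_affine_neurons=1024,
        target_class=4,
        dropout=0.5,
    )
    model = CharCNN(config)

    # One-hot character input: (batch, sequence_length, alphabet_size=68).
    dummy = torch.zeros(2, 1014, 68)
    logits = model(dummy)
    print(logits.shape)  # expected: torch.Size([2, 4])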