model.py
import torch
from torch import nn


class RecurrentVecEncoder(nn.Module):
    def __init__(self, in_features: int, config: dict) -> None:
        """Initializes a recurrent vector encoder.

        Arguments:
            in_features {int} -- The number of features in the input data.
            config {dict} -- The model configuration.
        """
        super().__init__()
        self.config = config
        self.vec_encoder = nn.Linear(in_features, self.config["hidden_size"])
        self.activ_fn = nn.ReLU()
        if self.config["rnn"] == "lstm":
            self.recurrent_layer = nn.LSTM(
                self.config["hidden_size"],
                self.config["hidden_state_size"],
                num_layers=self.config["num_layers"],
                batch_first=True,
            )
        else:
            self.recurrent_layer = nn.GRU(
                self.config["hidden_size"],
                self.config["hidden_state_size"],
                num_layers=self.config["num_layers"],
                batch_first=True,
            )

    def forward(self, h: torch.Tensor) -> torch.Tensor:
        """Encodes the input sequence.

        Arguments:
            h {torch.Tensor} -- Input sequence of shape (num_sequences, sequence_len, in_features).

        Returns:
            {torch.Tensor} -- Encoded sequence of shape (num_sequences * sequence_len, hidden_state_size).
        """
        # Get the number of sequences and the sequence length
        num_sequences, sequence_len = h.shape[0], h.shape[1]
        # Flatten the input so the linear encoder sees one timestep per row
        h = h.reshape(num_sequences * sequence_len, -1)
        # Encode the input vector
        h = self.activ_fn(self.vec_encoder(h))
        # Restore the (batch, time, features) shape expected by the RNN
        h = h.reshape(num_sequences, sequence_len, -1)
        # Forward the recurrent layer (hidden state initialized to zeros)
        h, _ = self.recurrent_layer(h, None)
        # Flatten the output so each timestep becomes one row
        h = h.reshape(num_sequences * sequence_len, -1)
        return h
class Classifier(nn.Module):
    def __init__(self, in_features: int, config: dict) -> None:
        """Initializes the classifier model.

        Arguments:
            in_features {int} -- The number of features in the input data.
            config {dict} -- The model configuration.
        """
        super().__init__()
        if config["encoder"] == "VecEncoder":
            self.encoder = nn.Linear(in_features, config["hidden_size"])
            in_features_next_layer = config["hidden_size"]
        else:
            self.encoder = RecurrentVecEncoder(in_features, config)
            in_features_next_layer = config["hidden_state_size"]
        self.out = nn.Linear(in_features_next_layer, 1)

    def forward(self, data: torch.Tensor) -> torch.Tensor:
        """The forward pass of the model.

        Arguments:
            data {torch.Tensor} -- The input data for the model.

        Returns:
            {torch.Tensor} -- The model's output logit, indicating whether the data is a positive or negative example.
        """
        h = self.encoder(data)
        h = self.out(h).squeeze()
        return h
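

# A minimal usage sketch, not part of the original file: the config keys below
# ("encoder", "rnn", "hidden_size", "hidden_state_size", "num_layers") follow
# the lookups in the classes above, but the concrete values are illustrative
# assumptions.
if __name__ == "__main__":
    config = {
        "encoder": "RecurrentVecEncoder",  # any value other than "VecEncoder" selects the RNN path
        "rnn": "gru",                      # "lstm" selects nn.LSTM, anything else nn.GRU
        "hidden_size": 64,
        "hidden_state_size": 32,
        "num_layers": 1,
    }
    model = Classifier(in_features=8, config=config)
    # A batch of 4 sequences, each 10 timesteps long, with 8 features per step
    data = torch.randn(4, 10, 8)
    logits = model(data)
    # The recurrent encoder flattens timesteps, so there is one logit per step
    print(logits.shape)  # torch.Size([40])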