mlstmfcn.py
"""
Source: https://github.com/metra4ok/MLSTM-FCN-Pytorch/blob/main/src/model.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F


class SELayer(nn.Module):
    """Squeeze-and-Excitation block: re-weights channels by a learned gate."""

    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool1d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            nn.Sigmoid()
        )

    def forward(self, x):
        # x: [B, C, T] -> squeeze to [B, C], excite to per-channel gates in (0, 1)
        b, c, _ = x.size()
        y = self.avg_pool(x).view(b, c)
        y = self.fc(y).view(b, c, 1)
        return x * y.expand_as(x)


class MLSTMfcn(nn.Module):
    def __init__(self, *, num_classes, max_seq_len, num_features,
                 num_lstm_out=128, num_lstm_layers=1,
                 conv1_nf=128, conv2_nf=256, conv3_nf=128,
                 lstm_drop_p=0.8, fc_drop_p=0.3):
        super(MLSTMfcn, self).__init__()
        self.num_classes = num_classes
        self.max_seq_len = max_seq_len
        self.num_features = num_features
        self.num_lstm_out = num_lstm_out
        self.num_lstm_layers = num_lstm_layers
        self.conv1_nf = conv1_nf
        self.conv2_nf = conv2_nf
        self.conv3_nf = conv3_nf
        self.lstm_drop_p = lstm_drop_p
        self.fc_drop_p = fc_drop_p

        # Recurrent branch
        self.lstm = nn.LSTM(input_size=self.num_features,
                            hidden_size=self.num_lstm_out,
                            num_layers=self.num_lstm_layers,
                            batch_first=True)

        # Convolutional branch: three Conv1d/BatchNorm blocks; the first two
        # are followed by squeeze-and-excitation layers
        self.conv1 = nn.Conv1d(self.num_features, self.conv1_nf, 8)
        self.conv2 = nn.Conv1d(self.conv1_nf, self.conv2_nf, 5)
        self.conv3 = nn.Conv1d(self.conv2_nf, self.conv3_nf, 3)

        self.bn1 = nn.BatchNorm1d(self.conv1_nf)
        self.bn2 = nn.BatchNorm1d(self.conv2_nf)
        self.bn3 = nn.BatchNorm1d(self.conv3_nf)

        self.se1 = SELayer(self.conv1_nf)  # ex 128
        self.se2 = SELayer(self.conv2_nf)  # ex 256

        self.relu = nn.ReLU()
        self.lstmDrop = nn.Dropout(self.lstm_drop_p)
        self.convDrop = nn.Dropout(self.fc_drop_p)

        self.fc = nn.Linear(self.conv3_nf + self.num_lstm_out, self.num_classes)

    def forward(self, x, seq_lens):
        """Input x should be of size [B, T, F], where
        B = batch size
        T = time samples
        F = features
        """
        # Recurrent branch: pack the batch so the LSTM skips padded time steps
        x1 = nn.utils.rnn.pack_padded_sequence(x, seq_lens,
                                               batch_first=True,
                                               enforce_sorted=False)
        x1, (ht, ct) = self.lstm(x1)
        # Take the final hidden state of the top LSTM layer; indexing the
        # padded output at [:, -1, :] would pick up padding zeros for any
        # sequence shorter than the longest one in the batch.
        x1 = self.lstmDrop(ht[-1])

        # Convolutional branch expects [B, F, T]
        x2 = x.transpose(2, 1)
        x2 = self.convDrop(self.relu(self.bn1(self.conv1(x2))))
        x2 = self.se1(x2)
        x2 = self.convDrop(self.relu(self.bn2(self.conv2(x2))))
        x2 = self.se2(x2)
        x2 = self.convDrop(self.relu(self.bn3(self.conv3(x2))))
        x2 = torch.mean(x2, 2)  # global average pooling over time

        # Fuse both branches and classify
        x_all = torch.cat((x1, x2), dim=1)
        x_out = self.fc(x_all)
        # Returns class probabilities; if training with nn.CrossEntropyLoss,
        # feed it the pre-softmax logits instead.
        x_out = F.softmax(x_out, dim=1)
        return x_out
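

# Minimal usage sketch (not part of the original source); the batch size,
# class count, sequence length, and feature count below are illustrative
# assumptions chosen only to exercise the forward pass.
if __name__ == "__main__":
    model = MLSTMfcn(num_classes=5, max_seq_len=50, num_features=3)
    model.eval()  # disable dropout for a deterministic sanity check

    x = torch.randn(8, 50, 3)               # [B, T, F], zero-padded as needed
    seq_lens = torch.randint(10, 51, (8,))  # valid length of each sequence (<= T)
    with torch.no_grad():
        probs = model(x, seq_lens)
    print(probs.shape)  # torch.Size([8, 5]); each row sums to 1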