-
Notifications
You must be signed in to change notification settings - Fork 0
/
fsr_model.py
131 lines (115 loc) · 4.91 KB
/
fsr_model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
import torch
class ANN(torch.nn.Module):
    """Plain feed-forward network with Tanh between hidden layers.

    Stacks ``num_layer`` Linear layers: the first maps ``input_size`` to
    ``hidden_size``, the last maps ``hidden_size`` to ``output_size``
    (for ``num_layer == 1`` it maps input directly to output), and a Tanh
    follows every layer except the final one.
    """

    def __init__(self, input_size, hidden_size, num_layer, output_size):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layer = num_layer
        self.output_size = output_size
        # Layer widths as a flat list; consecutive pairs give (in, out) per layer.
        dims = [hidden_size] * (num_layer + 1)
        dims[0] = input_size
        dims[-1] = output_size
        modules = []
        for idx in range(num_layer):
            modules.append(torch.nn.Linear(dims[idx], dims[idx + 1]))
            if idx < num_layer - 1:  # no activation after the output layer
                modules.append(torch.nn.Tanh())
        self.layers = torch.nn.Sequential(*modules)

    def forward(self, x):
        """Run the input through the full Linear/Tanh stack."""
        return self.layers(x)
class LSTM(torch.nn.Module):
    """LSTM encoder followed by a per-step linear readout.

    The encoder expects inputs in the default (seq, batch, feature) layout
    (``batch_first`` is not set); the decoder is applied to every time step.
    """

    def __init__(self, input_size, hidden_size, num_layer, output_size):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layer = num_layer
        self.output_size = output_size
        self.encoder = torch.nn.LSTM(input_size, hidden_size, num_layer)
        self.decoder = torch.nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Encode the sequence, discard the final (h, c) state, and decode."""
        hidden, _state = self.encoder(x)
        return self.decoder(hidden)
class CNN_LSTM(torch.nn.Module):
    """1-D CNN feature extractor feeding an LSTM, with a linear readout.

    The convolutions use kernel size 3 with padding 1, so the sequence
    length is preserved. NOTE(review): there is no activation between the
    conv layers, matching the original design — confirm this is intended.
    """

    def __init__(self, input_size, cnn_hidden_size, lstm_hidden_size, cnn_num_layer, lstm_num_layer, output_size):
        super().__init__()
        self.input_size = input_size
        self.cnn_hidden_size = cnn_hidden_size
        self.lstm_hidden_size = lstm_hidden_size
        self.cnn_num_layer = cnn_num_layer
        self.lstm_num_layer = lstm_num_layer
        self.output_size = output_size
        # First conv lifts the input channels to cnn_hidden_size; the
        # remaining cnn_num_layer - 1 convs keep the channel width fixed.
        convs = [torch.nn.Conv1d(input_size, cnn_hidden_size, 3, padding=1)]
        convs += [
            torch.nn.Conv1d(cnn_hidden_size, cnn_hidden_size, 3, padding=1)
            for _ in range(cnn_num_layer - 1)
        ]
        self.cnn_encoder = torch.nn.Sequential(*convs)
        self.lstm_encoder = torch.nn.LSTM(cnn_hidden_size, lstm_hidden_size, lstm_num_layer)
        self.decoder = torch.nn.Linear(lstm_hidden_size, output_size)

    def forward(self, x):
        """Convolve over time, then run the LSTM and per-step decoder.

        Conv1d wants (…, channels, time), so the last two axes are swapped
        around the CNN and swapped back before the LSTM.
        """
        features = self.cnn_encoder(x.transpose(-1, -2)).transpose(-1, -2)
        hidden, _state = self.lstm_encoder(features)
        return self.decoder(hidden)
class Attention(torch.nn.Module):
    """Single multi-head self-attention layer with a linear readout.

    ``input_size`` must be divisible by ``num_heads`` (enforced by
    ``torch.nn.MultiheadAttention``). Inputs use the default
    (seq, batch, embed) layout since ``batch_first`` is not set.
    """

    def __init__(self, input_size, output_size, num_heads=4):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.attention = torch.nn.MultiheadAttention(input_size, num_heads)
        self.decoder = torch.nn.Linear(input_size, output_size)

    def forward(self, x):
        """Self-attend (query = key = value = x), drop the attention weights, decode."""
        attended, _weights = self.attention(x, x, x)
        return self.decoder(attended)
class FSRGraphNeuralNetwork(torch.nn.Module):
    """Graph network over FSR sensor channels with a fixed, hand-coded adjacency.

    Each layer computes ``x <- (x @ A) @ W_i + b_i`` where ``A`` is the
    channel adjacency matrix. Only ``input_size`` 6 or 12 is supported
    (one 6-channel array, or two stacked copies of it).

    Raises:
        ValueError: if ``input_size`` is neither 6 nor 12.
    """

    def __init__(self, input_size, num_layer, output_size):
        super().__init__()
        self.input_size = input_size
        self.num_layer = num_layer
        self.output_size = output_size
        # Per-layer (in, out) widths: every layer is input_size -> input_size
        # except the last, which maps to output_size.
        layers = [[input_size, input_size] for _ in range(num_layer)]
        layers[-1][1] = output_size
        self.weights = torch.nn.ParameterList([
            torch.nn.Parameter(torch.empty(fan_in, fan_out)) for fan_in, fan_out in layers
        ])
        self.bias = torch.nn.ParameterList([
            torch.nn.Parameter(torch.empty(fan_out)) for _, fan_out in layers
        ])
        for w in self.weights:
            torch.nn.init.kaiming_uniform_(w)
        for b in self.bias:
            torch.nn.init.zeros_(b)
        adj = None
        if input_size == 6:
            adj = [
                [1, 1, 1, 0, 0, 0],
                [1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1],
                [0, 1, 1, 1, 0, 1],
                [0, 1, 1, 0, 1, 0],
                [0, 1, 1, 1, 0, 1],
            ]
        elif input_size == 12:
            adj = [
                [1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                [0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1],
                [0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0],
                [0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1],
                [1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                [0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1],
                [0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0],
                [0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1],
            ]
        if adj is None:
            # BUG FIX: the original used `assert 'input_size is incompatible'`,
            # which can never fail (a non-empty string is truthy) and then
            # crashed later on torch.tensor(None). Fail fast and clearly.
            raise ValueError(f'input_size must be 6 or 12, got {input_size}')
        # Register as a non-persistent buffer so .to(device)/.cuda() moves the
        # adjacency with the module (a plain tensor attribute stays on CPU);
        # persistent=False keeps the state_dict identical to the original.
        self.register_buffer('adj_matrix', torch.tensor(adj).float(), persistent=False)

    def forward(self, x):
        """Apply all graph layers in order: x <- (x @ A) @ W_i + b_i.

        NOTE(review): there is no nonlinearity between layers, matching the
        original design, so the stack is linear in x — confirm intended.
        """
        for w, b in zip(self.weights, self.bias):
            x = x.matmul(self.adj_matrix).matmul(w) + b
        return x