4/11/2022

LSTM Autoencoder in PyTorch

 

Code 

...

import torch
import torch.nn as nn
from torchinfo import summary

class LSTM(nn.Module):
    def __init__(self, input_dim, hidden_dims, num_layers, num_LSTM):
        super(LSTM, self).__init__()
        self.input_dim = input_dim
        self.hidden_dims = hidden_dims
        self.num_layers = num_layers

        # Stack num_LSTM LSTM blocks; each block's output sequence feeds the next.
        LSTMs = []
        fDim = self.input_dim
        for i in range(num_LSTM):
            LSTMs.append(nn.LSTM(input_size=fDim, hidden_size=hidden_dims[i],
                                 num_layers=self.num_layers, batch_first=True))
            fDim = hidden_dims[i]
        self.lstms = nn.ModuleList(LSTMs)

    def forward(self, x):
        for lstm in self.lstms:
            lstm_out, (hidden_out, cell_out) = lstm(x)
            x = lstm_out
        # Features of the last time step: shape (batch, hidden_dims[-1])
        last_sequence_hidden_dim = x[:, -1, :]
        return x, last_sequence_hidden_dim

class regressor(nn.Module):
    def __init__(self, input_dim, output_dim, dropout=0.1):
        super(regressor, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.dropout = dropout
        self.regressor = self.make_regressor()

    def make_regressor(self):
        # Small MLP head that maps decoder features back to the input dimension.
        layers = []
        layers.append(nn.Dropout(self.dropout))
        layers.append(nn.Linear(self.input_dim, self.input_dim // 2))
        layers.append(nn.ReLU())
        layers.append(nn.Linear(self.input_dim // 2, self.output_dim))
        return nn.Sequential(*layers)

    def forward(self, x):
        return self.regressor(x)

class LSTM_autoencoder(nn.Module):
    def __init__(self, input_dim, encoder_hidden_dims, num_layers, num_LSTM, input_seq):
        super(LSTM_autoencoder, self).__init__()

        self.input_dim = input_dim                                      # e.g. 5
        self.encoder_hidden_dims = list(encoder_hidden_dims)            # e.g. [256, 128, 64]
        self.decoder_hidden_dims = list(reversed(encoder_hidden_dims))  # e.g. [64, 128, 256]
        self.num_layers = num_layers                                    # e.g. 2
        self.num_LSTM = num_LSTM                                        # e.g. 3
        self.input_seq = input_seq

        # LSTM encoder: compresses the input sequence into the last hidden state.
        self.lstm_encoder = LSTM(input_dim, self.encoder_hidden_dims, num_layers, num_LSTM)
        # LSTM decoder: expands the repeated latent vector back into a sequence.
        self.lstm_decoder = LSTM(self.decoder_hidden_dims[0], self.decoder_hidden_dims, num_layers, num_LSTM)
        # Regressor: maps decoder features (decoder_hidden_dims[-1] == encoder_hidden_dims[0])
        # back to the original input dimension.
        self.lstm_regressor = regressor(self.decoder_hidden_dims[-1], input_dim)

    def forward(self, x):
        input_encoder = x
        _, output_encoder = self.lstm_encoder(input_encoder)
        print(f'1 - lstm encoder input:{input_encoder.shape} output:{output_encoder.shape}')

        # Repeat the latent vector input_seq times so the decoder sees a full sequence.
        x_inter = torch.unsqueeze(output_encoder, 1)
        input_decoder = x_inter.repeat(1, self.input_seq, 1)
        print(f'2 - input_decoder: {input_decoder.shape}')

        output_decoder, _ = self.lstm_decoder(input_decoder)
        print(f'3 - input decoder: {input_decoder.shape} output decoder:{output_decoder.shape}')

        output_regressor = self.lstm_regressor(output_decoder)
        print(f'4 - output_regressor input: {output_decoder.shape} output: {output_regressor.shape}')
        return output_regressor

...
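
To see the shape flow concretely, here is a quick sanity check with random data — a minimal sketch of my own, assuming the same hyperparameters as the test below:

...

# Hypothetical quick check: batch 100, sequence length 140, 5 features,
# two stacked LSTMs with hidden dims [256, 128].
model = LSTM_autoencoder(input_dim=5, encoder_hidden_dims=[256, 128],
                         num_layers=2, num_LSTM=2, input_seq=140)
x = torch.randn(100, 140, 5)
y = model(x)
# Expected shapes at each step:
# 1 - encoder   input (100, 140, 5)   -> latent (100, 128)
# 2 - decoder   input (100, 140, 128)    (latent repeated 140 times)
# 3 - decoder   output (100, 140, 256)
# 4 - regressor output (100, 140, 5)  -> same shape as the input
assert y.shape == x.shape

...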


Test the class and show the model summary

..

input_dim = 5
num_LSTM = 2
encoder_hidden_dims = [256, 128]
num_layers = 2
input_seq = 140
batch_size = 100

lstm_auto_model = LSTM_autoencoder(input_dim, encoder_hidden_dims, num_layers, num_LSTM, input_seq)
summary(lstm_auto_model, input_size=(batch_size, input_seq, input_dim))

..

Output

..
(torchinfo model summary printout)
..
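
The post stops at the summary, but for completeness, a minimal reconstruction-training sketch might look like this (MSE loss, Adam, and the random stand-in data are my assumptions, not part of the original code; the debug prints in forward() would fire on every step, so remove them for real training):

...

# Minimal training sketch (assumptions: MSE reconstruction loss, Adam optimizer,
# random tensors standing in for a real DataLoader).
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(lstm_auto_model.parameters(), lr=1e-3)

lstm_auto_model.train()
for epoch in range(10):
    batch = torch.randn(batch_size, input_seq, input_dim)  # stand-in for real data
    optimizer.zero_grad()
    reconstruction = lstm_auto_model(batch)
    loss = criterion(reconstruction, batch)  # reconstruct the input itself
    loss.backward()
    optimizer.step()

...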


Refer to my ugly drawing


Thank you.

www.marearts.com

