From 3000b3551c3bf8081925233696caae0b3b2cd64d Mon Sep 17 00:00:00 2001
From: chmok <52270280+chmok@users.noreply.github.com>
Date: Thu, 23 Nov 2023 16:44:52 +0900
Subject: [PATCH] Delete models/rnn.py

delete rnn model
---
 models/rnn.py | 46 ----------------------------------------------
 1 file changed, 46 deletions(-)
 delete mode 100644 models/rnn.py

diff --git a/models/rnn.py b/models/rnn.py
deleted file mode 100644
index 221960e..0000000
--- a/models/rnn.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import torch.nn as nn
-import torch
-import torch.optim as optim
-from models.train_model_multi import Train_Test
-
-import numpy as np
-import time
-import copy
-
-class RNN_model(nn.Module):
-    def __init__(self, input_size, hidden_size, num_layers, num_classes, bidirectional, rnn_type, device='cuda'):
-        super(RNN_model, self).__init__()
-        self.hidden_size = hidden_size
-        self.num_layers = num_layers
-        self.num_classes = num_classes
-        self.rnn_type = rnn_type
-        self.num_directions = 2 if bidirectional == True else 1
-        self.device = device
-
-        # Set up the recurrent layer according to rnn_type
-        if self.rnn_type == 'lstm':
-            self.rnn = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, bidirectional=bidirectional)
-        elif self.rnn_type == 'gru':
-            self.rnn = nn.GRU(input_size, hidden_size, num_layers, batch_first=True, bidirectional=bidirectional)
-
-        # Build the fc layer according to bidirectional
-        # The hidden state shape depends on bidirectional (True: 2 * hidden_size, False: hidden_size)
-        self.fc = nn.Linear(self.num_directions * hidden_size, self.num_classes)
-
-    def forward(self, x):
-        # data dimension: transform (batch_size x input_size x seq_len) -> (batch_size x seq_len x input_size)
-        x = torch.transpose(x, 1, 2)
-
-        # Set up initial hidden states
-        h0 = torch.zeros(self.num_directions * self.num_layers, x.size(0), self.hidden_size).to(self.device)
-
-        # Derive the output from the RNN of the selected rnn_type
-        if self.rnn_type == 'gru':
-            out, _ = self.rnn(x, h0) # out: tensor of shape (batch_size, seq_length, hidden_size)
-        else:
-            # Set up initial cell states
-            c0 = torch.zeros(self.num_directions * self.num_layers, x.size(0), self.hidden_size).to(self.device)
-            out, _ = self.rnn(x, (h0, c0)) # out: tensor of shape (batch_size, seq_length, hidden_size)
-
-        out = self.fc(out[:, -1, :])
-        return out
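
For reference, a minimal usage sketch of the RNN_model class removed by this patch; the dimensions and hyperparameter values below are illustrative assumptions, not values taken from the repository:

    import torch
    from models.rnn import RNN_model  # resolvable only before this patch is applied

    # Assumed example shapes: 32 samples, 9 input channels, 128 time steps, 6 classes
    model = RNN_model(input_size=9, hidden_size=64, num_layers=2, num_classes=6,
                      bidirectional=True, rnn_type='lstm', device='cpu')
    x = torch.randn(32, 9, 128)   # (batch_size, input_size, seq_len), as forward() expects
    logits = model(x)             # forward() transposes to (batch_size, seq_len, input_size)
    print(logits.shape)           # torch.Size([32, 6]), i.e. (batch_size, num_classes)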