import torch
import torch.nn as nn
import torch.nn.functional as F

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class LSTM(nn.Module):
    def __init__(self, vocab_size, n_classes, hidden_dim, embedding_dim,
                 n_layers, dropout, bidirectional=True):
        super().__init__()
        self.n_layers = n_layers
        self.hidden_dim = hidden_dim
        self.embedding_dim = embedding_dim

        # Embedding and LSTM layers
        self.embedding = nn.Embedding(vocab_size, embedding_dim, device=device)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers,
                            dropout=dropout, batch_first=True,
                            bidirectional=bidirectional, device=device)

        # Dropout
        self.dropout = nn.Dropout(dropout)

        # Linear output layer: the input size doubles when the LSTM is
        # bidirectional, since forward and backward states are concatenated
        self.fc = nn.Linear(hidden_dim * 2 if bidirectional else hidden_dim,
                            n_classes, device=device)

    def forward(self, x):
        # x: (batch, seq_len) token indices -> (batch, seq_len, embedding_dim)
        x = self.embedding(x)
        # LSTM output: (batch, seq_len, hidden_dim * num_directions)
        x, hidden = self.lstm(x)
        # Keep only the output at the last timestep for classification
        x = x[:, -1, :]
        x = self.dropout(x)
        output = self.fc(x)
        return output, hidden
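
# A minimal usage sketch, not part of the original snippet: the vocabulary size,
# hyperparameters, and dummy batch below are assumed values chosen only to show
# how the class is instantiated and the tensor shapes it returns.
if __name__ == '__main__':
    model = LSTM(vocab_size=10000, n_classes=5, hidden_dim=128,
                 embedding_dim=100, n_layers=2, dropout=0.3,
                 bidirectional=True).to(device)

    # Dummy batch of 8 sequences, each 20 token ids long
    batch = torch.randint(0, 10000, (8, 20), device=device)
    logits, (h_n, c_n) = model(batch)
    print(logits.shape)  # torch.Size([8, 5])
    print(h_n.shape)     # (n_layers * 2, batch, hidden_dim) = torch.Size([4, 8, 128])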