import torch
import torch.nn as nn
import torch.optim as optim
class SimpleNN(nn.Module):
    """Minimal two-layer feed-forward classifier: Linear -> ReLU -> Linear.

    Produces raw logits (no softmax); pair with a loss such as
    ``nn.CrossEntropyLoss`` that applies the normalization internally.
    """

    def __init__(self, input_size, hidden_size, output_size):
        """Build the network layers.

        Args:
            input_size: number of features in each input sample.
            hidden_size: width of the single hidden layer.
            output_size: number of output logits (classes).
        """
        super().__init__()
        self.hidden = nn.Linear(input_size, hidden_size)
        self.activation = nn.ReLU()
        self.output = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Run the forward pass: hidden projection, ReLU, output projection.

        Args:
            x: input tensor of shape ``(batch, input_size)``.

        Returns:
            Logit tensor of shape ``(batch, output_size)``.
        """
        return self.output(self.activation(self.hidden(x)))
|
|
|
|
|
|
# --- Hyperparameters -----------------------------------------------------
input_size = 10    # features per sample
hidden_size = 4    # width of the hidden layer
output_size = 4    # number of classes (logits)
num_epochs = 1     # passes over the toy dataset

# --- Model, loss, optimizer ----------------------------------------------
model = SimpleNN(input_size, hidden_size, output_size)
print(model)

# CrossEntropyLoss expects raw logits and integer class labels.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)

# --- Toy training data ---------------------------------------------------
# 10 random samples; labels are integer class indices in [0, output_size).
X_train = torch.rand(10, input_size)
y_train = torch.tensor([0, 1, 2, 1, 0, 1, 2, 1, 0, 1])

# --- Training loop -------------------------------------------------------
for _ in range(num_epochs):
    optimizer.zero_grad()               # clear gradients from previous step
    outputs = model(X_train)            # forward pass -> (10, output_size) logits
    loss = criterion(outputs, y_train)
    loss.backward()                     # backpropagate
    optimizer.step()                    # apply parameter update

# NOTE(review): `outputs` and `loss` come from the last forward pass, which
# ran *before* the final optimizer.step(); re-run model(X_train) to inspect
# post-update predictions.
print(f"Output:\n{outputs}")
print(f"Loss: {loss.item()}")
|
|
|