hkanumilli committed on
Commit 6c0d444 · 1 Parent(s): 284e3bc

first test run

Files changed (3)
  1. main.py +42 -0
  2. mnist_net.pth +3 -0
  3. neural_network.py +39 -0
main.py ADDED
@@ -0,0 +1,42 @@
+ import torch
+ import gradio as gr
+ import torchvision.transforms as transforms
+ from neural_network import MNISTNetwork
+
+
+ transform = transforms.Compose([
+     transforms.ToTensor(),                      # Convert image to tensor
+     transforms.Normalize((0.1307,), (0.3081,))  # Normalize with the MNIST mean and std
+ ])
+
+ # Load the trained model
+ net = MNISTNetwork()
+ net.load_state_dict(torch.load('mnist_net.pth'))
+ LABELS = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
+
+ def predict(drawing):
+     if drawing is None:
+         return "Please draw a digit first"
+
+     input_tensor = transform(drawing)
+     x = input_tensor.view(input_tensor.shape[0], -1)  # Flatten to (1, 784) for the MLP
+
+     with torch.no_grad():
+         output = net(x)
+
+     probabilities = torch.nn.functional.softmax(output[0], dim=0)
+     values, indices = torch.topk(probabilities, 10)
+     results = {LABELS[i]: v.item() for i, v in zip(indices, values)}
+
+     return results
+
+
+ sketchpad_input = gr.Sketchpad(shape=(28, 28))
+ interface = gr.Interface(
+     fn=predict,
+     inputs=sketchpad_input,
+     outputs="label",
+     live=True
+ )
+ interface.launch()
+
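A quick way to exercise predict() outside the Gradio UI is to feed it a 28x28 grayscale PIL image in place of the Sketchpad drawing. The snippet below is a hypothetical sanity check, not part of this commit; it assumes it is placed in main.py after predict() is defined and before interface.launch().

```python
# Hypothetical sanity check (not in this commit), assuming it runs in main.py
# after predict() is defined and before interface.launch().
import numpy as np
from PIL import Image

blank = Image.fromarray(np.zeros((28, 28), dtype=np.uint8))  # stand-in for a Sketchpad drawing
scores = predict(blank)                                      # dict mapping labels '0'-'9' to scores
print(max(scores, key=scores.get), scores)
```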
mnist_net.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:37d782b2adae4525684264e311d73f3bb52e250b6bc13d0a9446e8fb45bc715a
+ size 446799
neural_network.py ADDED
@@ -0,0 +1,39 @@
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ class MNISTNetwork(nn.Module):
+
+     def __init__(self):
+         super().__init__()
+         self.layer1 = nn.Linear(784, 128)
+         self.layer2 = nn.Linear(128, 64)
+         self.layer3 = nn.Linear(64, 32)
+         self.layer4 = nn.Linear(32, 10)
+
+     def forward(self, x):
+         x = F.relu(self.layer1(x))
+         x = F.relu(self.layer2(x))
+         x = F.relu(self.layer3(x))
+         x = self.layer4(x)
+         return F.log_softmax(x, dim=1)
+
+
+
+ # class MNISTNetwork(nn.Module):
+ #     def __init__(self):
+ #         super().__init__()
+ #         self.conv1 = nn.Conv2d(1, 32, kernel_size=5, padding=2)
+ #         self.conv2 = nn.Conv2d(32, 64, kernel_size=5, padding=2)
+ #         self.fc1 = nn.Linear(64*7*7, 1024)
+ #         self.fc2 = nn.Linear(1024, 10)
+
+ #     def forward(self, x):
+ #         x = nn.functional.relu(self.conv1(x))
+ #         x = nn.functional.max_pool2d(x, 2)
+ #         x = nn.functional.relu(self.conv2(x))
+ #         x = nn.functional.max_pool2d(x, 2)
+ #         x = x.view(-1, 64*7*7)
+ #         x = nn.functional.relu(self.fc1(x))
+ #         x = nn.functional.dropout(x, training=self.training)
+ #         x = self.fc2(x)
+ #         return nn.functional.log_softmax(x, dim=1)
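The training script that produced mnist_net.pth is not included in this commit. A minimal sketch of how a comparable checkpoint could be produced with this MLP is below; the optimizer, learning rate, batch size, and epoch count are assumptions, not values taken from the repository.

```python
# Hypothetical training sketch for mnist_net.pth; hyperparameters are assumptions.
import torch
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms

from neural_network import MNISTNetwork

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])
train_set = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform)
loader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True)

net = MNISTNetwork()
optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9)  # assumed optimizer

for epoch in range(5):                                # assumed epoch count
    for images, labels in loader:
        x = images.view(images.shape[0], -1)          # flatten 1x28x28 images to 784 for the MLP
        optimizer.zero_grad()
        loss = F.nll_loss(net(x), labels)             # log_softmax output pairs with NLL loss
        loss.backward()
        optimizer.step()

torch.save(net.state_dict(), 'mnist_net.pth')
```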