Upload app.py
app.py
ADDED
@@ -0,0 +1,70 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torchvision.transforms as transforms
+import gradio as gr
+from PIL import Image
+
+# Model definition
+class CyberNet(nn.Module):
+    def __init__(self, input_channels=3, num_classes=87):
+        super(CyberNet, self).__init__()
+        self.conv1 = nn.Conv2d(input_channels, 93, kernel_size=3, stride=1, padding=1)
+        self.conv2 = nn.Conv2d(93, 52, kernel_size=3, stride=1, padding=1)
+        self.conv3 = nn.Conv2d(52, 172, kernel_size=5, stride=1, padding=1)
+        self.pool = nn.MaxPool2d(2, 2)
+
+        self.feature_size = self._get_conv_output_size(input_channels, (224, 224))
+        self.fc1 = nn.Linear(self.feature_size, 512)
+        self.fc2 = nn.Linear(512, num_classes)
+        self.dropout = nn.Dropout(0.5)
+
+    def _get_conv_output_size(self, input_channels, input_size):
+        dummy_input = torch.zeros(1, input_channels, *input_size)
+        x = self.pool(F.relu(self.conv1(dummy_input)))
+        x = self.pool(F.relu(self.conv2(x)))
+        x = self.pool(F.relu(self.conv3(x)))
+        return x.numel()
+
+    def forward(self, x):
+        x = self.pool(F.relu(self.conv1(x)))
+        x = self.pool(F.relu(self.conv2(x)))
+        x = self.pool(F.relu(self.conv3(x)))
+        x = x.view(x.size(0), -1)
+        x = F.relu(self.fc1(x))
+        x = self.dropout(x)
+        x = self.fc2(x)
+        return x
+
+# Load the model and weights
+device = torch.device('cpu')
+model = CyberNet(num_classes=87)
+model.load_state_dict(torch.load('cyberNet.pt', map_location=device, weights_only=True))
+
+# Data transformation pipeline
+IMAGE_SIZE = 224  # Image size used for resizing
+
+data_transform = transforms.Compose([
+    transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),  # Resize the input image to 224x224
+    transforms.ToTensor(),  # Convert the image to a tensor
+    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])  # Normalize with ImageNet mean and std
+])
+
+# Class names (87 classes, matching num_classes; includes 'benign')
+class_names = ['7ev3n', 'APosT', 'Adposhel', 'Agent', 'Agentb', 'Allaple', 'Alueron.gen!J', 'Amonetize', 'Androm', 'Bashlite', 'Bingoml', 'Blacksoul', 'BrowseFox', 'C2LOP.gen!g', 'Convagent', 'Copak', 'Delf', 'Dialplatform.B', 'Dinwod', 'Elex', 'Emotet', 'Escelar', 'Expiro', 'Fakerean', 'Fareit', 'Fasong', 'GandCrab', 'GlobelImposter', 'GootLoader', 'HLLP', 'HackKMS', 'Hlux', 'IcedId', 'Infy', 'Inject', 'Injector', 'InstallCore', 'KRBanker', 'Koadic', 'Kryptik', 'Kwampirs', 'Lamer', 'LemonDuck', 'Loki', 'Lolyda.AA1', 'Lolyda.AA2', 'Mimail', 'MultiPlug', 'Mydoom', 'Neoreklami', 'Neshta', 'NetWireRAT', 'Ngrbot', 'OnlinerSpambot', 'Orcus', 'Padodor', 'Plite', 'PolyRansom', 'QakBot', 'QtBot', 'Qukart', 'REvil', 'Ramdo', 'Regrun', 'Rekt Loader', 'Sakula', 'Salgorea', 'Scar', 'SelfDel', 'Small', 'Snarasite', 'Stantinko', 'Trickpak', 'Upantix', 'Upatre', 'VB', 'VBA', 'VBKrypt', 'VBNA', 'Vilsel', 'Vobfus', 'WBNA', 'Wecod', 'XTunnel', 'Zenpak', 'Zeus', 'benign']
+print(class_names)
+
+# Define the predict function
+def predict(inp):
+    inp = data_transform(inp).unsqueeze(0)  # Apply the transforms and add a batch dimension
+    model.eval()
+    with torch.no_grad():
+        prediction = F.softmax(model(inp)[0], dim=0)
+        confidences = {class_names[i]: float(prediction[i]) for i in range(len(class_names))}
+    return confidences
+
+# Gradio app definition
+gr.Interface(fn=predict,
+             inputs=gr.Image(type="pil"),          # Accept PIL images
+             outputs=gr.Label(num_top_classes=3),  # Output the top 3 classes
+             examples=["benign.png", "Upatre (88).png", "Fakerean_aug_271_7932869.png", "Email-Worm.Win32.Mimail-q-9ee5ea94838d5af80a11d49a3c2e344c0122d227daa8e6f30e19858a91b3ffa1_RGB.png"]).launch(share=True)
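For anyone who wants to exercise the model without starting the Gradio UI, a minimal smoke test is sketched below. This is a hedged sketch, not part of the Space: it assumes `cyberNet.pt` and the bundled example image `benign.png` sit next to `app.py`, and it should be pasted just above the `gr.Interface(...)` call, since importing `app.py` from another script would launch the interface as a side effect.

# Hedged sketch: quick sanity check for predict(); paste above the gr.Interface(...) call.
# Assumes cyberNet.pt and benign.png (one of the listed examples) are in the working directory.
sample = Image.open("benign.png").convert("RGB")   # force 3 channels to match input_channels=3
confidences = predict(sample)                      # dict mapping class name -> probability
top3 = sorted(confidences.items(), key=lambda kv: kv[1], reverse=True)[:3]
for name, prob in top3:                            # mirrors gr.Label(num_top_classes=3)
    print(f"{name}: {prob:.3f}")

The explicit `convert("RGB")` matters here because `ToTensor()` on a grayscale or RGBA PIL image would produce 1 or 4 channels, which `conv1` (built for `input_channels=3`) would reject; `gr.Image(type="pil")` typically hands `predict` an RGB image already.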