# cyberNetDetect/app.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import gradio as gr
from PIL import Image
# Model definition
class CyberNet(nn.Module):
    def __init__(self, input_channels=3, num_classes=87):
        super(CyberNet, self).__init__()
        self.conv1 = nn.Conv2d(input_channels, 93, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(93, 52, kernel_size=3, stride=1, padding=1)
        self.conv3 = nn.Conv2d(52, 172, kernel_size=5, stride=1, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        # Infer the flattened feature size for the first fully connected layer
        self.feature_size = self._get_conv_output_size(input_channels, (224, 224))
        self.fc1 = nn.Linear(self.feature_size, 512)
        self.fc2 = nn.Linear(512, num_classes)
        self.dropout = nn.Dropout(0.5)

    def _get_conv_output_size(self, input_channels, input_size):
        # Run a dummy forward pass through the conv/pool stack to measure the flattened size
        dummy_input = torch.zeros(1, input_channels, *input_size)
        x = self.pool(F.relu(self.conv1(dummy_input)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        return x.numel()

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = x.view(x.size(0), -1)  # Flatten to (batch, feature_size)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)  # Raw logits; softmax is applied in predict()
        return x
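# With the default 224x224 input, _get_conv_output_size works out as follows:
#   conv1/conv2 (3x3, padding 1) preserve spatial size; each 2x2 max pool halves it: 224 -> 112 -> 56
#   conv3 (5x5, padding 1) gives 56 - 5 + 2 + 1 = 54, and the final pool gives 27
#   so feature_size = 172 * 27 * 27 = 125388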
# Load the model and weights
device = torch.device('cpu')
model = CyberNet(num_classes=87)
model.load_state_dict(torch.load('cyberNet.pt', map_location=device, weights_only=True))
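# Assumption: 'cyberNet.pt' was saved from this exact architecture, so load_state_dict succeeds
# with the default strict matching; weights_only=True also requires a reasonably recent PyTorch
# (the argument was added around 1.13).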
# Data transformation pipeline
IMAGE_SIZE = 224 # Define the image size for resizing
data_transform = transforms.Compose([
    transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),  # Resize the input image to 224x224
    transforms.ToTensor(),                        # Convert the image to a tensor in [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])  # Normalize with ImageNet mean and std
])
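# For a PIL image `img`, data_transform(img) yields a float tensor of shape (3, 224, 224),
# matching the 224x224 size used above to infer the model's feature_size.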
# Class names in output-index order: 86 malware families plus 'benign' (87 entries, matching num_classes)
class_names = ['7ev3n', 'APosT', 'Adposhel', 'Agent', 'Agentb', 'Allaple', 'Alueron.gen!J', 'Amonetize', 'Androm', 'Bashlite', 'Bingoml', 'Blacksoul', 'BrowseFox', 'C2LOP.gen!g', 'Convagent', 'Copak', 'Delf', 'Dialplatform.B', 'Dinwod', 'Elex', 'Emotet', 'Escelar', 'Expiro', 'Fakerean', 'Fareit', 'Fasong', 'GandCrab', 'GlobelImposter', 'GootLoader', 'HLLP', 'HackKMS', 'Hlux', 'IcedId', 'Infy', 'Inject', 'Injector', 'InstallCore', 'KRBanker', 'Koadic', 'Kryptik', 'Kwampirs', 'Lamer', 'LemonDuck', 'Loki', 'Lolyda.AA1', 'Lolyda.AA2', 'Mimail', 'MultiPlug', 'Mydoom', 'Neoreklami', 'Neshta', 'NetWireRAT', 'Ngrbot', 'OnlinerSpambot', 'Orcus', 'Padodor', 'Plite', 'PolyRansom', 'QakBot', 'QtBot', 'Qukart', 'REvil', 'Ramdo', 'Regrun', 'Rekt Loader', 'Sakula', 'Salgorea', 'Scar', 'SelfDel', 'Small', 'Snarasite', 'Stantinko', 'Trickpak', 'Upantix', 'Upatre', 'VB', 'VBA', 'VBKrypt', 'VBNA', 'Vilsel', 'Vobfus', 'WBNA', 'Wecod', 'XTunnel', 'Zenpak', 'Zeus', 'benign']
print(class_names)
# Prediction function used by the Gradio interface
def predict(inp):
    inp = inp.convert("RGB")                # Ensure 3 channels (uploads may be RGBA or grayscale)
    inp = data_transform(inp).unsqueeze(0)  # Apply the transformation and add a batch dimension
    model.eval()
    with torch.no_grad():
        prediction = F.softmax(model(inp)[0], dim=0)
        confidences = {class_names[i]: float(prediction[i]) for i in range(len(class_names))}
    return confidences
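# Optional local sanity check (a minimal sketch; assumes the example images listed below
# exist in the working directory). Left commented out so it does not run on Space startup:
#
#   sample = Image.open("benign.png")
#   top3 = sorted(predict(sample).items(), key=lambda kv: kv[1], reverse=True)[:3]
#   print(top3)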
# Gradio app definition
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),          # Accept PIL images
    outputs=gr.Label(num_top_classes=3),  # Show the top 3 predicted classes
    examples=[
        "benign.png",
        "Upatre (88).png",
        "Fakerean_aug_271_7932869.png",
        "Email-Worm.Win32.Mimail-q-9ee5ea94838d5af80a11d49a3c2e344c0122d227daa8e6f30e19858a91b3ffa1_RGB.png",
    ],
)
demo.launch(share=True)