AI-or-Not-v2 / app.py
Omnibus's picture
Update app.py
e2346d7
raw
history blame
4.04 kB
import gradio as gr
import torch
from transformers import AutoFeatureExtractor, AutoModelForImageClassification, pipeline
from numpy import exp
import pandas as pd
def softmax(vector):
    """Return the softmax of *vector*, normalised over all elements.

    The input's maximum is subtracted before exponentiation — this is the
    standard numerical-stability trick: it leaves the result mathematically
    unchanged but prevents ``exp`` overflowing to inf/nan for large logits.
    Elsewhere in this file it is called on a torch ``logits`` tensor.
    """
    shifted = vector - vector.max()  # max-shift: exp() can no longer overflow
    e = exp(shifted)
    return e / e.sum()
# Hugging Face Hub model ids compared side-by-side by the UI below.
# Index order matters: aiornot0/1/2 each hard-code models[0]/[1]/[2],
# and the third model's label order is reversed relative to the first two.
models=[
    "Nahrawy/AIorNot",
    "umm-maybe/AI-image-detector",
    "arnolfokam/ai-generated-image-detector",
]
def aiornot0(image):
    """Classify *image* with models[0] (Nahrawy/AIorNot).

    Args:
        image: PIL image from the gr.Pil input component.

    Returns:
        (gr.HTML.update(...), results) where ``results`` maps
        "Real"/"AI" to their probabilities for a gr.Label component.
    """
    labels = ["Real", "AI"]  # this model's class index 0 -> Real, 1 -> AI
    mod = models[0]
    # NOTE(review): extractor and model are re-loaded on every click;
    # hoisting them to module level would make repeated requests much faster.
    feature_extractor = AutoFeatureExtractor.from_pretrained(mod)
    model = AutoModelForImageClassification.from_pretrained(mod)
    # renamed from "input" to avoid shadowing the builtin
    inputs = feature_extractor(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        probability = softmax(logits)
    probs = probability[0].numpy()  # shape (2,): [P(Real), P(AI)]
    prediction = logits.argmax(-1).item()
    label = labels[prediction]
    # typo fixed: "Probabilites" -> "Probabilities"
    html_out = f"""
<h1>This image is likely: {label}</h1><br><h3>
Model used: <a href='https://huggingface.co/{mod}'>{mod}</a><br>
<br>
Probabilities:<br>
Real: {probs[0]}<br>
AI: {probs[1]}"""
    # plain Python floats so the dict is JSON-serialisable for gr.Label
    results = {lab: float(p) for lab, p in zip(labels, probs)}
    return gr.HTML.update(html_out), results
def aiornot1(image):
    """Classify *image* with models[1] (umm-maybe/AI-image-detector).

    Args:
        image: PIL image from the gr.Pil input component.

    Returns:
        (gr.HTML.update(...), results) where ``results`` maps
        "Real"/"AI" to their probabilities for a gr.Label component.
    """
    labels = ["Real", "AI"]  # this model's class index 0 -> Real, 1 -> AI
    mod = models[1]
    # NOTE(review): extractor and model are re-loaded on every click;
    # hoisting them to module level would make repeated requests much faster.
    feature_extractor = AutoFeatureExtractor.from_pretrained(mod)
    model = AutoModelForImageClassification.from_pretrained(mod)
    # renamed from "input" to avoid shadowing the builtin
    inputs = feature_extractor(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        probability = softmax(logits)
    probs = probability[0].numpy()  # shape (2,): [P(Real), P(AI)]
    prediction = logits.argmax(-1).item()
    label = labels[prediction]
    # typo fixed: "Probabilites" -> "Probabilities"
    html_out = f"""
<h1>This image is likely: {label}</h1><br><h3>
Model used: <a href='https://huggingface.co/{mod}'>{mod}</a><br>
<br>
Probabilities:<br>
Real: {probs[0]}<br>
AI: {probs[1]}"""
    # plain Python floats so the dict is JSON-serialisable for gr.Label
    results = {lab: float(p) for lab, p in zip(labels, probs)}
    return gr.HTML.update(html_out), results
def aiornot2(image):
    """Classify *image* with models[2] (arnolfokam/ai-generated-image-detector).

    Unlike the other two models, this one's class order is reversed:
    index 0 -> AI, index 1 -> Real. The label list and the HTML output
    below both account for that.

    Args:
        image: PIL image from the gr.Pil input component.

    Returns:
        (gr.HTML.update(...), results) where ``results`` maps
        "AI"/"Real" to their probabilities for a gr.Label component.
    """
    labels = ["AI", "Real"]  # reversed class order for this model
    mod = models[2]
    # NOTE(review): extractor and model are re-loaded on every click;
    # hoisting them to module level would make repeated requests much faster.
    feature_extractor = AutoFeatureExtractor.from_pretrained(mod)
    model = AutoModelForImageClassification.from_pretrained(mod)
    # renamed from "input" to avoid shadowing the builtin
    inputs = feature_extractor(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        probability = softmax(logits)
    probs = probability[0].numpy()  # shape (2,): [P(AI), P(Real)]
    prediction = logits.argmax(-1).item()
    label = labels[prediction]
    # typo fixed: "Probabilites" -> "Probabilities"
    html_out = f"""
<h1>This image is likely: {label}</h1><br><h3>
Model used: <a href='https://huggingface.co/{mod}'>{mod}</a><br>
<br>
Probabilities:<br>
Real: {probs[1]}<br>
AI: {probs[0]}"""
    # plain Python floats so the dict is JSON-serialisable for gr.Label
    results = {lab: float(p) for lab, p in zip(labels, probs)}
    return gr.HTML.update(html_out), results
# UI: one image input, one button, and three result panels — each panel
# runs the same image through a different detector model so the verdicts
# can be compared side by side.
# NOTE(review): gr.Pil, gr.Box, gr.HTML.update and launch(enable_queue=...)
# are Gradio 3.x APIs removed in 4.x — confirm the pinned gradio version
# before upgrading.
with gr.Blocks() as app:
    with gr.Column():
        inp = gr.Pil()        # PIL image input
        btn = gr.Button()
        with gr.Group():
            with gr.Row():
                with gr.Box():
                    lab0 = gr.HTML(f"""<b>Testing on Model: {models[0]}</b>""")
                    outp0 = gr.HTML("""""")              # verdict HTML
                    n_out0 = gr.Label(label="Output")    # probability bars
                with gr.Box():
                    lab1 = gr.HTML(f"""<b>Testing on Model: {models[1]}</b>""")
                    outp1 = gr.HTML("""""")
                    n_out1 = gr.Label(label="Output")
                with gr.Box():
                    lab2 = gr.HTML(f"""<b>Testing on Model: {models[2]}</b>""")
                    outp2 = gr.HTML("""""")
                    n_out2 = gr.Label(label="Output")
    # One click fans out to all three classifiers; each fills its own panel.
    btn.click(aiornot0, [inp], [outp0, n_out0])
    btn.click(aiornot1, [inp], [outp1, n_out1])
    btn.click(aiornot2, [inp], [outp2, n_out2])
app.launch(enable_queue=False)