import gradio as gr
import torch
from transformers import AutoFeatureExtractor, AutoModelForImageClassification
from numpy import exp
import pandas as pd
from PIL import Image
import urllib.request
import uuid

# Unique id used to name the temporary file for URL-loaded images
uid = uuid.uuid4()

# Candidate detector models, selected by index in aiornot()
models = [
    "cmckinle/sdxl-flux-detector",
    "umm-maybe/AI-image-detector",
    "Organika/sdxl-detector",
]

# Per-model results, accumulated during a detection run and averaged by tot_prob()
fin_sum = []

def softmax(vector):
    e = exp(vector)
    return e / e.sum()

def aiornot(image, model_index):
    # All three detectors are assumed to map index 0 -> "AI" and index 1 -> "Real"
    labels = ["AI", "Real"]
    mod = models[model_index]
    feature_extractor = AutoFeatureExtractor.from_pretrained(mod)
    model = AutoModelForImageClassification.from_pretrained(mod)
    inputs = feature_extractor(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        probability = softmax(logits)
    px = pd.DataFrame(probability.numpy())
    prediction = logits.argmax(-1).item()
    label = labels[prediction]
    html_out = f"""
    <h1>This image is likely: {label}</h1><br><h3>
    Probabilities:<br>
    Real: {px[1][0]}<br>
    AI: {px[0][0]}</h3>"""
    results = {}
    for idx, col in enumerate(px.columns):
        results[labels[idx]] = px[col][0]
    # Store this model's probabilities so tot_prob() can average across models
    fin_sum.append(results)
    # Only the HTML component is wired to this event's output
    return html_out

def load_url(url):
    try:
        urllib.request.urlretrieve(url, f"{uid}tmp_im.png")
        image = Image.open(f"{uid}tmp_im.png")
        mes = "Image Loaded"
    except Exception as e:
        image = None
        mes = f"Image not Found<br>Error: {e}"
    return image, mes

def tot_prob():
    try:
        # Running average of the "Real" probability across the models seen so far
        fin_out = sum(result["Real"] for result in fin_sum) / len(fin_sum)
        fin_sub = 1 - fin_out
        out = {
            "Real": f"{fin_out:.2%}",
            "AI": f"{fin_sub:.2%}",
        }
        return out
    except Exception as e:
        print(e)
        return None

def fin_clear():
    fin_sum.clear()
    return None

with gr.Blocks() as app:
    gr.Markdown("""<center><h1>AI Image Detector<br><h4>(Test Demo - accuracy varies by model)</h4></center>""")
    inp = gr.Image(type="pil")
    in_url = gr.Textbox(label="Image URL")
    load_btn = gr.Button("Load URL")
    btn = gr.Button("Detect AI")
    mes = gr.HTML("")
    fin = gr.Label(label="Final Probability")
    outp0 = gr.HTML("")
    outp1 = gr.HTML("")
    outp2 = gr.HTML("")

    load_btn.click(load_url, in_url, [inp, mes])
    # Reset the accumulated results, then run each detector and refresh the averaged score
    btn.click(fin_clear, None, fin, show_progress=False)
    btn.click(lambda img: aiornot(img, 0), inp, [outp0]).then(tot_prob, None, fin, show_progress=False)
    btn.click(lambda img: aiornot(img, 1), inp, [outp1]).then(tot_prob, None, fin, show_progress=False)
    btn.click(lambda img: aiornot(img, 2), inp, [outp2]).then(tot_prob, None, fin, show_progress=False)

app.launch(show_api=False, max_threads=24)