import gradio as gr
import torch
from transformers import AutoFeatureExtractor, AutoModelForImageClassification
from numpy import exp
import pandas as pd
from PIL import Image
import urllib.request
import uuid

# Unique id used to namespace the temporary file created by load_url()
uid = uuid.uuid4()
def softmax(vector):
    # Convert a vector of logits into probabilities that sum to 1
    e = exp(vector)
    return e / e.sum()
# Image classifiers from the Hugging Face Hub used as AI-image detectors
models = [
    "Nahrawy/AIorNot",
    "umm-maybe/AI-image-detector",
    "arnolfokam/ai-generated-image-detector",
]

# Accumulates the per-model label scores so tot_prob() can average them
fin_sum = []
def aiornot0(image):
    # Classify the image with the first detector and report Real vs. AI scores
    labels = ["Real", "AI"]
    mod = models[0]
    feature_extractor0 = AutoFeatureExtractor.from_pretrained(mod)
    model0 = AutoModelForImageClassification.from_pretrained(mod)
    inputs = feature_extractor0(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model0(**inputs)
        logits = outputs.logits
        probability = softmax(logits)
        px = pd.DataFrame(probability.numpy())
    prediction = logits.argmax(-1).item()
    label = labels[prediction]
    html_out = f"""
    <h1>This image is likely: {label}</h1><br><h3>
    Probabilities:<br>
    Real: {px[0][0]}<br>
    AI: {px[1][0]}"""
    results = {}
    for idx, _ in enumerate(px):
        results[labels[idx]] = px[idx][0]
    fin_sum.append(results)
    return gr.HTML.update(html_out), results
def aiornot1(image):
    # Classify the image with the second detector
    labels = ["Real", "AI"]
    mod = models[1]
    feature_extractor1 = AutoFeatureExtractor.from_pretrained(mod)
    model1 = AutoModelForImageClassification.from_pretrained(mod)
    inputs = feature_extractor1(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model1(**inputs)
        logits = outputs.logits
        probability = softmax(logits)
        px = pd.DataFrame(probability.numpy())
    prediction = logits.argmax(-1).item()
    label = labels[prediction]
    html_out = f"""
    <h1>This image is likely: {label}</h1><br><h3>
    Probabilities:<br>
    Real: {px[0][0]}<br>
    AI: {px[1][0]}"""
    results = {}
    for idx, _ in enumerate(px):
        results[labels[idx]] = px[idx][0]
    fin_sum.append(results)
    return gr.HTML.update(html_out), results
def aiornot2(image):
    # Classify the image with the third detector; this checkpoint reports its
    # classes in the opposite order, so the label list is reversed
    labels = ["AI", "Real"]
    mod = models[2]
    feature_extractor2 = AutoFeatureExtractor.from_pretrained(mod)
    model2 = AutoModelForImageClassification.from_pretrained(mod)
    inputs = feature_extractor2(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model2(**inputs)
        logits = outputs.logits
        probability = softmax(logits)
        px = pd.DataFrame(probability.numpy())
    prediction = logits.argmax(-1).item()
    label = labels[prediction]
    html_out = f"""
    <h1>This image is likely: {label}</h1><br><h3>
    Probabilities:<br>
    Real: {px[1][0]}<br>
    AI: {px[0][0]}"""
    results = {}
    for idx, _ in enumerate(px):
        results[labels[idx]] = px[idx][0]
    fin_sum.append(results)
    return gr.HTML.update(html_out), results
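
# --- Illustrative sketch (not wired into the UI below) -------------------------
# The three aiornot* functions above differ only in which checkpoint they load
# and, for models[2], in label order. A single parameterized helper could cover
# all three; the name aiornot_generic and its signature are hypothetical.
def aiornot_generic(image, model_index, labels=("Real", "AI")):
    mod = models[model_index]
    extractor = AutoFeatureExtractor.from_pretrained(mod)
    clf = AutoModelForImageClassification.from_pretrained(mod)
    inputs = extractor(image, return_tensors="pt")
    with torch.no_grad():
        logits = clf(**inputs).logits
    probs = softmax(logits)
    # Map each class probability to its human-readable label
    return {labels[i]: float(probs[0][i]) for i in range(len(labels))}
# -------------------------------------------------------------------------------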
def load_url(url):
    # Download the image at the given URL to a uniquely named temp file and open it
    try:
        urllib.request.urlretrieve(url, f"{uid}tmp_im.png")
        image = Image.open(f"{uid}tmp_im.png")
        mes = "Image Loaded"
    except Exception as e:
        image = None
        mes = f"Image not Found<br>Error: {e}"
    return image, mes
def tot_prob():
    # Average the "Real" probability across the three detectors; fails quietly
    # until all three results have been appended to fin_sum
    try:
        fin_out = (fin_sum[0]["Real"] + fin_sum[1]["Real"] + fin_sum[2]["Real"]) / 3
        fin_sub = 1 - fin_out
        out = {
            "Real": f"{fin_out}",
            "AI": f"{fin_sub}",
        }
        print(fin_out)
        return out
    except Exception as e:
        print(e)
        return None
def fin_clear():
    # Reset the accumulated per-model results before a new detection run
    fin_sum.clear()
    return None
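
# Note: the UI below targets the Gradio 3.x API; gr.Box, gr.HTML.update, and
# queue(concurrency_count=...) were changed or removed in later Gradio releases.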
with gr.Blocks() as app:
    gr.Markdown("""<center><h1>AI Image Detector<br><h4>(Test Demo - accuracy varies by model)""")
    with gr.Column():
        inp = gr.Image(type='numpy')
        in_url = gr.Textbox(label="Image URL")
        with gr.Row():
            load_btn = gr.Button("Load URL")
            btn = gr.Button("Detect AI")
        mes = gr.HTML("""""")
    with gr.Group():
        with gr.Row():
            fin = gr.Label(label="Final Probability")
        with gr.Row():
            with gr.Box():
                lab0 = gr.HTML(f"""<b>Testing on Model: <a href='https://huggingface.co/{models[0]}'>{models[0]}</a></b>""")
                nun0 = gr.HTML("""""")
            with gr.Box():
                lab1 = gr.HTML(f"""<b>Testing on Model: <a href='https://huggingface.co/{models[1]}'>{models[1]}</a></b>""")
                nun1 = gr.HTML("""""")
            with gr.Box():
                lab2 = gr.HTML(f"""<b>Testing on Model: <a href='https://huggingface.co/{models[2]}'>{models[2]}</a></b>""")
                nun2 = gr.HTML("""""")
        with gr.Row():
            with gr.Box():
                n_out0 = gr.Label(label="Output")
                outp0 = gr.HTML("""""")
            with gr.Box():
                n_out1 = gr.Label(label="Output")
                outp1 = gr.HTML("""""")
            with gr.Box():
                n_out2 = gr.Label(label="Output")
                outp2 = gr.HTML("""""")
    # Wire the buttons: clear previous results, then run each detector on the
    # loaded image and refresh the combined score after each one finishes
    btn.click(fin_clear, None, fin)
    load_btn.click(load_url, in_url, [inp, mes])
    btn.click(aiornot0, [inp], [outp0, n_out0]).then(tot_prob, None, fin)
    btn.click(aiornot1, [inp], [outp1, n_out1]).then(tot_prob, None, fin)
    btn.click(aiornot2, [inp], [outp2, n_out2]).then(tot_prob, None, fin)

app.queue(concurrency_count=20).launch()