Update app.py
app.py (CHANGED). The rendered diff below shows the previous version of the file first (removed lines marked with '-', unchanged context lines unmarked), followed by the updated file (added lines marked with '+'). Bracketed notes mark regions the diff viewer collapsed as unchanged.
@@ -1,18 +1,17 @@
-
  import gradio as gr
  from transformers import AutoTokenizer, AutoModelForSequenceClassification
  import torch
  import re
  from tokenizers import normalizers
  from tokenizers.normalizers import Sequence, Replace, Strip, NFKC
- from tokenizers import Regex
-

  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

  model1_path = "modernbert.bin"
  model2_path = "https://huggingface.co/mihalykiss/modernbert_2/resolve/main/Model_groups_3class_seed12"
  model3_path = "https://huggingface.co/mihalykiss/modernbert_2/resolve/main/Model_groups_3class_seed22"

  tokenizer = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-base")

@@ -28,6 +27,8 @@ model_3 = AutoModelForSequenceClassification.from_pretrained("answerdotai/Modern
  model_3.load_state_dict(torch.hub.load_state_dict_from_url(model3_path, map_location=device))
  model_3.to(device).eval()

  label_mapping = {
      0: '13B', 1: '30B', 2: '65B', 3: '7B', 4: 'GLM130B', 5: 'bloom_7b',
      6: 'bloomz', 7: 'cohere', 8: 'davinci', 9: 'dolly', 10: 'dolly-v2-12b',
@@ -42,10 +43,11 @@ label_mapping = {
  }

  def clean_text(text: str) -> str:
-     text = re.sub(r'\s{2,}', ' ', text)
      text = re.sub(r'\s+([,.;:?!])', r'\1', text)
      return text

  newline_to_space = Replace(Regex(r'\s*\n\s*'), " ")
  join_hyphen_break = Replace(Regex(r'(\w+)[--]\s*\n\s*(\w+)'), r"\1\2")

@@ -59,7 +61,9 @@ tokenizer.backend_tokenizer.normalizer = Sequence([
  def classify_text(text):
      cleaned_text = clean_text(text)
      if not text.strip():
-         result_message = (
          return result_message

      inputs = tokenizer(cleaned_text, return_tensors="pt", truncation=True, padding=True).to(device)
@@ -68,11 +72,11 @@ def classify_text(text):
      logits_1 = model_1(**inputs).logits
      logits_2 = model_2(**inputs).logits
      logits_3 = model_3(**inputs).logits
-
      softmax_1 = torch.softmax(logits_1, dim=1)
      softmax_2 = torch.softmax(logits_2, dim=1)
      softmax_3 = torch.softmax(logits_3, dim=1)
-
      averaged_probabilities = (softmax_1 + softmax_2 + softmax_3) / 3
      probabilities = averaged_probabilities[0]

@@ -93,16 +97,29 @@ def classify_text(text):
          f"**The text is** <span class='highlight-ai'>**{ai_total_prob:.2f}%** likely <b>AI generated</b>.</span>\n\n"
          f"**Identified LLM: {ai_argmax_model}**"
      )
      return result_message

  title = "AI Text Detector"

  description = """
- This tool uses the <b>ModernBERT</b> model to identify whether a given text was written by a human or generated by AI.
-
  """

- bottom_text = "Built by <b>SzegedAI</b> • ModernBERT Ensemble • Works best with longer texts"

  AI_texts = [
      "Camels are remarkable desert animals known for their unique adaptations to harsh, arid environments. Native to the Middle East, North Africa, and parts of Asia, camels have been essential to human life for centuries, serving as a mode of transportation, a source of food, and even a symbol of endurance and survival. There are two primary species of camels: the dromedary camel, which has a single hump and is commonly found in the Middle East and North Africa, and the Bactrian camel, which has two humps and is native to Central Asia. Their humps store fat, not water, as commonly believed, allowing them to survive long periods without food by metabolizing the stored fat for energy. Camels are highly adapted to desert life. They can go for weeks without water, and when they do drink, they can consume up to 40 gallons in one sitting. Their thick eyelashes, sealable nostrils, and wide, padded feet protect them from sand and help them walk easily on loose desert terrain.",
@@ -114,253 +131,101 @@ Human_texts = [
      "The present book is intended as a text in basic mathematics. As such, it can have multiple use: for a one-year course in the high schools during the third or fourth year (if possible the third, so that calculus can be taken during the fourth year); for a complementary reference in earlier high school grades (elementary algebra and geometry are covered); for a one-semester course at the college level, to review or to get a firm foundation in the basic mathematics necessary to go ahead in calculus, linear algebra, or other topics. Years ago, the colleges used to give courses in “ college algebra” and other subjects which should have been covered in high school. More recently, such courses have been thought unnecessary, but some experiences I have had show that they are just as necessary as ever. What is happening is that thecolleges are getting a wide variety of students from high schools, ranging from exceedingly well-prepared ones who have had a good first course in calculus, down to very poorly prepared ones.",
      "Fats are rich in energy, build body cells, support brain development of infants, help body processes, and facilitate the absorption and use of fat-soluble vitamins A, D, E, and K. The major component of lipids is glycerol and fatty acids. According to chemical properties, fatty acids can be divided into saturated and unsaturated fatty acids. Generally lipids containing saturated fatty acids are solid at room temperature and include animal fats (butter, lard, tallow, ghee) and tropical oils (palm,coconut, palm kernel). Saturated fats increase the risk of heart disease.",
      "To make BERT handle a variety of down-stream tasks, our input representation is able to unambiguously represent both a single sentence and a pair of sentences (e.g., h Question, Answeri) in one token sequence. Throughout this work, a “sentence” can be an arbitrary span of contiguous text, rather than an actual linguistic sentence. A “sequence” refers to the input token sequence to BERT, which may be a single sentence or two sentences packed together. We use WordPiece embeddings (Wu et al., 2016) with a 30,000 token vocabulary. The first token of every sequence is always a special classification token ([CLS]). The final hidden state corresponding to this token is used as the aggregate sequence representation for classification tasks. Sentence pairs are packed together into a single sequence."]
- [old lines 117-206 were also removed here; apparently the beginning of the previous css_template string, but their content is not visible in the rendered diff]
-     border-radius: calc(var(--radius) - 2px);
-     box-shadow: inset 0 0 0 999px transparent, var(--shadow);
-     padding: 16px 14px;
-     font-size: 15px;
-     line-height: 1.5;
-     color: var(--text);
- }
- #text_input_box textarea:focus {
-     outline: none;
-     border: 1px solid rgba(110,86,207,0.65);
-     box-shadow: 0 0 0 4px rgba(110,86,207,0.20);
- }
-
- .controls-row {
-     display: flex; align-items: center; justify-content: space-between;
-     margin-top: 10px; gap: 10px;
- }
- .chips {
-     display: flex; gap: 6px; flex-wrap: wrap;
- }
- .chip {
-     background: var(--chip-bg);
-     border: 1px solid rgba(110,86,207,0.25);
-     padding: 6px 10px; border-radius: 999px;
-     font-size: 12px; color: var(--text);
- }
-
- button.primary, .primary button, .btn-primary {
-     background: linear-gradient(135deg, var(--brand), var(--brand-2));
-     border: none !important;
-     color: white !important;
-     border-radius: 10px !important;
-     padding: 10px 14px !important;
-     box-shadow: 0 8px 24px rgba(110,86,207,0.25);
- }
- button.primary:hover, .primary button:hover, .btn-primary:hover {
-     transform: translateY(-1px);
-     box-shadow: 0 10px 28px rgba(6,182,212,0.25);
- }
-
- /* Result */
- #result_output_box {
-     background: var(--panel);
-     border: var(--card-border);
-     border-radius: var(--radius);
-     padding: 16px;
-     font-size: 16px;
- }
- .highlight-human {
-     color: var(--ok);
-     background: rgba(34,197,94,0.12);
-     padding: 2px 6px; border-radius: 8px;
- }
- .highlight-ai {
-     color: var(--danger);
-     background: rgba(239,68,68,0.12);
-     padding: 2px 6px; border-radius: 8px;
- }
-
- .tabs-wrap {
-     background: var(--panel-2);
-     border: var(--card-border);
-     border-radius: var(--radius);
-     box-shadow: var(--shadow);
-     padding: 10px;
- }
- .examples-note {
-     font-size: 12px; color: var(--muted); margin-top: 8px;
- }
-
- .footer {
-     margin-top: 8px; text-align: center; color: var(--muted); font-size: 12px;
- }
-
- .container-narrow {
-     max-width: 980px; margin: 0 auto;
- }
- .section-gap { margin-top: 14px; }
-
- .form.svelte-633qhp, .block.svelte-11xb1hd { background: transparent !important; box-shadow: none !important; border: none !important; }
- """
-
- def text_stats(s: str):
-
-     chars = len(s or "")
-     words = len((s or "").split())
-     return f"{chars} chars • {words} words"
-
- with gr.Blocks(css=css_template, fill_height=True, theme=gr.themes.Base()) as iface:
-     with gr.Column(elem_classes=["container-narrow"]):
-
-         with gr.Row(elem_classes=["header-card"]):
-             gr.HTML("<div class='logo-dot'></div>")
-             gr.HTML(f"<div class='header-title'>{title}</div>")
-             gr.HTML("<div class='header-sub'>ModernBERT • 3-model ensemble</div>")
-
-         with gr.Column(elem_classes=["section-gap", "card"]):
-             gr.Markdown(
-                 f"""
-                 <span style="font-size:16px">{description}</span>
-                 <div class="chips" style="margin-top:10px">
-                     <div class="chip">ModernBERT-base</div>
-                     <div class="chip">Ensemble (x3)</div>
-                     <div class="chip">Multiclass source ID</div>
-                     <div class="chip">Human vs AI decision</div>
-                 </div>
-                 """,
-                 elem_id="desc_top"
-             )
-
-         with gr.Column(elem_classes=["section-gap"]):
-             with gr.Row():
-                 with gr.Column(scale=6, elem_classes=["card"]):
-                     text_input = gr.Textbox(
-                         label="Paste text",
-                         placeholder="Paste or type your content here…",
-                         lines=10,
-                         elem_id="text_input_box"
-                     )
-
-                     with gr.Row(elem_classes=["controls-row"]):
-                         stats_md = gr.Markdown("0 chars • 0 words")
-                         with gr.Row():
-                             clear_btn = gr.Button("Clear", elem_classes=["btn-primary"], variant="secondary")
-                             copy_btn = gr.Button("Copy Result", elem_classes=["btn-primary"], variant="secondary")
-
-                 with gr.Column(scale=4, elem_classes=["card"]):
-                     gr.Markdown("### Result")
-                     result_output = gr.Markdown("", elem_id="result_output_box")
-
-         with gr.Column(elem_classes=["section-gap", "tabs-wrap"]):
-             with gr.Tabs():
-                 with gr.Tab("AI text examples"):
-                     gr.Examples(AI_texts, inputs=text_input, examples_per_page=3)
-                 with gr.Tab("Human text examples"):
-                     gr.Examples(Human_texts, inputs=text_input, examples_per_page=3)
-             gr.Markdown(
-                 "Use an example to prefill the textbox, then edit as needed.",
-                 elem_classes=["examples-note"]
-             )
-
-         gr.Markdown(
-             f"<div class='footer'>{bottom_text} • <a href='https://aclanthology.org/2025.genaidetect-1.15/' target='_blank'>Paper</a></div>"
-         )
-
-         def _stats_update(s): return text_stats(s)
      text_input.change(classify_text, inputs=text_input, outputs=result_output)
-
-
-
-
-
-         hidden_copy_src = gr.Textbox(visible=False)
-         copy_btn.click(_noop, inputs=result_output, outputs=hidden_copy_src)

- iface.launch(share=True)

Updated app.py (new version):

  import gradio as gr
  from transformers import AutoTokenizer, AutoModelForSequenceClassification
  import torch
  import re
  from tokenizers import normalizers
  from tokenizers.normalizers import Sequence, Replace, Strip, NFKC
+ from tokenizers import Regex

  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

  model1_path = "modernbert.bin"
  model2_path = "https://huggingface.co/mihalykiss/modernbert_2/resolve/main/Model_groups_3class_seed12"
  model3_path = "https://huggingface.co/mihalykiss/modernbert_2/resolve/main/Model_groups_3class_seed22"
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

  tokenizer = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-base")

  [lines 18-26 unchanged and collapsed by the diff viewer]
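The collapsed region above (lines 18-26) is where model_1 and model_2 are presumably constructed and loaded; only model_3's loading survives into the visible hunk below. As a hedged sketch that simply mirrors the visible model_3 pattern (NUM_LABELS and the exact from_pretrained arguments are assumptions, not taken from this diff), that region plausibly looks something like:

# Hedged sketch of the collapsed lines 18-26; only model_3's loading is visible in this
# diff, so everything here is an assumption that mirrors that pattern.
NUM_LABELS = 41  # placeholder; the real head size must match the checkpoints and label_mapping
model_1 = AutoModelForSequenceClassification.from_pretrained(
    "answerdotai/ModernBERT-base", num_labels=NUM_LABELS)
model_1.load_state_dict(torch.load(model1_path, map_location=device))   # local .bin checkpoint
model_1.to(device).eval()

model_2 = AutoModelForSequenceClassification.from_pretrained(
    "answerdotai/ModernBERT-base", num_labels=NUM_LABELS)
model_2.load_state_dict(torch.hub.load_state_dict_from_url(model2_path, map_location=device))
model_2.to(device).eval()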
  model_3.load_state_dict(torch.hub.load_state_dict_from_url(model3_path, map_location=device))
  model_3.to(device).eval()

+
+
  label_mapping = {
      0: '13B', 1: '30B', 2: '65B', 3: '7B', 4: 'GLM130B', 5: 'bloom_7b',
      6: 'bloomz', 7: 'cohere', 8: 'davinci', 9: 'dolly', 10: 'dolly-v2-12b',
  [lines 35-42 unchanged and collapsed by the diff viewer]
  }

  def clean_text(text: str) -> str:
+     text = re.sub(r'\s{2,}', ' ', text)
      text = re.sub(r'\s+([,.;:?!])', r'\1', text)
      return text

+
  newline_to_space = Replace(Regex(r'\s*\n\s*'), " ")
  join_hyphen_break = Replace(Regex(r'(\w+)[--]\s*\n\s*(\w+)'), r"\1\2")

  [lines 54-60 unchanged and collapsed by the diff viewer]
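The collapsed lines 54-60 attach these Replace rules to the tokenizer; the hunk header of the previous version shows the assignment `tokenizer.backend_tokenizer.normalizer = Sequence([`. The exact member list is not visible, so the following is only a sketch of how such a sequence is typically assembled from the imported normalizers (the ordering is an assumption):

# Hedged sketch of the collapsed normalizer wiring (lines 54-60); the member order is assumed.
tokenizer.backend_tokenizer.normalizer = Sequence([
    tokenizer.backend_tokenizer.normalizer,  # keep the tokenizer's original normalizer (assumption)
    join_hyphen_break,                       # re-join words split by a hyphen across a line break
    newline_to_space,                        # collapse newlines and surrounding spaces to one space
    Strip(),                                 # trim leading/trailing whitespace
    NFKC(),                                  # Unicode compatibility normalization
])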
  def classify_text(text):
      cleaned_text = clean_text(text)
      if not text.strip():
+         result_message = (
+             f"---- \n"
+         )
          return result_message

      inputs = tokenizer(cleaned_text, return_tensors="pt", truncation=True, padding=True).to(device)
  [lines 70-71 unchanged and collapsed by the diff viewer]
      logits_1 = model_1(**inputs).logits
      logits_2 = model_2(**inputs).logits
      logits_3 = model_3(**inputs).logits
+
      softmax_1 = torch.softmax(logits_1, dim=1)
      softmax_2 = torch.softmax(logits_2, dim=1)
      softmax_3 = torch.softmax(logits_3, dim=1)
+
      averaged_probabilities = (softmax_1 + softmax_2 + softmax_3) / 3
      probabilities = averaged_probabilities[0]

  [lines 83-96 unchanged and collapsed by the diff viewer]
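The collapsed lines 83-96 turn the averaged probabilities into the verdict used in the result message (a human probability, the total AI probability, and the arg-max LLM label). The index of the human class is not visible in this diff, so the reconstruction below is only a hedged guess at that logic:

# Hedged reconstruction of the collapsed verdict logic (lines 83-96); the 'human' entry of
# label_mapping is not shown in this diff, so the lookup below is an assumption.
human_label_id = next(k for k, v in label_mapping.items() if v.lower() == "human")
human_prob = probabilities[human_label_id].item() * 100
ai_probs = probabilities.clone()
ai_probs[human_label_id] = 0.0                  # drop the human class before picking the top LLM
ai_total_prob = ai_probs.sum().item() * 100     # total mass assigned to AI-generated classes
ai_argmax_model = label_mapping[ai_probs.argmax().item()]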
          f"**The text is** <span class='highlight-ai'>**{ai_total_prob:.2f}%** likely <b>AI generated</b>.</span>\n\n"
          f"**Identified LLM: {ai_argmax_model}**"
      )
+
      return result_message

+
+
+
+
  title = "AI Text Detector"

  description = """
+ This tool uses the <b>ModernBERT</b> model to identify whether a given text was written by a human or generated by artificial intelligence (AI). It works with a soft voting ensemble using <b>three</b> models, combining their outputs to improve the accuracy.<br>
+ <div style="line-height: 1.8;">
+ ✅ <b>Human Verification:</b> Human-written content is clearly marked.<br>
+ 🔍 <b>Model Detection:</b> Can identify content from over 40 AI models.<br>
+ 📈 <b>Accuracy:</b> Works best with longer texts.<br>
+ 📄 <b>Read more:</b> Our method is detailed in our paper:
+ <a href="https://aclanthology.org/2025.genaidetect-1.15/" target="_blank" style="color: #007bff; text-decoration: none;"><b>LINK</b></a>.
+ </div>
+ <br>
+ Paste your text below to analyze its origin.
  """
+ bottom_text = "**Developed by SzegedAI**"


  AI_texts = [
      "Camels are remarkable desert animals known for their unique adaptations to harsh, arid environments. Native to the Middle East, North Africa, and parts of Asia, camels have been essential to human life for centuries, serving as a mode of transportation, a source of food, and even a symbol of endurance and survival. There are two primary species of camels: the dromedary camel, which has a single hump and is commonly found in the Middle East and North Africa, and the Bactrian camel, which has two humps and is native to Central Asia. Their humps store fat, not water, as commonly believed, allowing them to survive long periods without food by metabolizing the stored fat for energy. Camels are highly adapted to desert life. They can go for weeks without water, and when they do drink, they can consume up to 40 gallons in one sitting. Their thick eyelashes, sealable nostrils, and wide, padded feet protect them from sand and help them walk easily on loose desert terrain.",
  [lines 126-130 unchanged and collapsed by the diff viewer]
      "The present book is intended as a text in basic mathematics. As such, it can have multiple use: for a one-year course in the high schools during the third or fourth year (if possible the third, so that calculus can be taken during the fourth year); for a complementary reference in earlier high school grades (elementary algebra and geometry are covered); for a one-semester course at the college level, to review or to get a firm foundation in the basic mathematics necessary to go ahead in calculus, linear algebra, or other topics. Years ago, the colleges used to give courses in “ college algebra” and other subjects which should have been covered in high school. More recently, such courses have been thought unnecessary, but some experiences I have had show that they are just as necessary as ever. What is happening is that thecolleges are getting a wide variety of students from high schools, ranging from exceedingly well-prepared ones who have had a good first course in calculus, down to very poorly prepared ones.",
      "Fats are rich in energy, build body cells, support brain development of infants, help body processes, and facilitate the absorption and use of fat-soluble vitamins A, D, E, and K. The major component of lipids is glycerol and fatty acids. According to chemical properties, fatty acids can be divided into saturated and unsaturated fatty acids. Generally lipids containing saturated fatty acids are solid at room temperature and include animal fats (butter, lard, tallow, ghee) and tropical oils (palm,coconut, palm kernel). Saturated fats increase the risk of heart disease.",
      "To make BERT handle a variety of down-stream tasks, our input representation is able to unambiguously represent both a single sentence and a pair of sentences (e.g., h Question, Answeri) in one token sequence. Throughout this work, a “sentence” can be an arbitrary span of contiguous text, rather than an actual linguistic sentence. A “sequence” refers to the input token sequence to BERT, which may be a single sentence or two sentences packed together. We use WordPiece embeddings (Wu et al., 2016) with a 30,000 token vocabulary. The first token of every sequence is always a special classification token ([CLS]). The final hidden state corresponding to this token is used as the aggregate sequence representation for classification tasks. Sentence pairs are packed together into a single sequence."]
+ iface = gr.Blocks(css="""
+ @import url('https://fonts.googleapis.com/css2?family=Roboto+Mono:wght@400;700&display=swap');
+ #text_input_box {
+     border-radius: 10px;
+     border: 2px solid #4CAF50;
+     font-size: 18px;
+     padding: 15px;
+     margin-bottom: 20px;
+     width: 60%;
+     box-sizing: border-box;
+     margin: auto;
+ }
+ .form.svelte-633qhp {
+     background: none;
+     border: none;
+     box-shadow: none;
+ }
+ #result_output_box {
+     border-radius: 10px;
+     border: 2px solid #4CAF50;
+     font-size: 18px;
+     padding: 15px;
+     margin-top: 20px;
+     width: 40%;
+     box-sizing: border-box;
+     text-align: center;
+     margin: auto;
+ }
+ @media (max-width: 768px) {
+     #result_output_box {
+         width: 100%;
+     }
+     #text_input_box {
+         width: 100%;
+     }
+ }
+ body {
+     font-family: 'Roboto Mono', sans-serif !important;
+     padding: 20px;
+     display: block;
+     justify-content: center;
+     align-items: center;
+     height: 100vh;
+     overflow-y: auto;
+ }
+ .gradio-container {
+     border: 1px solid #4CAF50;
+     border-radius: 15px;
+     padding: 30px;
+     box-shadow: 0px 0px 10px rgba(0,255,0,0.6);
+     max-width: 600px;
+     margin: auto;
+     overflow-y: auto;
+ }
+ h1 {
+     text-align: center;
+     font-size: 32px;
+     font-weight: bold;
+     margin-bottom: 30px;
+ }
+ .highlight-human {
+     color: #4CAF50;
+     font-weight: bold;
+     background: rgba(76, 175, 80, 0.2);
+     padding: 5px;
+     border-radius: 8px;
+ }
+ .highlight-ai {
+     color: #FF5733;
+     font-weight: bold;
+     background: rgba(255, 87, 51, 0.2);
+     padding: 5px;
+     border-radius: 8px;
+ }
+ #bottom_text {
+     text-align: center;
+     margin-top: 50px;
+     font-weight: bold;
+     font-size: 20px;
+ }
+ .block.svelte-11xb1hd {
+     background: none !important;
+ }
+ """)
+
+ with iface:
+     gr.Markdown(f"# {title}")
+     gr.Markdown(description)
+     text_input = gr.Textbox(label="", placeholder="Type or paste your content here...", elem_id="text_input_box", lines=5)
+     result_output = gr.Markdown("", elem_id="result_output_box")
      text_input.change(classify_text, inputs=text_input, outputs=result_output)
+     with gr.Tab("AI text examples"):
+         gr.Examples(AI_texts, inputs=text_input)
+     with gr.Tab("Human text examples"):
+         gr.Examples(Human_texts, inputs=text_input)
+     gr.Markdown(bottom_text, elem_id="bottom_text")

+ iface.launch(share=True)
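Because the result box is wired to text_input.change, classification re-runs on every edit of the textbox. For a quick sanity check outside the UI, classify_text can also be called directly once the module has loaded; a minimal sketch (the sample string is arbitrary):

# Minimal smoke test: classify_text returns the Markdown string shown in the result box.
sample = "Camels are remarkable desert animals known for their unique adaptations."
print(classify_text(sample))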