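"""Gradio Space: heuristic detector for LLM-generated text.

Scores input text with HuggingFaceTB/cosmo-1b and thresholds simple
per-token statistics (entropy, NLL, token rank) to decide whether the
text was likely written by a language model.
"""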
import spaces
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

class ModelProcessor:
    def __init__(self, repo_id="HuggingFaceTB/cosmo-1b"):
        self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
        self.tokenizer = AutoTokenizer.from_pretrained(repo_id, use_fast=True)
        # Load in fp16 and pin the whole model to a single device.
        self.model = AutoModelForCausalLM.from_pretrained(
            repo_id,
            torch_dtype=torch.float16,
            device_map={"": self.device},
            trust_remote_code=True,
        )
        self.model.eval()
        # Ensure a pad token exists; causal LMs often ship without one.
        self.tokenizer.pad_token = self.tokenizer.eos_token

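    # The detector computes three per-token statistics from a single forward
    # pass: predictive entropy, negative log-likelihood (NLL) of the observed
    # token, and the rank of the observed token among all vocabulary logits.
    # Predictable text (low entropy / low NLL / low rank) is treated as
    # evidence of machine generation. The numeric thresholds below are this
    # app's own tuned constants, not values from a published method.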
    @torch.inference_mode()
    def process_data_and_compute_statistics(self, prompt):
        tokens = self.tokenizer(
            prompt, return_tensors="pt", truncation=True, max_length=512
        ).to(self.model.device)
        # Single forward pass; logits shape: (1, seq_len, vocab_size).
        outputs = self.model(tokens["input_ids"])
        logits = outputs.logits
        # Shift so the logits at position i predict token i + 1.
        shifted_labels = tokens["input_ids"][..., 1:].contiguous()
        shifted_logits = logits[..., :-1, :].contiguous()
        # Per-token predictive entropy over the full vocabulary.
        shifted_probs = torch.softmax(shifted_logits, dim=-1)
        shifted_log_probs = torch.log_softmax(shifted_logits, dim=-1)
        entropy = -torch.sum(shifted_probs * shifted_log_probs, dim=-1).squeeze()
        # NLL of each observed token, clamped away from log(0).
        logits_flat = shifted_logits.view(-1, shifted_logits.size(-1))
        labels_flat = shifted_labels.view(-1)
        probabilities_flat = torch.softmax(logits_flat, dim=-1)
        true_class_probabilities = probabilities_flat.gather(
            1, labels_flat.unsqueeze(1)
        ).squeeze(1)
        nll = -torch.log(true_class_probabilities.clamp(min=1e-9))
        # Rank of the observed token in the sorted logits (0 = top-1).
        ranks = (
            shifted_logits.argsort(dim=-1, descending=True)
            == shifted_labels.unsqueeze(-1)
        ).nonzero()[:, -1]
        # Very predictable text: call it generated outright.
        if entropy.clamp(max=4).median() < 2.0:
            return 1
        # Otherwise combine clamped rank and NLL against a tuned cutoff.
        return 1 if (ranks.clamp(max=4) * nll.clamp(max=4)).mean() < 5.2 else 0

# Instantiate once at import time so the model is loaded a single time
# and shared across requests.
processor = ModelProcessor()

@spaces.GPU(duration=180)
def detect(prompt):
    prediction = processor.process_data_and_compute_statistics(prompt)
    if prediction == 1:
        return "<div class='output-text'>The text is likely <b>generated</b> by a language model.</div>"
    else:
        return "<div class='output-text'>The text is likely <b>not generated</b> by a language model.</div>"

with gr.Blocks(
    css="""
    .gradio-container {
        max-width: 800px;
        margin: 0 auto;
    }
    .gr-box {
        box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
        padding: 20px;
        border-radius: 4px;
    }
    .gr-button {
        background-color: #007bff;
        color: white;
        padding: 10px 20px;
        border-radius: 4px;
    }
    .gr-button:hover {
        background-color: #0056b3;
    }
    .hyperlinks a {
        margin-right: 10px;
    }
    .output-text {
        text-align: center;
        font-size: 24px;
        font-weight: bold;
    }
"""
) as demo:
    with gr.Row():
        with gr.Column(scale=3):
            gr.Markdown("# ENTELL Model Detection - ChatGPTBots.net")
        with gr.Column(scale=1):
            # Placeholder for header links; currently empty.
            gr.HTML(
                """
            """,
                elem_classes="hyperlinks",
            )
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(
                lines=8,
                placeholder="Type your prompt here...",
                label="Prompt",
            )
            submit_btn = gr.Button("Submit", variant="primary")
            output = gr.HTML()  # HTML component so the styled verdict renders

    submit_btn.click(fn=detect, inputs=prompt, outputs=output)

demo.launch()