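"""Gradio app for MOSAIC scoring.

Loads two or more language models (which must share a tokenizer) and uses
MOSAIC to score an input text, reporting whether it was likely
machine-generated or human-written.
"""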
import gradio as gr
from mosaic import Mosaic
import spaces  # Hugging Face Spaces helper; imported for Space compatibility, not used directly below
import traceback

# Maximum number of model textboxes shown in the UI
MAX_MODELS = 10
# Preset model configurations (all models in a preset share a tokenizer)
GPT_CONFIG_MODELS = [
    "openai-community/gpt2-large",
    "openai-community/gpt2-medium",
    "openai-community/gpt2",
]
Falcon_CONFIG_MODELS = [
    "tiiuae/Falcon3-10B-Base",
    "tiiuae/Falcon3-7B-Instruct",
    "tiiuae/Falcon3-7B-Base",
]
# Increase model slots
def update_textboxes(n_visible):
    if n_visible < MAX_MODELS:
        n_visible += 1
    tb_updates = [gr.update(visible=(i < n_visible)) for i in range(MAX_MODELS)]
    return (n_visible, *tb_updates)
# Decrease model slots and clear removed entries
def remove_textboxes(n_visible):
    if n_visible > 2:
        n_visible -= 1
    tb_updates = []
    for i in range(MAX_MODELS):
        if i < n_visible:
            tb_updates.append(gr.update(visible=True))
        else:
            tb_updates.append(gr.update(visible=False, value=""))
    return (n_visible, *tb_updates)
def apply_config1():
    """
    Apply the GPT preset.

    Returns:
        - new n_visible (number of model textboxes to show)
        - an update (value & visibility) for each model textbox
    """
    n_vis = len(GPT_CONFIG_MODELS)
    tb_updates = []
    for i in range(MAX_MODELS):
        if i < n_vis:
            # Show this slot and fill it from the preset
            tb_updates.append(gr.update(visible=True, value=GPT_CONFIG_MODELS[i]))
        else:
            # Hide and clear all remaining slots
            tb_updates.append(gr.update(visible=False, value=""))
    # Same output shape as update_textboxes/remove_textboxes:
    # (n_models_state, *model textboxes)
    return (n_vis, *tb_updates)
def apply_config2():
    """
    Apply the Falcon preset.

    Returns:
        - new n_visible (number of model textboxes to show)
        - an update (value & visibility) for each model textbox
    """
    n_vis = len(Falcon_CONFIG_MODELS)
    tb_updates = []
    for i in range(MAX_MODELS):
        if i < n_vis:
            # Show this slot and fill it from the preset
            tb_updates.append(gr.update(visible=True, value=Falcon_CONFIG_MODELS[i]))
        else:
            # Hide and clear all remaining slots
            tb_updates.append(gr.update(visible=False, value=""))
    # Same output shape as update_textboxes/remove_textboxes:
    # (n_models_state, *model textboxes)
    return (n_vis, *tb_updates)
def run_scoring(input_text, *args):
    """
    args: the first MAX_MODELS entries are model paths, followed by
    threshold_choice and custom_threshold.
    """
    try:
        # Unpack inputs, dropping empty model slots
        models = [m.strip() for m in args[:MAX_MODELS] if m.strip()]
        threshold_choice = args[MAX_MODELS]
        custom_threshold = args[MAX_MODELS + 1]
        if len(models) < 2:
            return "Please enter at least two model paths.", None, None
        threshold = 0.0 if threshold_choice == "default" else custom_threshold
        # Load all models and compute the MOSAIC score for the input text
        mosaic_instance = Mosaic(model_name_or_paths=models, one_model_mode=False)
        final_score = mosaic_instance.compute_end_score(input_text)
        # Scores below the threshold are flagged as machine-generated
        msg = "This text was probably generated." if final_score < threshold else "This text is likely human-written."
        return msg, final_score, threshold
    except Exception as e:
        tb = traceback.format_exc()
        return f"Error: {e}\n{tb}", None, None
# Build the Blocks UI
demo = gr.Blocks()
with demo:
    gr.Markdown("# MOSAIC Scoring App")
    with gr.Row():
        input_text = gr.Textbox(lines=10, placeholder="Enter text here...", label="Input Text")
        with gr.Column():
            gr.Markdown("**⚠️ Please make sure all models share the same tokenizer, or scoring won't work.**")
            gr.Markdown("### Model Paths (at least 2 required)")
            n_models_state = gr.State(4)
            model_inputs = []
            for i in range(1, MAX_MODELS + 1):
                with gr.Row():
                    tb = gr.Textbox(label=f"Model {i} Path", value="" if i > 4 else None, visible=(i <= 4))
                    model_inputs.append(tb)
            with gr.Row():
                plus = gr.Button("Add model slot", elem_id="plus_button")
                minus = gr.Button("Remove model slot", elem_id="minus_button")
            config1_btn = gr.Button("Try Basic GPT Configuration")
            # Button for the Falcon preset (wired to apply_config2 below)
            config2_btn = gr.Button("Try Basic Falcon Configuration")
    plus.click(
        fn=update_textboxes,
        inputs=n_models_state,
        outputs=[n_models_state, *model_inputs],
    )
    minus.click(
        fn=remove_textboxes,
        inputs=n_models_state,
        outputs=[n_models_state, *model_inputs],
    )
    config1_btn.click(
        fn=apply_config1,
        inputs=None,
        outputs=[n_models_state, *model_inputs],
    )
    config2_btn.click(
        fn=apply_config2,
        inputs=None,
        outputs=[n_models_state, *model_inputs],
    )
    with gr.Row():
        threshold_choice = gr.Radio(choices=["default", "custom"], value="default", label="Threshold Choice")
        custom_threshold = gr.Number(value=0.0, label="Custom Threshold (if 'custom' selected)")
    with gr.Row():
        output_message = gr.Textbox(label="Result Message")
        output_score = gr.Number(label="Final Score")
        output_threshold = gr.Number(label="Threshold Used")
    gr.Markdown("**⚠️ All models need to be loaded for scoring; this can take time.**")
    run_button = gr.Button("Run Scoring")
    run_button.click(
        fn=run_scoring,
        inputs=[input_text, *model_inputs, threshold_choice, custom_threshold],
        outputs=[output_message, output_score, output_threshold],
    )

# Launch
demo.queue()
demo.launch()
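# Note: on a Hugging Face Space this file is typically named app.py and is
# launched automatically by the Gradio SDK; locally it can be run with
# `python app.py`.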