import gradio as gr
from prompt_generator import generate_prompt
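
# NOTE: `generate_prompt` is assumed, based on its call below and the app
# description, to tokenize a fixed source text (Alice's Adventures in
# Wonderland, linked in the description) with the chosen tokenizer, truncate
# it to the requested number of tokens, write the prompt to `output_file` as
# JSONL, and return the prompt string; `silent=True` presumably suppresses
# console output.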

tokenizers = [
    "google/gemma-7b",
    "meta-llama/Llama-2-7b",
    "mistralai/Mistral-7B-v0.1",
    "facebook/opt-2.7b",
    "microsoft/phi-2",
    "THUDM/chatglm3-6b",
    "Qwen/Qwen1.5-7B-Chat",
    "bigscience/bloom-560m",
    "ise-uiuc/Magicoder-S-DS-6.7B",
    "google/flan-t5-base",
    "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
    "google-bert/bert-base-uncased",
]


def generate(model_id, num_tokens):
    # Gradio passes the textbox value as a string, so cast it to int before
    # handing it to generate_prompt.
    output_file = f"prompt_{num_tokens}.jsonl"
    prompt = generate_prompt(model_id, int(num_tokens), silent=True, output_file=output_file)
    return prompt, output_file


demo = gr.Interface(
    fn=generate,
    title="Prompt Generator",
    description="Generate prompts of a given token length for testing transformer models. "
    "Prompt source: https://archive.org/stream/alicesadventures19033gut/19033.txt",
    inputs=[
        gr.Dropdown(label="Tokenizer", choices=tokenizers, allow_custom_value=True),
        gr.Textbox(label="Number of Tokens"),
    ],
    outputs=[gr.Textbox(label="Prompt", show_copy_button=True), gr.File(label="JSON file")],
    examples=[
        ["mistralai/Mistral-7B-v0.1", 32],
        ["mistralai/Mistral-7B-v0.1", 64],
        ["mistralai/Mistral-7B-v0.1", 128],
        ["mistralai/Mistral-7B-v0.1", 512],
        ["mistralai/Mistral-7B-v0.1", 1024],
        ["mistralai/Mistral-7B-v0.1", 2048],
    ],
    cache_examples=False,
    allow_flagging="never",
)

demo.launch()
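
# A minimal sketch of using the generator without the UI, assuming the same
# generate_prompt signature used above (hypothetical example, not part of the
# app itself):
#
#   from prompt_generator import generate_prompt
#
#   prompt = generate_prompt(
#       "mistralai/Mistral-7B-v0.1",  # any tokenizer id from the list above
#       128,                          # target prompt length in tokens
#       silent=True,
#       output_file="prompt_128.jsonl",
#   )
#   print(prompt)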