import gradio as gr
import torch
import random
from transformers import T5Tokenizer, T5ForConditionalGeneration
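
# Pick the GPU when one is available, otherwise fall back to the CPU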
if torch.cuda.is_available():
    device = "cuda"
    print("Using GPU")
else:
    device = "cpu"
    print("Using CPU")
tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-small")
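
# Expand a short prompt into a more detailed one with SuperPrompt-v1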
def generate(prompt, model_precision_type, max_new_tokens, repetition_penalty, temperature, top_p, top_k, seed):
    torch_dtype = torch.float16 if model_precision_type == "fp16" else torch.float32
    model = T5ForConditionalGeneration.from_pretrained("roborovski/superprompt-v1", torch_dtype=torch_dtype)
    model.to(device)
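    # Add the task prefix and tokenize the prompt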
input_text = f"Expand the following prompt to add more detail: {prompt}"
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
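    # Use the given seed, or draw a random one when 0 is passed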
    if seed == 0:
        seed = random.randint(1, 100000)
    torch.manual_seed(seed)
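    # Sample the expanded prompt with the chosen decoding settings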
    outputs = model.generate(
        input_ids,
        max_new_tokens=max_new_tokens,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
    )
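    # Decode the result and strip the pad/end-of-sequence markers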
    better_prompt = tokenizer.decode(outputs[0])
    better_prompt = better_prompt.replace("<pad>", "").replace("</s>", "").strip()
    return better_prompt
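
# Gradio input components for the generation settings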
prompt = gr.Textbox(label="Prompt", interactive=True)
model_precision_type = gr.Dropdown(choices=["fp16", "fp32"], type="value", value="fp16", label="Model Precision", info="fp16 is faster, fp32 is more precise")
max_new_tokens = gr.Slider(value=512, minimum=250, maximum=512, step=1, interactive=True, label="Max New Tokens", info="The maximum number of new tokens; controls how long the output can be")
repetition_penalty = gr.Slider(value=1.2, minimum=0, maximum=2, step=0.05, interactive=True, label="Repetition Penalty", info="Penalizes repeated tokens so the model repeats itself less")
temperature = gr.Slider(value=0.5, minimum=0, maximum=1, step=0.05, interactive=True, label="Temperature", info="Higher values produce more diverse outputs")
top_p = gr.Slider(value=1, minimum=0, maximum=1, step=0.05, interactive=True, label="Top P", info="Higher values sample more low-probability tokens")
top_k = gr.Slider(value=1, minimum=1, maximum=100, step=1, interactive=True, label="Top K", info="Higher values let the model choose from more candidate tokens, giving more diverse outputs")
seed = gr.Number(value=42, interactive=True, label="Seed", info="Seed used to make generation reproducible; set to 0 for a random seed")
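
# Example inputs shown under the interface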
examples = [
["A storefront with 'Text to Image' written on it.", "fp16", 512, 1.2, 0.5, 1, 50, 42]
]
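
# Build the interface and launch the Space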
gr.Interface(
    fn=generate,
    inputs=[prompt, model_precision_type, max_new_tokens, repetition_penalty, temperature, top_p, top_k, seed],
    outputs=gr.Textbox(label="Better Prompt"),
    title="SuperPrompt-v1",
    description='Make your prompts more detailed! <br> <a href="https://huggingface.co/roborovski/superprompt-v1">Model used</a> <br> <a href="https://brianfitzgerald.xyz/prompt-augmentation/">Model Blog</a> <br> Task Prefix: "Expand the following prompt to add more detail:" is already set! <br> Hugging Face Space made by [Nick088](https://linktr.ee/Nick088)',
    examples=examples,
).launch(show_api=False, share=True)