|
import gradio as gr |
|
import torch |
|
from transformers import T5Tokenizer, T5ForConditionalGeneration |
|
|
|
# Pick the compute device once at startup: prefer CUDA when available.
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using GPU" if device == "cuda" else "Using CPU")
|
|
|
# SuperPrompt-v1 is a FLAN-T5 fine-tune, so the flan-t5-small tokenizer is
# compatible with it — presumably intentional; TODO confirm against model card.
tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-small")

# BUG FIX: the model must live on the same device as the input ids that
# `generate` moves with `.to(device)`, otherwise inference crashes on GPU.
# Use float16 only on CUDA — half-precision weights on CPU are slow and can
# fail in some torch builds.
model = T5ForConditionalGeneration.from_pretrained(
    "roborovski/superprompt-v1",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)
|
|
|
|
|
def generate(
    prompt, history, temperature=0.9, max_new_tokens=250, repetition_penalty=1.0,
):
    """Expand *prompt* into a more detailed prompt with SuperPrompt-v1.

    Called by gr.ChatInterface as fn(message, history, *additional_inputs).

    Args:
        prompt: The user's message to expand.
        history: Chat history supplied by gradio; appended to the prompt text.
        temperature: Accepted for interface compatibility but NOT forwarded to
            ``model.generate`` (sampling is not enabled), so decoding stays
            greedy/deterministic.  NOTE(review): confirm whether sampling was
            intended.
        max_new_tokens: Upper bound on generated tokens.
        repetition_penalty: Penalty applied to repeated tokens.

    Returns:
        The expanded prompt as a plain string.
    """
    input_text = f"{prompt}, {history}"
    input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
    outputs = model.generate(
        input_ids,
        max_new_tokens=max_new_tokens,
        repetition_penalty=repetition_penalty,
    )
    # BUG FIX: skip special tokens — without this the chat window shows the
    # raw "<pad> ... </s>" markers around every response.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
|
|
|
|
|
# gr.ChatInterface passes `additional_inputs` to `generate` POSITIONALLY after
# (message, history), so the sliders must mirror the signature order:
# temperature, max_new_tokens, repetition_penalty.
# BUG FIX: the original single "Repetition penalty" slider was silently routed
# into the `temperature` parameter, leaving repetition_penalty at 1.0.
additional_inputs = [
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more varied output",
    ),
    gr.Slider(
        label="Max new tokens",
        value=250,
        minimum=1,
        maximum=512,
        step=1,
        interactive=True,
        info="Maximum number of tokens to generate",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    ),
]

# One entry per additional input after the example message; None means "use
# the slider's current/default value".
examples = [[
    "Expand the following prompt to add more detail: A storefront with 'Text to Image' written on it.",
    None,
    None,
    None,
]]
|
|
|
# Assemble the chat UI; `generate` receives (message, history, *additional_inputs).
chat_panel = gr.Chatbot(
    show_label=False,
    show_share_button=False,
    show_copy_button=True,
    likeable=True,
    layout="panel",
)

demo = gr.ChatInterface(
    fn=generate,
    chatbot=chat_panel,
    additional_inputs=additional_inputs,
    examples=examples,
    title="SuperPrompt-v1",
    description="Make your prompts more detailed! Especially for AI Art!!!",
    concurrency_limit=20,
)

demo.launch(show_api=False)