Nick088 committed on
Commit 051f066 · verified · 1 Parent(s): 701a9da

Update app.py

Files changed (1)
  1. app.py +5 -3
app.py CHANGED
@@ -11,8 +11,8 @@ else:
     print("Using CPU")
 
 tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-small")
-model = T5ForConditionalGeneration.from_pretrained("roborovski/superprompt-v1", torch_dtype=torch.float16)
-
+model = T5ForConditionalGeneration.from_pretrained("roborovski/superprompt-v1", device_map="auto", torch_dtype=model_precision_type)
+
 model.to(device)
 
 
@@ -51,6 +51,8 @@ def generate(
 
 prompt = gr.Textbox(label="Prompt", interactive=True)
 
+model_precision_type = gr.Dropdown(choices=[('fp16', torch.float16), ('fp32', torch.float32)], type="value", value=torch.float16, label="Model Precision", info="fp16 is faster, fp32 is more precise")
+
 max_new_tokens = gr.Slider(value=512, minimum=250, maximum=512, step=1, interactive=True, label="Max New Tokens", info="The maximum number of new tokens; controls how long the output is")
 
 repetition_penalty = gr.Slider(value=1.2, minimum=0, maximum=2, step=0.05, interactive=True, label="Repetition Penalty", info="Penalizes repeated tokens, making the AI repeat itself less")
@@ -77,7 +79,7 @@ examples = [
 
 gr.Interface(
     fn=generate,
-    inputs=[prompt, max_new_tokens, repetition_penalty, temperature, top_p, top_k, seed],
+    inputs=[prompt, model_precision_type, max_new_tokens, repetition_penalty, temperature, top_p, top_k, seed],
     outputs=gr.Textbox(label="Better Prompt"),
     title="SuperPrompt-v1",
     description='Make your prompts more detailed! <br> <a href="https://huggingface.co/roborovski/superprompt-v1">Model used</a> <br> <a href="https://brianfitzgerald.xyz/prompt-augmentation/">Model Blog</a> <br> Task Prefix: "Expand the following prompt to add more detail:" is already set! <br> Hugging Face Space made by [Nick088](https://linktr.ee/Nick088)',
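The body of generate() is not shown in this diff, so the following is only a minimal sketch of how the selected precision could be consumed per request. It assumes the dropdown delivers a plain string ("fp16" or "fp32") that is mapped to a torch dtype inside generate(); the DTYPES helper and default argument values are hypothetical and not part of this commit, and the other inputs (temperature, top_p, top_k, seed) are omitted for brevity.

# Hypothetical sketch, not part of this commit: one way generate() could
# apply the new model_precision_type input, assuming it arrives as a string.
import torch
from transformers import T5Tokenizer, T5ForConditionalGeneration

device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-small")

# Assumed mapping from dropdown label to torch dtype.
DTYPES = {"fp16": torch.float16, "fp32": torch.float32}

def generate(prompt, model_precision_type="fp32", max_new_tokens=512, repetition_penalty=1.2):
    # Load the model in the requested precision; a real app would cache one
    # model per dtype instead of reloading it on every call.
    dtype = DTYPES.get(model_precision_type, torch.float32)
    model = T5ForConditionalGeneration.from_pretrained(
        "roborovski/superprompt-v1", torch_dtype=dtype
    ).to(device)

    # The Space already prepends this task prefix (see the description above).
    input_text = f"Expand the following prompt to add more detail: {prompt}"
    input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
    output_ids = model.generate(
        input_ids,
        max_new_tokens=max_new_tokens,
        repetition_penalty=repetition_penalty,
    )
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)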