Update app.py
app.py CHANGED
@@ -12,8 +12,7 @@ if torch.cuda.is_available():
 else:
     device = "cpu"
     print("Using CPU")
-
-model.to(device)
+

 def generate(
     precision_model,
@@ -29,6 +28,8 @@ def generate(

     model = T5ForConditionalGeneration.from_pretrained("roborovski/superprompt-v1", torch_dtype=precision_model)

+    model.to(device)
+
     input_text = f"{system_prompt}, {prompt}"
     input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)

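The substantive fix in these two hunks: the model is only created inside generate(), reloaded on each call with the dtype chosen in the UI, so the old module-level model.to(device) referenced a name that did not exist at import time; the call now follows from_pretrained. A minimal sketch of the resulting flow, assuming the module-level tokenizer, the system_prompt argument, and the generation/decoding step from the rest of app.py (the default system prompt string and max_new_tokens value here are assumptions, not taken from this diff):

import torch
from transformers import T5Tokenizer, T5ForConditionalGeneration

# Device is chosen once at startup, as in app.py.
if torch.cuda.is_available():
    device = "cuda"
    print("Using GPU")
else:
    device = "cpu"
    print("Using CPU")

# Assumption: the tokenizer is loaded at module level elsewhere in app.py.
tokenizer = T5Tokenizer.from_pretrained("roborovski/superprompt-v1")

def generate(precision_model, prompt,
             system_prompt="Expand the following prompt to add more detail"):
    # The model only exists inside generate(), loaded with the dtype the
    # user selected, so the .to(device) call has to happen here as well.
    model = T5ForConditionalGeneration.from_pretrained(
        "roborovski/superprompt-v1", torch_dtype=precision_model
    )
    model.to(device)

    input_text = f"{system_prompt}, {prompt}"
    input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)

    # Assumption: generation settings; app.py's actual kwargs are not in this hunk.
    outputs = model.generate(input_ids, max_new_tokens=77)
    better_prompt = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return better_prompt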
@@ -53,7 +54,7 @@ def generate(
     return better_prompt


-precision_model = gr.Radio([('fp32', torch.float32), ('fp16', torch.float16)], label="Model Precision Type", info="fp32 is more precise but slower, fp16 is faster and less resource-consuming but less precise")
+precision_model = gr.Radio([('fp32', torch.float32), ('fp16', torch.float16)], value='fp16', label="Model Precision Type", info="fp32 is more precise but slower, fp16 is faster and less resource-consuming but less precise")

prompt = gr.Textbox(label="Prompt", interactive=True)

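The only change to the Radio itself is the new value='fp16' default. For reference, one common way to wire such a selector is to send only strings to the browser and map the choice to a torch dtype inside the handler; this dict-based variant is an illustration under that assumption, not app.py's actual code:

import gradio as gr
import torch

# Assumption: string choices mapped to dtypes in the handler, rather than
# passing torch dtype objects through the Radio as app.py does.
DTYPE_MAP = {"fp32": torch.float32, "fp16": torch.float16}

precision_model = gr.Radio(
    ["fp32", "fp16"],
    value="fp16",  # the default this commit adds
    label="Model Precision Type",
    info="fp32 is more precise but slower; fp16 is faster and lighter but less precise",
)
prompt = gr.Textbox(label="Prompt", interactive=True)

def generate(precision_choice, prompt_text):
    dtype = DTYPE_MAP[precision_choice]
    # ... load the model with torch_dtype=dtype and run inference
    # (see the sketch after the second hunk above) ...
    return f"{prompt_text} (generated at {dtype})"

demo = gr.Interface(fn=generate, inputs=[precision_model, prompt], outputs="text")

if __name__ == "__main__":
    demo.launch()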