Update app.py
app.py CHANGED
@@ -3,6 +3,18 @@ import torch
 import random
 from transformers import T5Tokenizer, T5ForConditionalGeneration
 
+tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-small")
+model = T5ForConditionalGeneration.from_pretrained("roborovski/superprompt-v1", torch_dtype=torch.float16)
+
+if torch.cuda.is_available():
+    device = "cuda"
+    print("Using GPU")
+else:
+    device = "cpu"
+    print("Using CPU")
+
+model.to(device)
+
 def generate(
     prompt,
     history,
@@ -13,19 +25,7 @@ def generate(
     top_k,
     random_seed,
     seed,
-    precision=torch.float16,
 ):
-    tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-small")
-    model = T5ForConditionalGeneration.from_pretrained("roborovski/superprompt-v1", torch_dtype=torch.float16)
-
-    if torch.cuda.is_available():
-        device = "cuda"
-        print("Using GPU")
-    else:
-        device = "cpu"
-        print("Using CPU")
-
-    model.to(device)
 
     input_text = f"{prompt}, {history}"
     input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
@@ -106,12 +106,6 @@ additional_inputs = [
         label="Manual Seed",
         info="A starting point to initiate the generation process"
     ),
-    gr.Radio(
-        choices=[("fp32", torch.float32), ("fp16", torch.float16)],
-        value=torch.float16,
-        label="Model Precision",
-        info="fp32 is more precised, fp16 is faster and less memory consuming",
-    ),
 ]
 
 
@@ -125,7 +119,6 @@ examples = [
         50,
         False,
         42,
-        torch.float16,
     ]
 ]
 
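For reference, below is a minimal, self-contained sketch of the load-once pattern this commit adopts: the tokenizer and model are constructed at module import, so every call to generate() reuses the same weights instead of re-instantiating them per request. The simplified generate() signature, the max_new_tokens cap, and the __main__ demo are illustrative assumptions; the actual app.py keeps its full Gradio parameter list. Only the model names, device selection, and fp16 dtype come from the diff above.

import torch
from transformers import T5Tokenizer, T5ForConditionalGeneration

# Load once at import time, as the commit does, rather than inside the
# request handler. Model and repo names are taken from the diff above.
tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-small")
model = T5ForConditionalGeneration.from_pretrained(
    "roborovski/superprompt-v1", torch_dtype=torch.float16
)

device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using GPU" if device == "cuda" else "Using CPU")
# Note: the commit pins float16 even on CPU, where half precision can be
# slow or unsupported for some ops.
model.to(device)

def generate(prompt: str) -> str:
    # Each call reuses the module-level weights; nothing is reloaded here.
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
    output_ids = model.generate(input_ids, max_new_tokens=77)  # illustrative cap
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)

if __name__ == "__main__":
    print(generate("A cat sitting on a windowsill"))

Hoisting the construction out of the handler is what removes the per-request load cost; it also fixes the precision at startup, which is presumably why the fp16/fp32 gr.Radio input, the precision parameter, and the matching examples entry are all deleted in the same commit.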