Update app.py
app.py CHANGED
@@ -8,10 +8,10 @@ tokenizer = AutoTokenizer.from_pretrained("gpt2")
 model = AutoModelForCausalLM.from_pretrained("gpt2")
 
 print("Loading finished.")
-n_steps=12
-n_beams=1
-length_penalty=1
-num_return_sequences=3
+global n_steps=12
+global n_beams=1
+global length_penalty=1
+global num_return_sequences=3
 
 print(f"Is CUDA available: {torch.cuda.is_available()}")
 # True
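The four settings touched by this commit correspond to decoding parameters of the transformers generate() API (a token budget, a beam count, a length penalty, and the number of returned sequences). Below is a minimal sketch of how such module-level settings are typically consumed; the prompt, the mapping of n_steps to max_new_tokens, and the do_sample fallback are illustrative assumptions, not part of the Space's actual app.py.

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
print("Loading finished.")

n_steps = 12              # assumed here to mean the number of new tokens to generate
n_beams = 1
length_penalty = 1        # only affects beam search (n_beams > 1)
num_return_sequences = 3

print(f"Is CUDA available: {torch.cuda.is_available()}")

# Hypothetical usage: feed the module-level settings into generate().
inputs = tokenizer("Today is", return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_new_tokens=n_steps,
    num_beams=n_beams,
    length_penalty=length_penalty,
    num_return_sequences=num_return_sequences,
    do_sample=(n_beams == 1),  # sampling lets a single beam return several sequences
)
for seq in outputs:
    print(tokenizer.decode(seq, skip_special_tokens=True))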