Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -7,7 +7,7 @@ from peft import PeftModel, PeftConfig
|
|
7 |
token = os.environ.get("token")
|
8 |
login(token)
|
9 |
print("login is successful")
|
10 |
-
max_length=
|
11 |
|
12 |
MODEL_NAME = "google/flan-t5-base"
|
13 |
tokenizer = T5Tokenizer.from_pretrained(MODEL_NAME, token=token)
|
@@ -17,7 +17,7 @@ model = PeftModel.from_pretrained(base_model, "Komal-patra/results")
|
|
17 |
|
18 |
#gr.Interface.from_pipeline(pipe).launch()
|
19 |
|
20 |
-
def generate_text(prompt, max_length=
|
21 |
"""Generates text using the PEFT model.
|
22 |
Args:
|
23 |
prompt (str): The user-provided prompt to start the generation.
|
@@ -34,9 +34,8 @@ def generate_text(prompt, max_length=200):
|
|
34 |
outputs = model.generate(
|
35 |
input_ids = inputs["input_ids"],
|
36 |
max_length=max_length,
|
37 |
-
num_beams=
|
38 |
-
|
39 |
-
temperature=0.9
|
40 |
)
|
41 |
|
42 |
print(outputs)
|
|
|
7 |
token = os.environ.get("token")
|
8 |
login(token)
|
9 |
print("login is successful")
|
10 |
+
max_length=512
|
11 |
|
12 |
MODEL_NAME = "google/flan-t5-base"
|
13 |
tokenizer = T5Tokenizer.from_pretrained(MODEL_NAME, token=token)
|
|
|
17 |
|
18 |
#gr.Interface.from_pipeline(pipe).launch()
|
19 |
|
20 |
+
def generate_text(prompt, max_length=512):
|
21 |
"""Generates text using the PEFT model.
|
22 |
Args:
|
23 |
prompt (str): The user-provided prompt to start the generation.
|
|
|
34 |
outputs = model.generate(
|
35 |
input_ids = inputs["input_ids"],
|
36 |
max_length=max_length,
|
37 |
+
num_beams=1,
|
38 |
+
temperature=0.1
|
|
|
39 |
)
|
40 |
|
41 |
print(outputs)
|