cakemus committed on
Commit 4e0781d · 1 Parent(s): 86a1b48

max length

Files changed (1)
  1. app.py +2 -1
app.py CHANGED
@@ -8,7 +8,8 @@ def generate_text(prompt):
     # Load the model within the function so that it only runs on GPU when the function is called
     model = pipeline("text-generation", model="EleutherAI/gpt-neo-125M", device=0)
 
-    return model(prompt, max_length=50)[0]["generated_text"]
+    return model(prompt, max_length=150, temperature=0.7, top_p=0.9)[0]["generated_text"]
+
 
 # Create the Gradio interface
 interface = gr.Interface(
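
For context, this commit raises the generation cap from max_length=50 to max_length=150 and adds the sampling parameters temperature=0.7 and top_p=0.9 to the pipeline call. A minimal sketch of what the full app.py plausibly looks like after the commit follows; only the diffed lines above come from the commit itself, while the imports, the gr.Interface arguments, and the launch() call are assumptions added for completeness.

import gradio as gr
from transformers import pipeline

def generate_text(prompt):
    # Load the model within the function so that it only runs on GPU when the function is called
    model = pipeline("text-generation", model="EleutherAI/gpt-neo-125M", device=0)

    # Parameters from the commit; note that temperature and top_p only take effect
    # when sampling is enabled (do_sample=True), otherwise greedy decoding is used.
    return model(prompt, max_length=150, temperature=0.7, top_p=0.9)[0]["generated_text"]


# Create the Gradio interface (these arguments are assumed; they are not part of the diff)
interface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
)

interface.launch()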