Norod78 committed
Commit ddf9fae · Parent: 0e85e84

Update app.py

Files changed (1): app.py +2 -2
app.py CHANGED
@@ -42,7 +42,7 @@ def extend(input_text, max_size=20, top_k=50, top_p=0.95):
                              top_k=top_k,
                              top_p=top_p,
                              do_sample=True,
-                             repetition_penalty=25.0,
+                             repetition_penalty=2.5,
                              num_return_sequences=1)
 
     # Remove the batch dimension when returning multiple sequences
@@ -99,7 +99,7 @@ if __name__ == "__main__":
 
     st.sidebar.subheader("Configurable parameters")
 
-    max_len = st.sidebar.slider("Max-Length", 0, 256, 192,help="The maximum length of the sequence to be generated.")
+    max_len = st.sidebar.slider("Max-Length", 0, 128, 192,help="The maximum length of the sequence to be generated.")
     top_k = st.sidebar.slider("Top-K", 0, 100, 40, help="The number of highest probability vocabulary tokens to keep for top-k-filtering.")
     top_p = st.sidebar.slider("Top-P", 0.0, 1.0, 0.92, help="If set to float < 1, only the most probable tokens with probabilities that add up to top_p or higher are kept for generation.")
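
For context, a minimal, self-contained sketch of how the two touched parameters plug into the APIs this Space uses (Hugging Face transformers' generate() and Streamlit's sidebar sliders). The model name, prompt, and the slider default of 96 are placeholders, not values from the commit; note in particular that Streamlit rejects a slider default outside [min, max], so the committed default of 192 against the new maximum of 128 would not validate as written, and the sketch substitutes 96.

```python
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "gpt2"  # placeholder; the Space loads its own model

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

# Sidebar controls mirroring the diff. st.sidebar.slider takes
# (label, min_value, max_value, value); the default must lie within
# [min, max], so 96 stands in here for the committed 192.
max_len = st.sidebar.slider("Max-Length", 0, 128, 96)
top_k = st.sidebar.slider("Top-K", 0, 100, 40)
top_p = st.sidebar.slider("Top-P", 0.0, 1.0, 0.92)

input_ids = tokenizer.encode("Example prompt", return_tensors="pt")

# repetition_penalty penalizes the logits of tokens that have already
# appeared (1.0 = no penalty): 25.0 all but forbids any repetition,
# while 2.5 discourages loops without crippling normal prose.
output = model.generate(input_ids,
                        max_length=max_len,
                        top_k=top_k,
                        top_p=top_p,
                        do_sample=True,
                        repetition_penalty=2.5,
                        num_return_sequences=1)

st.write(tokenizer.decode(output[0], skip_special_tokens=True))
```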