ColeGuion committed
Commit 9d59ab3 · verified · 1 Parent(s): 913f8fe

Update app.py

Files changed (1):
  app.py (+4 -8)
app.py CHANGED

@@ -37,18 +37,15 @@ def format_prompt_grammar(message, history):
 
 
 
-def generate(prompt, history, system_prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0, gec=False):
+def generate(prompt, history, system_prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
     temperature = float(temperature)
     if temperature < 1e-2: temperature = 1e-2
     top_p = float(top_p)
 
     generate_kwargs = dict(temperature=temperature, max_new_tokens=max_new_tokens, top_p=top_p, repetition_penalty=repetition_penalty, do_sample=True, seed=42,)
 
-
-    if gec:
-        formatted_prompt = format_prompt_grammar(f"Corrected Sentence: {prompt}", history)
-    else:
-        formatted_prompt = format_prompt(f"{system_prompt} {prompt}", history)
+    #formatted_prompt = format_prompt_grammar(f"Corrected Sentence: {prompt}", history)
+    formatted_prompt = format_prompt(f"{system_prompt} {prompt}", history)
     print("\nPROMPT: \n\t" + formatted_prompt)
 
     # Generate text from the HF inference
@@ -67,8 +64,7 @@ additional_inputs=[
     gr.Slider( label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs", ),
     gr.Slider( label="Max new tokens", value=256, minimum=0, maximum=1048, step=64, interactive=True, info="The maximum numbers of new tokens", ),
     gr.Slider( label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens", ),
-    gr.Slider( label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens", ),
-    gr.Checkbox( label="GEC", value="GEC Mode?", interactive=True, )
+    gr.Slider( label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens", )
 ]
 
 examples=['Give me the grammatically correct version of the sentence: "We shood buy an car"', "Give me an example exam question testing students on square roots on basic integers", "Would this block of HTML code run?\n```\n\n```"]
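
Net effect of the commit: the `gec` flag and its branch are removed from `generate`, and the GEC checkbox is dropped from `additional_inputs`, so every request now goes through `format_prompt`. Below is a minimal runnable sketch of the app as it stands after this change. The model name, the body of `format_prompt`, the System Prompt textbox, and the `gr.ChatInterface` wiring are assumptions reconstructed from context, since they fall outside the hunks shown above.

```python
# Minimal sketch of app.py after commit 9d59ab3 (not the full file).
# Assumptions: the model name, format_prompt's body, and the ChatInterface
# wiring are reconstructed; they are not visible in the diff hunks.
import gradio as gr
from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")  # assumed model

def format_prompt(message, history):
    # Assumed helper: Mistral-style [INST] wrapping of the chat history.
    prompt = "<s>"
    for user_turn, bot_turn in history:
        prompt += f"[INST] {user_turn} [/INST] {bot_turn}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

def generate(prompt, history, system_prompt, temperature=0.9, max_new_tokens=256,
             top_p=0.95, repetition_penalty=1.0):
    temperature = float(temperature)
    if temperature < 1e-2: temperature = 1e-2  # keep temperature strictly positive
    top_p = float(top_p)

    generate_kwargs = dict(temperature=temperature, max_new_tokens=max_new_tokens,
                           top_p=top_p, repetition_penalty=repetition_penalty,
                           do_sample=True, seed=42)

    # Post-commit: the gec branch is gone; every request goes through format_prompt.
    formatted_prompt = format_prompt(f"{system_prompt} {prompt}", history)
    print("\nPROMPT: \n\t" + formatted_prompt)

    # Generate text from the HF inference endpoint, streaming partial output.
    stream = client.text_generation(formatted_prompt, **generate_kwargs,
                                    stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        output += response.token.text
        yield output

additional_inputs = [
    gr.Textbox(label="System Prompt", interactive=True),  # assumed; implied by generate()'s signature
    gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05,
              interactive=True, info="Higher values produce more diverse outputs"),
    gr.Slider(label="Max new tokens", value=256, minimum=0, maximum=1048, step=64,
              interactive=True, info="The maximum numbers of new tokens"),
    gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1,
              step=0.05, interactive=True, info="Higher values sample more low-probability tokens"),
    gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0,
              step=0.05, interactive=True, info="Penalize repeated tokens"),
]

# Gradio passes each extra component's value to generate() positionally after
# (message, history), so system_prompt must come first in this list.
gr.ChatInterface(fn=generate, additional_inputs=additional_inputs).launch()
```

A side note on the removed line: `gr.Checkbox(label="GEC", value="GEC Mode?", interactive=True)` passed a string where `value` expects a boolean; "GEC Mode?" was likely meant for `info`. Dropping the checkbox removes that latent bug along with the feature.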