ColeGuion committed on
Commit
3edcd37
·
verified ·
1 Parent(s): 7e14f2f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -34,14 +34,14 @@ def format_prompt_grammar(message, history):
34
  prompt_template = "[INST] " + prompt_prefix + ' {} [/INST]'
35
 
36
 
37
- myList = [["It is my friends house in England.", "It is my friend's house in England."], ["Every girl must bring their books to school.", "Every girl must bring her books to school."]]
38
  myList = myList + history
39
 
40
 
41
  # Iterates through every past user input and response to be added to the prompt
42
  for user_prompt, bot_response in myList:
43
  prompt += prompt_template.format(user_prompt)
44
- prompt += f" {bot_response}</s> "
45
 
46
  prompt += prompt_template.format(message)
47
  print("PROMPT: \n\t{}\n".format(prompt))
@@ -60,7 +60,7 @@ def generate(prompt, history, system_prompt, temperature=0.9, max_new_tokens=256
60
  generate_kwargs = dict(temperature=temperature, max_new_tokens=max_new_tokens, top_p=top_p, repetition_penalty=repetition_penalty, do_sample=True, seed=42,)
61
 
62
  #formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
63
- formatted_prompt = format_prompt_grammar(f"{system_prompt}, {prompt}", history)
64
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
65
  output = ""
66
 
@@ -72,7 +72,7 @@ def generate(prompt, history, system_prompt, temperature=0.9, max_new_tokens=256
72
 
73
 
74
  additional_inputs=[
75
- gr.Textbox( label="System Prompt", value="Correct the following sentence to make it grammatically accurate while maintaining the original meaning. Your response should contain nothing more than the corrected text and nothing more." , max_lines=1, interactive=True, ),
76
  gr.Slider( label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs", ),
77
  gr.Slider( label="Max new tokens", value=256, minimum=0, maximum=1048, step=64, interactive=True, info="The maximum numbers of new tokens", ),
78
  gr.Slider( label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens", ),
 
34
  prompt_template = "[INST] " + prompt_prefix + ' {} [/INST]'
35
 
36
 
37
+ myList = [["It is my friends house in England.", "Corrected Sentence: It is my friend's house in England."], ["Every girl must bring their books to school.", "Corrected Sentence: Every girl must bring her books to school."]]
38
  myList = myList + history
39
 
40
 
41
  # Iterates through every past user input and response to be added to the prompt
42
  for user_prompt, bot_response in myList:
43
  prompt += prompt_template.format(user_prompt)
44
+ prompt += f" {bot_response}</s> \n"
45
 
46
  prompt += prompt_template.format(message)
47
  print("PROMPT: \n\t{}\n".format(prompt))
 
60
  generate_kwargs = dict(temperature=temperature, max_new_tokens=max_new_tokens, top_p=top_p, repetition_penalty=repetition_penalty, do_sample=True, seed=42,)
61
 
62
  #formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
63
+ formatted_prompt = format_prompt_grammar(f"{system_prompt} {prompt}", history)
64
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
65
  output = ""
66
 
 
72
 
73
 
74
  additional_inputs=[
75
+ gr.Textbox( label="System Prompt", value="Corrected Sentence:" , max_lines=1, interactive=True, ),
76
  gr.Slider( label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs", ),
77
  gr.Slider( label="Max new tokens", value=256, minimum=0, maximum=1048, step=64, interactive=True, info="The maximum numbers of new tokens", ),
78
  gr.Slider( label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens", ),