ColeGuion committed on
Commit
716b126
·
verified ·
1 Parent(s): 2915268

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -5
app.py CHANGED
@@ -51,10 +51,7 @@ def format_prompt(message, history):
51
  print("PROMPT: \n\t{}\n".format(prompt))
52
  return prompt
53
 
54
- def format_my_prompt(user_input):
55
- # Formatting the prompt as per the new template
56
- prompt = f"<s> [INST] Please correct the grammatical errors in the following sentence: {user_input} [/INST] Model answer</s> [INST] Return only the grammatically corrected sentence. [/INST]"
57
- return prompt
58
 
59
 
60
  def generate(prompt, history, system_prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
@@ -67,7 +64,6 @@ def generate(prompt, history, system_prompt, temperature=0.9, max_new_tokens=256
67
  generate_kwargs = dict(temperature=temperature, max_new_tokens=max_new_tokens, top_p=top_p, repetition_penalty=repetition_penalty, do_sample=True, seed=42,)
68
 
69
  formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
70
- #formatted_prompt = format_my_prompt(prompt)
71
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
72
  output = ""
73
 
 
51
  print("PROMPT: \n\t{}\n".format(prompt))
52
  return prompt
53
 
54
+
 
 
 
55
 
56
 
57
  def generate(prompt, history, system_prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
 
64
  generate_kwargs = dict(temperature=temperature, max_new_tokens=max_new_tokens, top_p=top_p, repetition_penalty=repetition_penalty, do_sample=True, seed=42,)
65
 
66
  formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
 
67
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
68
  output = ""
69