skoneru committed on
Commit
1f72f50
·
verified ·
1 Parent(s): 88bd624

Update app.py

Browse files

Simple Web interface for Vicuna

Files changed (1) hide show
  1. app.py +3 -2
app.py CHANGED
@@ -119,7 +119,7 @@ def get_menu():
119
  return menu
120
 
121
  def reply_bot(message, history):
122
- menu = get_menu()
123
 
124
  client = InferenceClient(model="https://8cc9-141-3-25-29.ngrok-free.app")
125
 
@@ -133,7 +133,8 @@ def reply_bot(message, history):
133
  curr_prompt = system_prompt + message + " [/INST]"
134
  else:
135
  curr_prompt = "<s>[INST] <<SYS>>\nYou are multilingual chat bot that helps deciding what to eat in a german canteen. In the canteen, there are different lines with names and each line may offer several food people can choose from or only one. Based on the menu and question, you suggest the user which line they should go to. You respond really briefly and do not generate long responses. You can only suggest them from the menu and which line they can go to. Nothing else!\n<</SYS>>\n\nMenu:\n" + menu + "\n" + message + " [/INST]"
136
-
 
137
  try:
138
  print(curr_prompt)
139
  #answer = client.text_generation(prompt=prompt, max_new_tokens=512)
 
119
  return menu
120
 
121
  def reply_bot(message, history):
122
+ #menu = get_menu()
123
 
124
  client = InferenceClient(model="https://8cc9-141-3-25-29.ngrok-free.app")
125
 
 
133
  curr_prompt = system_prompt + message + " [/INST]"
134
  else:
135
  curr_prompt = "<s>[INST] <<SYS>>\nYou are multilingual chat bot that helps deciding what to eat in a german canteen. In the canteen, there are different lines with names and each line may offer several food people can choose from or only one. Based on the menu and question, you suggest the user which line they should go to. You respond really briefly and do not generate long responses. You can only suggest them from the menu and which line they can go to. Nothing else!\n<</SYS>>\n\nMenu:\n" + menu + "\n" + message + " [/INST]"
136
+
137
+ curr_prompt = message
138
  try:
139
  print(curr_prompt)
140
  #answer = client.text_generation(prompt=prompt, max_new_tokens=512)