mateoluksenberg committed on
Commit
d413725
·
verified ·
1 Parent(s): f4520a3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -9
app.py CHANGED
@@ -212,7 +212,7 @@ EXAMPLES = [
212
  ]
213
 
214
  @spaces.GPU()
215
- def simple_chat(message, history: list, temperature: float = 0.8, max_length: int = 4096, top_p: float = 1, top_k: int = 10, penalty: float = 1.0):
216
 
217
  model = AutoModelForCausalLM.from_pretrained(
218
  MODEL_ID,
@@ -222,7 +222,7 @@ def simple_chat(message, history: list, temperature: float = 0.8, max_length: in
222
  )
223
 
224
  print(f'message is - {message}')
225
- print(f'history is - {history}')
226
  conversation = []
227
  prompt_files = []
228
  if message["files"]:
@@ -233,13 +233,7 @@ def simple_chat(message, history: list, temperature: float = 0.8, max_length: in
233
  format_msg = contents + "\n\n\n" + "{} files uploaded.\n" + message['text']
234
  conversation.append({"role": "user", "content": format_msg})
235
  else:
236
- if len(history) == 0:
237
- # raise gr.Error("Please upload an image first.")
238
- contents = None
239
- conversation.append({"role": "user", "content": message['text']})
240
- else:
241
- # image = Image.open(history[0][0][0])
242
- for prompt, answer in history:
243
  if answer is None:
244
  prompt_files.append(prompt[0])
245
  conversation.extend([{"role": "user", "content": ""}, {"role": "assistant", "content": ""}])
@@ -257,6 +251,7 @@ def simple_chat(message, history: list, temperature: float = 0.8, max_length: in
257
  elif choice == "doc":
258
  format_msg = contents + "\n\n\n" + "{} files uploaded.\n" + message['text']
259
  conversation.append({"role": "user", "content": format_msg})
 
260
  print(f"Conversation is -\n{conversation}")
261
 
262
  input_ids = tokenizer.apply_chat_template(conversation, tokenize=True, add_generation_prompt=True,
 
212
  ]
213
 
214
  @spaces.GPU()
215
+ def simple_chat(message, temperature: float = 0.8, max_length: int = 4096, top_p: float = 1, top_k: int = 10, penalty: float = 1.0):
216
 
217
  model = AutoModelForCausalLM.from_pretrained(
218
  MODEL_ID,
 
222
  )
223
 
224
  print(f'message is - {message}')
225
+
226
  conversation = []
227
  prompt_files = []
228
  if message["files"]:
 
233
  format_msg = contents + "\n\n\n" + "{} files uploaded.\n" + message['text']
234
  conversation.append({"role": "user", "content": format_msg})
235
  else:
236
+ for prompt, answer in history:
 
 
 
 
 
 
237
  if answer is None:
238
  prompt_files.append(prompt[0])
239
  conversation.extend([{"role": "user", "content": ""}, {"role": "assistant", "content": ""}])
 
251
  elif choice == "doc":
252
  format_msg = contents + "\n\n\n" + "{} files uploaded.\n" + message['text']
253
  conversation.append({"role": "user", "content": format_msg})
254
+
255
  print(f"Conversation is -\n{conversation}")
256
 
257
  input_ids = tokenizer.apply_chat_template(conversation, tokenize=True, add_generation_prompt=True,