5to9 committed on
Commit
eea02ee
·
1 Parent(s): 19ef1ca

0.9 logging template func

Browse files
Files changed (1) hide show
  1. app.py +3 -1
app.py CHANGED
@@ -123,8 +123,9 @@ def generate_both(system_prompt, input_text, chatbot_a, chatbot_b, max_new_token
123
  new_messages_b = system_prompt_list + chat_history_b + input_text_list
124
 
125
  if "Pharia" in model_id_a:
126
- logging.debug("model a is pharia based, applying own template")
127
  formatted_message_a = apply_chat_template(new_messages_a, add_generation_prompt=True)
 
128
  input_ids_a = tokenizer_b(formatted_message_a, return_tensors="pt").input_ids.to(model_a.device)
129
  else:
130
  input_ids_a = tokenizer_a.apply_chat_template(
@@ -136,6 +137,7 @@ def generate_both(system_prompt, input_text, chatbot_a, chatbot_b, max_new_token
136
  if "Pharia" in model_id_b:
137
  logging.debug("model b is Pharia based, applying own template")
138
  formatted_message_b = apply_chat_template(new_messages_a, add_generation_prompt=True)
 
139
  input_ids_b = tokenizer_b(formatted_message_b, return_tensors="pt").input_ids.to(model_b.device)
140
  else:
141
  input_ids_b = tokenizer_b.apply_chat_template(
 
123
  new_messages_b = system_prompt_list + chat_history_b + input_text_list
124
 
125
  if "Pharia" in model_id_a:
126
+ logging.debug("***** Model a is Pharia based, applying own template")
127
  formatted_message_a = apply_chat_template(new_messages_a, add_generation_prompt=True)
128
+ logging.debug(f"***** formatted message is {formatted_message_a}")
129
  input_ids_a = tokenizer_b(formatted_message_a, return_tensors="pt").input_ids.to(model_a.device)
130
  else:
131
  input_ids_a = tokenizer_a.apply_chat_template(
 
137
  if "Pharia" in model_id_b:
138
  logging.debug("model b is Pharia based, applying own template")
139
  formatted_message_b = apply_chat_template(new_messages_a, add_generation_prompt=True)
140
+ logging.debug(f"***** formatted message is {formatted_message_b}")
141
  input_ids_b = tokenizer_b(formatted_message_b, return_tensors="pt").input_ids.to(model_b.device)
142
  else:
143
  input_ids_b = tokenizer_b.apply_chat_template(