Pijush2023 committed on
Commit f125575 · verified · 1 Parent(s): 66fee5f

Update app.py

Files changed (1)
app.py +26 −5
app.py CHANGED
@@ -613,7 +613,9 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
 
     if retrieval_mode == "VDB":
         logging.debug("Using VDB retrieval mode")
-        if selected_model in [gpt_model, gpt_mini_model]:
+        # Adjust this block to handle both LM-1 and LM-3
+        if selected_model in [gpt_model, gpt_mini_model]:  # Both LM-1 and LM-3 should use the same logic
+            logging.debug(f"Selected model: {'LM-1' if selected_model == gpt_model else 'LM-3'}")
             retriever = gpt_retriever
             context = retriever.get_relevant_documents(message)
             logging.debug(f"Retrieved context: {context}")
@@ -631,13 +633,16 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
             logging.debug(f"LM-1 or LM-3 response: {response}")
             return response['result'], extract_addresses(response['result'])
 
-        elif selected_model == phi_pipe:
+        elif selected_model == phi_pipe:  # LM-2 specific logic
+            logging.debug("Selected model: LM-2")
             retriever = phi_retriever
             context_documents = retriever.get_relevant_documents(message)
             context = "\n".join([doc.page_content for doc in context_documents])
             logging.debug(f"Retrieved context for LM-2: {context}")
 
             prompt = phi_custom_template.format(context=context, question=message)
+            logging.debug(f"Generated LM-2 prompt: {prompt}")
+
             response = selected_model(prompt, **{
                 "max_new_tokens": 400,
                 "return_full_text": True,
@@ -647,6 +652,7 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
 
             if response:
                 generated_text = response[0]['generated_text']
+                logging.debug(f"LM-2 Response: {generated_text}")
                 cleaned_response = clean_response(generated_text)
                 return cleaned_response, extract_addresses(cleaned_response)
             else:
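A note on the LM-2 branch above: indexing response[0]['generated_text'] matches the return shape of a Hugging Face text-generation pipeline, which yields one dict per generated sequence. Below is a minimal sketch of that call pattern; the checkpoint name is a stand-in, since this diff does not show how phi_pipe is actually constructed in app.py.

from transformers import pipeline

# Stand-in checkpoint; app.py builds its own phi_pipe elsewhere.
phi_pipe = pipeline("text-generation", model="microsoft/phi-2")

prompt = "Context: ...\nQuestion: What is nearby?\nAnswer:"
response = phi_pipe(
    prompt,
    max_new_tokens=400,     # same cap as the committed call
    return_full_text=True,  # generated_text includes the prompt
)

# The pipeline returns a list like [{'generated_text': '...'}],
# so index 0 picks the first (and here only) generated sequence.
generated_text = response[0]["generated_text"]
print(generated_text)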
@@ -654,7 +660,9 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
                 return "No response generated.", []
 
     elif retrieval_mode == "KGF":
+        logging.debug("Using KGF retrieval mode")
         response = chain_neo4j.invoke({"question": message})
+        logging.debug(f"KGF response: {response}")
         return response, extract_addresses(response)
     else:
         logging.error("Invalid retrieval mode selected.")
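Similarly, the KGF branch assumes chain_neo4j.invoke({"question": ...}) returns a plain string. The sketch below is a hypothetical reconstruction using LangChain's LCEL interface: the prompt, the model choice, and the Neo4j graph-retrieval step that app.py presumably folds in are all assumptions here; only the invoke() contract is taken from the diff.

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI

# Hypothetical stand-in for app.py's chain_neo4j; the real chain would also
# inject graph context retrieved from Neo4j before calling the model.
prompt = ChatPromptTemplate.from_template(
    "Answer using the knowledge-graph context.\nQuestion: {question}"
)
chain_neo4j = prompt | ChatOpenAI(model="gpt-4o-mini") | StrOutputParser()

# .invoke() takes the dict of prompt variables and, with StrOutputParser,
# returns a plain string, matching `return response, extract_addresses(response)`.
response = chain_neo4j.invoke({"question": "What restaurants are nearby?"})
print(response)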
@@ -668,6 +676,7 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
 
 
 
+
 def add_message(history, message):
     history.append((message, None))
     return history, gr.Textbox(value="", interactive=True, show_label=False)
@@ -1141,16 +1150,28 @@ def handle_retrieval_mode_change(choice):
 
 
 
+# def handle_model_choice_change(selected_model):
+#     if selected_model == "LM-2":
+#         # Disable retrieval mode and select style when LM-2 is selected
+#         return gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=False)
+#     elif selected_model == "LM-1":
+#         # Enable retrieval mode and select style for LM-1
+#         return gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True)
+#     else:
+#         # Default case: allow interaction
+#         return gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True)
+
+
 def handle_model_choice_change(selected_model):
     if selected_model == "LM-2":
         # Disable retrieval mode and select style when LM-2 is selected
         return gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=False)
-    elif selected_model == "LM-1":
-        # Enable retrieval mode and select style for LM-1
+    elif selected_model in ["LM-1", "LM-3"]:
+        # Enable retrieval mode and select style for LM-1 and LM-3
         return gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True)
     else:
         # Default case: allow interaction
         return gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True)
 
 
 
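For the handler just above, here is a minimal sketch of how handle_model_choice_change is typically attached to the UI in a Gradio Blocks app. The component names and choice lists (model_choice, retrieval_mode, style_choice, question_box) are illustrative assumptions rather than identifiers from app.py; only the three-update return shape comes from the diff.

import gradio as gr

def handle_model_choice_change(selected_model):
    # Condensed but behaviorally equivalent to the committed handler:
    # LM-2 locks the other controls; LM-1/LM-3 (and any other value)
    # leave them interactive.
    locked = selected_model == "LM-2"
    return (
        gr.update(interactive=not locked),
        gr.update(interactive=not locked),
        gr.update(interactive=not locked),
    )

with gr.Blocks() as demo:
    model_choice = gr.Radio(["LM-1", "LM-2", "LM-3"], value="LM-1", label="Model")
    retrieval_mode = gr.Radio(["VDB", "KGF"], value="VDB", label="Retrieval mode")
    style_choice = gr.Radio(["Details", "Conversational"], label="Response style")
    question_box = gr.Textbox(label="Ask a question")
    # One gr.update per output component, returned in the same order.
    model_choice.change(
        handle_model_choice_change,
        inputs=model_choice,
        outputs=[retrieval_mode, style_choice, question_box],
    )

demo.launch()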
 