Pijush2023 committed on
Commit
3da746f
·
verified ·
1 Parent(s): 98bc14b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -25
app.py CHANGED
@@ -1015,27 +1015,37 @@ def retriever_neo4j(question: str):
1015
  structured_data = structured_retriever(question)
1016
  return structured_data
1017
 
1018
- template = """Answer the question based only on the following context:
1019
- {context}
1020
- Question: {question}
1021
- Use natural language and be concise.
1022
- Answer:"""
1023
-
1024
- qa_prompt = ChatPromptTemplate.from_template(template)
1025
-
1026
- chain_neo4j = (
1027
- RunnableParallel(
1028
- {
1029
- "context": _search_query | retriever_neo4j,
1030
- "question": RunnablePassthrough(),
1031
- }
1032
- )
1033
- | qa_prompt
1034
- | chat_model
1035
- | StrOutputParser()
 
 
 
 
 
 
 
 
 
 
 
1036
  )
1037
 
1038
- # Define a function to select between Pinecone and Neo4j
1039
  def generate_answer(message, choice, retrieval_mode):
1040
  logging.debug(f"generate_answer called with choice: {choice} and retrieval_mode: {retrieval_mode}")
1041
 
@@ -1049,12 +1059,21 @@ def generate_answer(message, choice, retrieval_mode):
1049
  chain_type_kwargs={"prompt": prompt_template}
1050
  )
1051
  response = qa_chain({"query": message})
1052
- return response['result'], extract_addresses(response['result'])
1053
  elif retrieval_mode == "Knowledge-Graph":
1054
- context = retriever_neo4j(message)
1055
- qa_chain = ChatPromptTemplate.from_template(prompt_template.template)
1056
- response = qa_chain.invoke({"context": context, "question": message})
1057
- return response['result'], extract_addresses(response['result'])
 
 
 
 
 
 
 
 
 
1058
  else:
1059
  return "Invalid retrieval mode selected.", []
1060
 
@@ -1255,7 +1274,7 @@ def show_map_if_details(history, choice):
1255
  if choice in ["Details", "Conversational"]:
1256
  return gr.update(visible=True), update_map_with_response(history)
1257
  else:
1258
- return gr.update(visible(False)), ""
1259
 
1260
  def generate_audio_elevenlabs(text):
1261
  XI_API_KEY = os.environ['ELEVENLABS_API']
 
1015
  structured_data = structured_retriever(question)
1016
  return structured_data
1017
 
1018
# Prompt used to condense a multi-turn conversation into one standalone
# question, so the downstream retriever always receives self-contained input.
# The template text is runtime data consumed by the LLM — kept verbatim.
_template = """Given the following conversation and a follow-up question, rephrase the follow-up question to be a standalone question,
in its original language.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""

# Compiled prompt object; referenced by the `_search_query` branch below.
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
1027
def _format_chat_history(chat_history: list[tuple[str, str]]) -> list:
    """Expand (human, ai) text pairs into a flat, alternating list of
    HumanMessage/AIMessage objects, preserving conversation order."""
    return [
        message
        for human, ai in chat_history
        for message in (HumanMessage(content=human), AIMessage(content=ai))
    ]
1033
+
1034
# Router for the retrieval query. When the input dict carries chat history,
# first condense (history + follow-up) into a standalone question via an LLM;
# otherwise forward the raw question unchanged.
_search_query = RunnableBranch(
    (
        # Branch condition: a truthy "chat_history" key on the input dict.
        RunnableLambda(lambda x: bool(x.get("chat_history"))).with_config(
            run_name="HasChatHistoryCheck"
        ),
        # Rewrite path: format the history into message objects, then ask
        # the model for a standalone question, parsed to plain text.
        RunnablePassthrough.assign(
            chat_history=lambda x: _format_chat_history(x["chat_history"])
        )
        | CONDENSE_QUESTION_PROMPT
        | ChatOpenAI(temperature=0, api_key=os.environ['OPENAI_API_KEY'])
        | StrOutputParser(),
    ),
    # Default path: no history — use the question as-is.
    RunnableLambda(lambda x : x["question"]),
)
1048
 
 
1049
  def generate_answer(message, choice, retrieval_mode):
1050
  logging.debug(f"generate_answer called with choice: {choice} and retrieval_mode: {retrieval_mode}")
1051
 
 
1059
  chain_type_kwargs={"prompt": prompt_template}
1060
  )
1061
  response = qa_chain({"query": message})
1062
+ return response['output'], extract_addresses(response['output'])
1063
  elif retrieval_mode == "Knowledge-Graph":
1064
+ chain_neo4j = (
1065
+ RunnableParallel(
1066
+ {
1067
+ "context": _search_query | retriever_neo4j,
1068
+ "question": RunnablePassthrough(),
1069
+ }
1070
+ )
1071
+ | prompt_template
1072
+ | chat_model
1073
+ | StrOutputParser()
1074
+ )
1075
+ response = chain_neo4j.invoke({"question": message})
1076
+ return response['output'], extract_addresses(response['output'])
1077
  else:
1078
  return "Invalid retrieval mode selected.", []
1079
 
 
1274
  if choice in ["Details", "Conversational"]:
1275
  return gr.update(visible=True), update_map_with_response(history)
1276
  else:
1277
+ return gr.update(visible=False), ""
1278
 
1279
  def generate_audio_elevenlabs(text):
1280
  XI_API_KEY = os.environ['ELEVENLABS_API']