Pijush2023 committed on
Commit
33a1cf5
·
verified ·
1 Parent(s): 05cbb9e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -5
app.py CHANGED
@@ -376,14 +376,23 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
376
  elif selected_model == phi_pipe:
377
  # Use Phi-3.5 with its vector store and always use template2
378
  retriever = phi_retriever
379
- prompt_template = QA_CHAIN_PROMPT_2
 
 
 
380
 
381
- # Retrieve context and construct the prompt
382
- context = retriever.get_relevant_documents(message)
383
- prompt = prompt_template.format(context=context, question=message)
 
 
384
 
385
  if selected_model == chat_model:
386
  # Use GPT-4o with Langchain
 
 
 
 
387
  qa_chain = RetrievalQA.from_chain_type(
388
  llm=chat_model,
389
  chain_type="stuff",
@@ -394,7 +403,7 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
394
  return response['result'], extract_addresses(response['result'])
395
 
396
  elif selected_model == phi_pipe:
397
- # Use Phi-3.5 directly with the formatted prompt and specific vector store
398
  response = selected_model(prompt, **{
399
  "max_new_tokens": 300,
400
  "return_full_text": False,
 
376
  elif selected_model == phi_pipe:
377
  # Use Phi-3.5 with its vector store and always use template2
378
  retriever = phi_retriever
379
+ context = retriever.get_relevant_documents(message)
380
+ # Construct a simple, direct prompt for Phi-3.5
381
+ prompt = f"""
382
+ Based on the following information, provide a concise and well-formatted response without including questions or 'Helpful Answer' sections:
383
 
384
+ {context}
385
+
386
+ Information:
387
+ {message}
388
+ """
389
 
390
  if selected_model == chat_model:
391
  # Use GPT-4o with Langchain
392
+ prompt_template = QA_CHAIN_PROMPT_2 # Always using template2 for simplicity
393
+ context = retriever.get_relevant_documents(message)
394
+ prompt = prompt_template.format(context=context, question=message)
395
+
396
  qa_chain = RetrievalQA.from_chain_type(
397
  llm=chat_model,
398
  chain_type="stuff",
 
403
  return response['result'], extract_addresses(response['result'])
404
 
405
  elif selected_model == phi_pipe:
406
+ # Use Phi-3.5 directly with the simplified prompt
407
  response = selected_model(prompt, **{
408
  "max_new_tokens": 300,
409
  "return_full_text": False,