Pijush2023 committed on
Commit
404b94f
·
verified ·
1 Parent(s): eeeece1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -23
app.py CHANGED
@@ -368,31 +368,20 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
368
  response = fetch_google_flights()
369
  return response, extract_addresses(response)
370
 
371
- # Use different retrievers based on the selected model
372
- if selected_model == phi_pipe:
373
- retriever = phi_retriever
374
- # Retrieve context from vector store
375
- context = retriever.get_relevant_documents(message)
376
- # Construct a prompt with clear formatting instructions
377
- prompt = f"""
378
- Based on the provided documents, generate a well-formatted response with the following details:
379
- {context}
380
-
381
- Please format the output as follows:
382
- Name: [Event Name]
383
- Location: [Location]
384
- Date and Time: [Date and Time]
385
- Description: [Event Description]
386
-
387
- Question: {message}
388
- """
389
- else:
390
- retriever = gpt_retriever
391
- prompt_template = QA_CHAIN_PROMPT_1 if choice == "Details" else QA_CHAIN_PROMPT_2
392
  context = retriever.get_relevant_documents(message)
393
  prompt = prompt_template.format(context=context, question=message)
394
 
395
- if retrieval_mode == "VDB":
396
  if selected_model == chat_model:
397
  # Use GPT-4o with Langchain
398
  qa_chain = RetrievalQA.from_chain_type(
@@ -425,7 +414,6 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
425
  return "Sorry, I encountered an error while processing your request.", []
426
 
427
 
428
-
429
  # def bot(history, choice, tts_choice, retrieval_mode):
430
  # if not history:
431
  # return history
 
368
  response = fetch_google_flights()
369
  return response, extract_addresses(response)
370
 
371
+ if retrieval_mode == "VDB":
372
+ if selected_model == chat_model:
373
+ # Use GPT-4o with its vector store and template
374
+ retriever = gpt_retriever
375
+ prompt_template = QA_CHAIN_PROMPT_1 if choice == "Details" else QA_CHAIN_PROMPT_2
376
+ elif selected_model == phi_pipe:
377
+ # Use Phi-3.5 with its vector store and always use template2
378
+ retriever = phi_retriever
379
+ prompt_template = QA_CHAIN_PROMPT_2
380
+
381
+ # Retrieve context and construct the prompt
 
 
 
 
 
 
 
 
 
 
382
  context = retriever.get_relevant_documents(message)
383
  prompt = prompt_template.format(context=context, question=message)
384
 
 
385
  if selected_model == chat_model:
386
  # Use GPT-4o with Langchain
387
  qa_chain = RetrievalQA.from_chain_type(
 
414
  return "Sorry, I encountered an error while processing your request.", []
415
 
416
 
 
417
  # def bot(history, choice, tts_choice, retrieval_mode):
418
  # if not history:
419
  # return history