Pijush2023 committed
Commit c35991f · verified · Parent: 5bd2e9d

Update app.py

Files changed (1): app.py (+10 -6)
app.py CHANGED

@@ -104,15 +104,15 @@ def initialize_gpt_model():
 def initialize_gpt_mini_model():
     return ChatOpenAI(api_key=os.environ['OPENAI_API_KEY'], temperature=0, model='gpt-4o-mini')
 
-# Initialize the GPT-4o-mini model
-gpt_mini_model = initialize_gpt_mini_model()
 
 
 
 
-# Initialize both models
+
+# Initialize all models
 phi_pipe = initialize_phi_model()
 gpt_model = initialize_gpt_model()
+gpt_mini_model = initialize_gpt_mini_model()
 
 
 # Existing embeddings and vector store for GPT-4o
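The three initializers are now called together, with gpt_mini_model joining phi_pipe and gpt_model. The hunks in this commit do not show where generate_answer binds its selected_model argument to one of these instances; a minimal sketch of what that dispatch might look like, where the helper name resolve_model and the key strings are illustrative assumptions rather than code from app.py:

# Hypothetical helper: the commit does not show how selected_model
# maps to the initialized instances; names and keys are illustrative.
def resolve_model(selected_model):
    models = {
        "GPT-4o": gpt_model,
        "GPT-4o-mini": gpt_mini_model,
        "Phi-3.5": phi_pipe,
    }
    return models.get(selected_model, gpt_model)  # fall back to GPT-4o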
@@ -125,6 +125,11 @@ phi_embeddings = OpenAIEmbeddings(api_key=os.environ['OPENAI_API_KEY'])
 phi_vectorstore = PineconeVectorStore(index_name="phivector08252024", embedding=phi_embeddings)
 phi_retriever = phi_vectorstore.as_retriever(search_kwargs={'k': 5})
 
+#Existing embeddings and vector store for GPT-4o-mini
+gpt_mini_embeddings = OpenAIEmbeddings(api_key=os.environ['OPENAI_API_KEY'])
+gpt_mini_vectorstore = PineconeVectorStore(index_name="radarfinaldata08192024", embedding=gpt_mini_embeddings)
+gpt_mini_retriever = gpt_mini_vectorstore.as_retriever(search_kwargs={'k': 5})
+
 
 
 # Pinecone setup
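Each model now gets its own OpenAIEmbeddings instance paired with a named Pinecone index ("phivector08252024" for Phi, "radarfinaldata08192024" for GPT-4o-mini). A retriever built this way returns the k=5 nearest chunks for a query string, as the retrieval hunk below does with get_relevant_documents; a quick usage sketch, where the query text is a placeholder:

# Fetch the five nearest chunks for an ad-hoc query (k=5 set above).
docs = gpt_mini_retriever.get_relevant_documents("example query")
for doc in docs:
    print(doc.page_content[:100])  # preview each retrieved chunk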
@@ -626,10 +631,10 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
     # Retrieval-based response
     if retrieval_mode == "VDB":
         logging.debug("Using VDB retrieval mode")
-        retriever = gpt_retriever # Use the same retriever for all GPT models
         context = retriever.get_relevant_documents(message)
         logging.debug(f"Retrieved context: {context}")
 
+        prompt_template = QA_CHAIN_PROMPT_1 if choice == "Details" else QA_CHAIN_PROMPT_2
         prompt = prompt_template.format(context=context, question=message)
         logging.debug(f"Generated prompt: {prompt}")
 
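The added line restores per-choice prompt selection: QA_CHAIN_PROMPT_1 for "Details", QA_CHAIN_PROMPT_2 otherwise. Both are defined elsewhere in app.py and are not part of this commit; given the .format(context=..., question=...) call and the chain_type_kwargs usage in the next hunk, they are presumably LangChain PromptTemplate objects along these lines, with the template text being an illustrative assumption:

from langchain.prompts import PromptTemplate

# Illustrative only: the real template text lives elsewhere in app.py.
QA_CHAIN_PROMPT_1 = PromptTemplate(
    input_variables=["context", "question"],
    template="Using the context below, answer in detail.\n\n{context}\n\nQuestion: {question}",
)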
@@ -640,7 +645,7 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
             chain_type_kwargs={"prompt": prompt_template}
         )
         response = qa_chain({"query": message})
-        logging.debug(f"Response from {selected_model}: {response}")
+        logging.debug(f"Response: {response}")
         return response['result'], extract_addresses(response['result'])
 
     elif retrieval_mode == "KGF":
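This hunk shows only the tail of the chain construction plus the legacy dict-style invocation. The {"query": ...} input key, the 'result' output key, and the chain_type_kwargs={"prompt": ...} argument all match LangChain's RetrievalQA, so the full construction presumably looks roughly like the sketch below; the exact llm and retriever bindings are assumptions:

from langchain.chains import RetrievalQA

# Sketch of the construction whose closing lines appear in the hunk above.
qa_chain = RetrievalQA.from_chain_type(
    llm=gpt_model,  # or gpt_mini_model, depending on selected_model
    chain_type="stuff",  # stuff retrieved documents into a single prompt
    retriever=retriever,
    chain_type_kwargs={"prompt": prompt_template},
)
response = qa_chain({"query": message})  # response['result'] holds the answer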
@@ -659,7 +664,6 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
 
 
 
-
 def add_message(history, message):
     history.append((message, None))
     return history, gr.Textbox(value="", interactive=True, show_label=False)
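add_message follows the usual Gradio chatbot pattern: append the user turn with a None placeholder for the pending reply, and return a cleared textbox. It would typically be wired to the input's submit event; a sketch with assumed component names, since chatbot and chat_input are not shown in this commit:

# Assumed wiring; component names are illustrative, not from the commit.
chat_input.submit(
    add_message,
    inputs=[chatbot, chat_input],
    outputs=[chatbot, chat_input],
)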
 