chaithanyashaji committed on
Commit
26bd5fc
·
verified ·
1 Parent(s): 65bd972

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +21 -30
main.py CHANGED
@@ -10,9 +10,9 @@ from langchain_huggingface import HuggingFaceEmbeddings
10
  from langchain.text_splitter import RecursiveCharacterTextSplitter
11
  from langchain_community.vectorstores import FAISS
12
  from langchain.prompts import PromptTemplate
13
- from langchain_together import Together
14
  from langchain.memory import ConversationBufferMemory
15
  from langchain.chains import ConversationalRetrievalChain
 
16
 
17
  # ==========================
18
  # Logging Configuration
@@ -30,17 +30,12 @@ warnings.filterwarnings("ignore", message="You are using `torch.load` with `weig
30
  # Load Environment Variables
31
  # ==========================
32
  load_dotenv()
33
- TOGETHER_AI_API = os.getenv("TOGETHER_AI")
34
  HF_HOME = os.getenv("HF_HOME", "./cache")
35
  os.environ["HF_HOME"] = HF_HOME
36
 
37
  # Ensure the HF_HOME directory exists
38
  os.makedirs(HF_HOME, exist_ok=True)
39
 
40
- # Validate required environment variables
41
- if not TOGETHER_AI_API:
42
- raise ValueError("The TOGETHER_AI_API environment variable is missing. Please set it in your .env file.")
43
-
44
  # ==========================
45
  # Initialize Embeddings
46
  # ==========================
@@ -76,34 +71,20 @@ QUESTION: {question}
76
  ANSWER:
77
  </s>[INST]"""
78
 
79
-
80
  prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question", "chat_history"])
81
 
82
- # ==========================
83
- # Initialize Together API
84
- # ==========================
85
- try:
86
- llm = Together(
87
- model="mistralai/Mistral-7B-Instruct-v0.2",
88
- temperature=0.5,
89
- max_tokens=1024,
90
- together_api_key=TOGETHER_AI_API,
91
- )
92
- logger.info("Together API successfully initialized.")
93
- except Exception as e:
94
- logger.error(f"Error initializing Together API: {e}")
95
- raise RuntimeError("Something went wrong with the Together API setup. Please verify your API key and configuration.")
96
-
97
  # ==========================
98
  # Conversational Retrieval Chain
99
  # ==========================
100
  memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
 
101
  qa = ConversationalRetrievalChain.from_llm(
102
- llm=llm,
103
  memory=memory,
104
  retriever=db_retriever,
105
  combine_docs_chain_kwargs={"prompt": prompt},
106
  )
 
107
  logger.info("Conversational Retrieval Chain initialized.")
108
 
109
  # ==========================
@@ -125,21 +106,31 @@ async def root():
125
  async def chat(request: ChatRequest):
126
  try:
127
  logger.debug(f"Received user question: {request.question}")
 
 
 
 
 
 
 
 
 
 
 
 
128
  result = qa.invoke(input=request.question)
129
- answer = result.get("answer")
130
- if not answer or "The information is not available in the provided context" in answer:
131
- answer = "I'm sorry, I couldn't find relevant information for your query. Please try rephrasing or providing more details."
 
132
  return ChatResponse(answer=answer)
 
133
  except Exception as e:
134
  logger.error(f"Error during chat invocation: {e}")
135
  raise HTTPException(status_code=500, detail="Oops! Something went wrong on our end. Please try again later.")
136
 
137
-
138
  # ==========================
139
  # Run Uvicorn Server
140
  # ==========================
141
  if __name__ == "__main__":
142
  uvicorn.run("main:app", host="0.0.0.0", port=7860)
143
-
144
-
145
-
 
10
  from langchain.text_splitter import RecursiveCharacterTextSplitter
11
  from langchain_community.vectorstores import FAISS
12
  from langchain.prompts import PromptTemplate
 
13
  from langchain.memory import ConversationBufferMemory
14
  from langchain.chains import ConversationalRetrievalChain
15
+ from langchain.schema import Document
16
 
17
  # ==========================
18
  # Logging Configuration
 
30
  # Load Environment Variables
31
  # ==========================
32
  load_dotenv()
 
33
  HF_HOME = os.getenv("HF_HOME", "./cache")
34
  os.environ["HF_HOME"] = HF_HOME
35
 
36
  # Ensure the HF_HOME directory exists
37
  os.makedirs(HF_HOME, exist_ok=True)
38
 
 
 
 
 
39
  # ==========================
40
  # Initialize Embeddings
41
  # ==========================
 
71
  ANSWER:
72
  </s>[INST]"""
73
 
 
74
  prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question", "chat_history"])
75
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
76
  # ==========================
77
  # Conversational Retrieval Chain
78
  # ==========================
79
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

# BUG FIX: this revision passed the *embedding* model id
# ("nomic-ai/nomic-embed-text-v1") as the `llm` argument.
# `ConversationalRetrievalChain.from_llm` expects an LLM object, not a
# string, and an embedding model cannot generate answers anyway.
# Reinstate the Together-hosted Mistral model the app was built around.
try:
    # Local import keeps this fix self-contained; the top-level
    # `from langchain_together import Together` was removed in the same
    # revision that introduced the bug.
    from langchain_together import Together

    TOGETHER_AI_API = os.getenv("TOGETHER_AI")
    if not TOGETHER_AI_API:
        raise ValueError(
            "The TOGETHER_AI environment variable is missing. Please set it in your .env file."
        )

    llm = Together(
        model="mistralai/Mistral-7B-Instruct-v0.2",
        temperature=0.5,
        max_tokens=1024,
        together_api_key=TOGETHER_AI_API,
    )
    logger.info("Together API successfully initialized.")
except Exception as e:
    logger.error(f"Error initializing Together API: {e}")
    raise RuntimeError(
        "Something went wrong with the Together API setup. Please verify your API key and configuration."
    )

# Conversational chain: retrieves context via `db_retriever`, keeps the
# dialogue in `memory`, and formats the LLM call with `prompt`.
qa = ConversationalRetrievalChain.from_llm(
    llm=llm,
    memory=memory,
    retriever=db_retriever,
    combine_docs_chain_kwargs={"prompt": prompt},
)

logger.info("Conversational Retrieval Chain initialized.")
89
 
90
  # ==========================
 
106
async def chat(request: ChatRequest):
    """Answer a user question with the conversational retrieval chain.

    Returns a ChatResponse containing the generated answer, or a friendly
    fallback message when no relevant context / no answer is produced.
    Raises HTTPException(500) on any unexpected failure.
    """
    try:
        logger.debug(f"Received user question: {request.question}")

        # Early guard: bail out when the vector store has nothing relevant.
        # NOTE(review): this is a second retrieval on top of the one the
        # chain performs internally in qa.invoke() below — fine for
        # debugging, but it doubles the FAISS lookups per request.
        docs = db_retriever.get_relevant_documents(request.question)
        if not docs:
            logger.warning("No relevant documents found.")
            return ChatResponse(
                answer="I'm sorry, I couldn't find relevant information for your query."
            )

        # Log retrieved documents (truncated) for debugging.
        for i, doc in enumerate(docs, start=1):
            logger.debug(f"Retrieved Document {i}: {doc.page_content[:500]}...")

        # Invoke the conversational retrieval chain.
        result = qa.invoke(input=request.question)

        # BUG FIX: `result.get("answer", default)` only falls back when the
        # key is *missing*; an empty string or None answer leaked through.
        # Fall back on any falsy answer, as the earlier revision did.
        answer = result.get("answer")
        if not answer:
            answer = "I'm sorry, I couldn't find relevant information for your query."

        logger.debug(f"Final Answer: {answer}")
        return ChatResponse(answer=answer)

    except Exception as e:
        logger.error(f"Error during chat invocation: {e}")
        raise HTTPException(
            status_code=500,
            detail="Oops! Something went wrong on our end. Please try again later.",
        )
131
 
 
132
  # ==========================
133
  # Run Uvicorn Server
134
  # ==========================
135
  if __name__ == "__main__":
136
  uvicorn.run("main:app", host="0.0.0.0", port=7860)