chaithanyashaji committed
Commit 9673d86 · verified · 1 Parent(s): 26bd5fc

Update main.py

Files changed (1):
  main.py +32 -22
main.py CHANGED
@@ -10,9 +10,9 @@ from langchain_huggingface import HuggingFaceEmbeddings
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_community.vectorstores import FAISS
 from langchain.prompts import PromptTemplate
+from langchain_together import Together
 from langchain.memory import ConversationBufferMemory
 from langchain.chains import ConversationalRetrievalChain
-from langchain.schema import Document
 
 # ==========================
 # Logging Configuration
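
Note: the new `Together` LLM wrapper lives in the separate `langchain-together` integration package, so the deployment presumably needs `pip install langchain-together` alongside the existing LangChain dependencies; the dropped `langchain.schema.Document` import was unused.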
@@ -30,12 +30,17 @@ warnings.filterwarnings("ignore", message="You are using `torch.load` with `weig
 # Load Environment Variables
 # ==========================
 load_dotenv()
+TOGETHER_AI_API = os.getenv("TOGETHER_AI")
 HF_HOME = os.getenv("HF_HOME", "./cache")
 os.environ["HF_HOME"] = HF_HOME
 
 # Ensure the HF_HOME directory exists
 os.makedirs(HF_HOME, exist_ok=True)
 
+# Validate required environment variables
+if not TOGETHER_AI_API:
+    raise ValueError("The TOGETHER_AI_API environment variable is missing. Please set it in your .env file.")
+
 # ==========================
 # Initialize Embeddings
 # ==========================
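
For reference, a minimal `.env` satisfying this validation might look like the sketch below; the key value is a placeholder, and `HF_HOME` is optional since the code falls back to `./cache`:

# .env (placeholder values; substitute a real Together API key)
TOGETHER_AI=your-together-api-key
HF_HOME=./cache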
@@ -74,18 +79,32 @@ ANSWER:
 prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question", "chat_history"])
 
 # ==========================
-# Conversational Retrieval Chain
+# Initialize Together API
 # ==========================
-memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
+try:
+    llm = Together(
+        model="mistralai/Mistral-7B-Instruct-v0.2",
+        temperature=0.5,
+        max_tokens=1024,
+        together_api_key=TOGETHER_AI_API,
+    )
+    logger.info("Together API successfully initialized.")
+except Exception as e:
+    logger.error(f"Error initializing Together API: {e}")
+    raise RuntimeError("Something went wrong with the Together API setup. Please verify your API key and configuration.")
 
+# ==========================
+# Conversational Retrieval Chain (RAG Implementation)
+# ==========================
+memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
 qa = ConversationalRetrievalChain.from_llm(
-    llm="nomic-ai/nomic-embed-text-v1",
+    llm=llm,
     memory=memory,
     retriever=db_retriever,
     combine_docs_chain_kwargs={"prompt": prompt},
+    return_source_documents=True  # This enables logging of retrieved content
 )
-
-logger.info("Conversational Retrieval Chain initialized.")
+logger.info("Conversational Retrieval Chain initialized with RAG capabilities.")
 
 # ==========================
 # FastAPI Backend
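
One caveat with this hunk: with `return_source_documents=True` the chain returns two output keys ("answer" and "source_documents"), and `ConversationBufferMemory` has historically raised "ValueError: One output key expected" when saving context unless told which key to store. A minimal sketch of the usual fix, assuming the same variable names as in this diff:

# Sketch, not the committed code: pin the memory to the "answer" output
# so save_context() still works when the chain also returns "source_documents".
memory = ConversationBufferMemory(
    memory_key="chat_history",
    return_messages=True,
    output_key="answer",
)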
@@ -106,25 +125,16 @@ async def root():
 async def chat(request: ChatRequest):
     try:
         logger.debug(f"Received user question: {request.question}")
-
-        # Retrieve relevant documents
-        docs = db_retriever.get_relevant_documents(request.question)
-        if not docs:
-            logger.warning("No relevant documents found.")
-            return ChatResponse(answer="I'm sorry, I couldn't find relevant information for your query.")
-
-        # Log retrieved documents for debugging
-        for i, doc in enumerate(docs, start=1):
-            logger.debug(f"Retrieved Document {i}: {doc.page_content[:500]}...")
-
-        # Invoke the conversational retrieval chain
         result = qa.invoke(input=request.question)
-        answer = result.get("answer", "I'm sorry, I couldn't find relevant information for your query.")
 
-        # Log the final answer
-        logger.debug(f"Final Answer: {answer}")
+        # Log the retrieved source documents for debugging purposes
+        source_docs = result.get("source_documents")
+        logger.debug(f"Retrieved source documents: {source_docs}")
+
+        answer = result.get("answer")
+        if not answer or "The information is not available in the provided context" in answer:
+            answer = "I'm sorry, I couldn't find relevant information for your query. Please try rephrasing or providing more details."
         return ChatResponse(answer=answer)
-
     except Exception as e:
         logger.error(f"Error during chat invocation: {e}")
         raise HTTPException(status_code=500, detail="Oops! Something went wrong on our end. Please try again later.")
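
For completeness, a hypothetical smoke test against this endpoint. The route decorator sits outside the diff, so the POST /chat path and the port are assumptions; the `question` and `answer` field names come directly from `ChatRequest` and `ChatResponse`. Passing a bare string via `qa.invoke(input=request.question)` works because the chain exposes a single external input key ("question"), onto which LangChain maps the string.

# Hypothetical client call; URL path and port are assumed, not taken from the diff.
import requests

resp = requests.post(
    "http://localhost:8000/chat",
    json={"question": "What topics do the indexed documents cover?"},
)
resp.raise_for_status()
print(resp.json()["answer"])  # field defined by the ChatResponse model in main.py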
 