import os
import datetime

from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langchain_core.runnables import RunnableLambda
from langchain_core.output_parsers import StrOutputParser
from tavily import TavilyClient

load_dotenv()

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")

if not OPENAI_API_KEY or not TAVILY_API_KEY:
    raise ValueError("❌ API keys are missing! Please check your .env file.")
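
# A .env file matching the lookups above would look like this sketch
# (placeholder values, not real keys):
#
#   OPENAI_API_KEY=your-key-here
#   TAVILY_API_KEY=your-key-here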

tavily_client = TavilyClient(api_key=TAVILY_API_KEY)

# Groq exposes an OpenAI-compatible endpoint, so ChatOpenAI can target it by
# overriding the base URL below; the key loaded as OPENAI_API_KEY must
# therefore be valid for Groq rather than for api.openai.com.
llm = ChatOpenAI(
    model_name="llama3-8b-8192",
    temperature=0,
    streaming=False,
    openai_api_key=OPENAI_API_KEY,
    openai_api_base="https://api.groq.com/openai/v1",
)


def search_web_with_tavily(query):
    # Skip trivially short queries; they are unlikely to be real searches.
    if len(query) < 5:
        return ""

    print(f"🔍 Sending query to Tavily: {query}")
    search_results = tavily_client.search(query=query, max_results=3)

    # Tavily returns a dict whose "results" list holds entries with "title",
    # "content", and other fields; keep one "title: content" snippet per hit.
    snippets = [
        f"{result['title']}: {result['content']}"
        for result in search_results["results"]
        if "content" in result
    ]

    print("✅ Web search results retrieved!")
    return "\n".join(snippets) if snippets else ""
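
# Example usage (hypothetical query; requires a valid Tavily key):
#   snippets = search_web_with_tavily("Vistula University admission deadlines")
# The return value is a newline-joined block of "title: content" snippets,
# or "" when the query is too short or nothing came back.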


def prompt_fn(query: str, context: str, web_context: str = "") -> str:
    """
    This is the main prompt template for the AI assistant.

    The assistant must:
    - Prioritize university knowledge first.
    - Use web search only if internal knowledge is insufficient.
    - If no relevant information is found, respond with:
      "I'm sorry, but I don't have information on this topic."
    - Avoid unnecessary introductions, greetings, or explanations.
    """
    search_part = (
        f"\nAdditionally, I found the following information from the web:\n{web_context}\n"
        if web_context
        else ""
    )

    return f"""
Below is the available information for answering student inquiries about Vistula University.

🔹 Follow this order when answering:
1️⃣ **Use internal university knowledge first.**
2️⃣ **If internal data lacks relevant details, use web search results.**
3️⃣ **If no useful information is found, respond with: "I'm sorry, but I don't have information on this topic."**

🔹 Important Rules:
- **Do not start with introductions.** Provide the answer directly.
- **If no information is available, do not add lengthy explanations.**
- **Never make up or guess information.**

🔹 Available Information:
{context}
{search_part}

🔹 Question:
{query}

---
❗ **If no relevant information is found, simply say:**
- "I'm sorry, but I don't have information on this topic."
"""


prompt_runnable = RunnableLambda(
    lambda inputs: prompt_fn(
        inputs["query"],
        inputs["context"],
        inputs.get("web_context", ""),
    )
)
rag_chain = prompt_runnable | llm | StrOutputParser()
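
# The chain can also be invoked directly, bypassing retrieval (hypothetical
# strings shown; requires the Groq key configured above):
#   rag_chain.invoke({
#       "query": "Where is the main campus?",
#       "context": "The main campus is in Warsaw.",
#   })
# prompt_fn builds the prompt, llm generates, StrOutputParser returns plain text.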


def generate_response(retriever, query):
    # Greet instead of searching when the query is too short to be a real question.
    if len(query.split()) <= 2 or query.lower() in ["hi", "hello", "help", "hey", "merhaba"]:
        return "👋 Hi there! How can I assist you today? Please ask me a specific question about Vistula University."

    relevant_docs = retriever.invoke(query)
    context = "\n".join([doc.page_content for doc in relevant_docs])

    # Fall back to web search when internal knowledge is insufficient, matching
    # the priority order described in prompt_fn.
    web_context = ""
    if not relevant_docs or len(context.strip()) < 20:
        web_context = search_web_with_tavily(query)
        if not web_context:
            return "I'm sorry, but I don't have information on this topic."

    inputs = {"query": query, "context": context, "web_context": web_context}
    response = rag_chain.invoke(inputs).strip()

    return response if response else "I'm sorry, but I don't have information on this topic."
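
# Behavior sketch (retriever is any LangChain retriever; see the demo at the end):
#   generate_response(retriever, "hi")  -> greeting, no retrieval performed
#   generate_response(retriever, "What are the tuition fees?")  -> grounded answer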


def log_interaction(question, answer, source):
    # Append each Q/A pair, with its source, to logs/chat_log.txt.
    log_folder = "logs"
    os.makedirs(log_folder, exist_ok=True)

    log_file = os.path.join(log_folder, "chat_log.txt")

    with open(log_file, "a", encoding="utf-8") as f:
        timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        f.write(f"{timestamp} | Question: {question}\n")
        f.write(f"{timestamp} | Answer: {answer}\n")
        f.write(f"{timestamp} | Source: {source}\n")
        f.write("-" * 80 + "\n")
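
# Minimal end-to-end sketch. The retriever below is an assumption made for
# illustration only: FakeEmbeddings yields random vectors, so retrieval quality
# is meaningless here; swap in the project's real vector store and embeddings.
# Running it also requires the Groq/Tavily keys loaded above.
if __name__ == "__main__":
    from langchain_community.embeddings import FakeEmbeddings
    from langchain_community.vectorstores import FAISS

    # Hypothetical corpus standing in for the real university knowledge base.
    docs = ["Vistula University is located in Warsaw, Poland."]
    retriever = FAISS.from_texts(docs, FakeEmbeddings(size=256)).as_retriever()

    question = "Where is Vistula University located?"
    answer = generate_response(retriever, question)
    log_interaction(question, answer, source="internal")
    print(answer)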