Daemontatox committed (verified)
Commit 4a3d0de · 1 parent: f94101f

Update app.py

Files changed (1)
  1. app.py +52 -24
app.py CHANGED

@@ -52,23 +52,6 @@ class ChatHistory:
     def clear(self):
         self.messages = []

-# Function to log questions and answers to a file
-def log_to_file(question: str, answer: str):
-    """Logs the question and answer to a file with a timestamp."""
-    try:
-        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-        log_entry = f"Timestamp: {timestamp}\nQuestion: {question}\nAnswer: {answer}\n\n"
-
-        # Debugging: Print the log entry to verify its content
-        logger.info(f"Log Entry: {log_entry}")
-
-        # Open the file in append mode and write the log entry
-        with open("Logs.txt", "a") as log_file:
-            log_file.write(log_entry)
-        logger.info("Successfully wrote to Logs.txt")
-    except Exception as e:
-        logger.error(f"Failed to write to Logs.txt: {e}")
-
 # Load environment variables and setup
 load_dotenv()

@@ -92,6 +75,7 @@ except Exception as e:
     logger.error("Failed to connect to Qdrant.")
     exit(1)

+# Create the main collection for Mawared HR
 collection_name = "mawared"

 try:
@@ -118,9 +102,54 @@ retriever = db.as_retriever(
     search_kwargs={"k": 5}
 )

+# Create a new collection for logs
+logs_collection_name = "mawared_logs"
+
+try:
+    client.create_collection(
+        collection_name=logs_collection_name,
+        vectors_config=models.VectorParams(
+            size=384,  # Same size as embeddings
+            distance=models.Distance.COSINE
+        )
+    )
+    logger.info(f"Created new Qdrant collection: {logs_collection_name}")
+except Exception as e:
+    if "already exists" not in str(e):
+        logger.error(f"Error creating logs collection: {e}")
+        exit(1)
+
+def log_to_qdrant(question: str, answer: str):
+    """Logs the question and answer to the Qdrant logs collection."""
+    try:
+        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+        log_entry = {
+            "question": question,
+            "answer": answer,
+            "timestamp": timestamp
+        }
+
+        # Convert the log entry to a vector (using embeddings)
+        log_vector = embeddings.embed_documents([str(log_entry)])[0]
+
+        # Insert the log into the Qdrant collection
+        client.upsert(
+            collection_name=logs_collection_name,
+            points=[
+                models.PointStruct(
+                    id=hash(timestamp),  # Use timestamp hash as ID
+                    vector=log_vector,
+                    payload=log_entry
+                )
+            ]
+        )
+        logger.info(f"Logged question and answer to Qdrant collection: {logs_collection_name}")
+    except Exception as e:
+        logger.error(f"Failed to log to Qdrant: {e}")
+
 llm = ChatGoogleGenerativeAI(
     model="gemini-2.0-flash-thinking-exp-01-21",
-    temperature=0.2,
+    temperature=0,
     max_tokens=None,
     timeout=None,
     max_retries=2,
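
The hunk above replaces the removed Logs.txt helper with a mawared_logs Qdrant collection and a log_to_qdrant() function that embeds each question/answer pair and upserts it as a point whose payload holds the raw text and a timestamp. As a minimal sketch of how those stored payloads could be read back for inspection, assuming the same QdrantClient instance and collection name used in app.py (the helper name print_logged_exchanges is illustrative, not part of the commit):

from qdrant_client import QdrantClient


def print_logged_exchanges(client: QdrantClient, collection: str = "mawared_logs", page_size: int = 20) -> None:
    """Page through the logs collection and print each stored question/answer payload."""
    offset = None
    while True:
        # scroll() returns one page of points plus the offset of the next page (None when exhausted)
        points, offset = client.scroll(
            collection_name=collection,
            limit=page_size,
            offset=offset,
            with_payload=True,
            with_vectors=False,
        )
        for point in points:
            payload = point.payload or {}
            print(f"[{payload.get('timestamp')}] Q: {payload.get('question')}")
            print(f"    A: {payload.get('answer')}")
        if offset is None:
            break

The remaining hunks of the diff follow.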
 
@@ -131,7 +160,6 @@ llm = ChatGoogleGenerativeAI(
 template = """
 You are a specialized AI assistant for the Mawared HR System, designed to deliver accurate and contextually relevant support based solely on the provided context and chat history.

-
 ---

 Core Principles
@@ -196,8 +224,8 @@ Critical Constraints

 - Strict Context Reliance: Base all responses solely on the provided context and chat history.
 - Non-Mawared HR Queries: Politely decline to answer questions unrelated to Mawared HR.
-- Answer Format: Always provide accurate answers in numbered steps without using code.
-- Use Tags like <Thinking> and <Processing> to indicate your process.
+- Answer Format: Always provide accurate answers in numbered steps without revealing your thought process or using code.
+
 ---

 By adhering to these principles and guidelines, ensure every response is accurate, professional, and easy to follow.
@@ -277,9 +305,9 @@ def ask_question_gradio(question: str, history: List[List[str]]) -> Generator[tu
         # Add final response to chat history
         chat_history.add_message("assistant", response)

-        # Log the question and answer to a file
-        logger.info("Attempting to log question and answer to Logs.txt")
-        log_to_file(question, response)
+        # Log the question and answer to Qdrant
+        logger.info("Attempting to log question and answer to Qdrant")
+        log_to_qdrant(question, response)

     except Exception as e:
         logger.error(f"Error during question processing: {e}")
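
One caveat about the new logging path: Qdrant accepts point IDs only as unsigned integers or UUIDs, while Python's built-in hash() is salted per process and can return negative values, and two answers logged within the same second would hash to the same ID and overwrite each other. The sketch below shows one possible alternative using uuid4-based IDs; it is an illustration under those assumptions, not part of this commit, and build_log_point is a hypothetical helper.

import uuid
from datetime import datetime
from typing import List

from qdrant_client import models


def build_log_point(question: str, answer: str, vector: List[float]) -> models.PointStruct:
    """Build a log point keyed by a collision-free UUID instead of hash(timestamp)."""
    payload = {
        "question": question,
        "answer": answer,
        "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
    }
    return models.PointStruct(
        id=str(uuid.uuid4()),  # UUID strings are always valid Qdrant point ids
        vector=vector,
        payload=payload,
    )

A point built this way could be passed to client.upsert() exactly as in log_to_qdrant() above.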