Shriharsh committed on
Commit
428a54e
·
verified ·
1 Parent(s): 2b61584

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -32
app.py CHANGED
@@ -3,15 +3,7 @@ import gradio as gr
3
  from transformers import pipeline
4
  from sentence_transformers import SentenceTransformer, util
5
  import PyPDF2
6
-
7
- # Set up logging with immediate writing
8
- logging.basicConfig(
9
- filename='support_bot_log.txt',
10
- level=logging.INFO,
11
- format='%(asctime)s - %(message)s',
12
- force=True # Ensures any existing handlers are replaced and logging starts fresh
13
- )
14
- logger = logging.getLogger()
15
 
16
  # Load models
17
  qa_model = pipeline("question-answering", model="distilbert-base-uncased-distilled-squad")
@@ -39,39 +31,38 @@ def find_relevant_section(query, sections, section_embeddings):
39
 
40
  SIMILARITY_THRESHOLD = 0.4
41
  if similarity_score >= SIMILARITY_THRESHOLD:
42
- logger.info(f"Found relevant section using embeddings for query: {query}")
43
  return best_section
44
 
45
- logger.info(f"Low similarity ({similarity_score}). Falling back to keyword search.")
46
 
47
  # Keyword-based fallback search with stopword filtering
48
- query_words = {word for word in query.lower().split() if word not in stopwords}
49
- for section in sections:
50
  section_words = {word for word in section.lower().split() if word not in stopwords}
51
  common_words = query_words.intersection(section_words)
52
  if len(common_words) >= 2:
53
- logger.info(f"Keyword match found for query: {query} with common words: {common_words}")
54
  return section
55
 
56
- logger.info(f"No good keyword match found. Returning default fallback response.")
57
  return "I don’t have enough information to answer that."
58
 
59
  # Process the uploaded file with detailed logging
60
  def process_file(file, state):
61
  if file is None:
62
- logger.info("No file uploaded.")
63
  return [("Bot", "Please upload a file.")], state
64
 
65
  file_path = file.name
66
  if file_path.lower().endswith(".pdf"):
67
- logger.info(f"Uploaded PDF file: {file_path}")
68
  text = extract_text_from_pdf(file_path)
69
  elif file_path.lower().endswith(".txt"):
70
- logger.info(f"Uploaded TXT file: {file_path}")
71
  with open(file_path, 'r', encoding='utf-8') as f:
72
  text = f.read()
73
  else:
74
- logger.error(f"Unsupported file format: {file_path}")
75
  return [("Bot", "Unsupported file format. Please upload a PDF or TXT file.")], state
76
 
77
  sections = text.split('\n\n')
@@ -83,18 +74,18 @@ def process_file(file, state):
83
  state['feedback_count'] = 0
84
  state['mode'] = 'waiting_for_query'
85
  state['chat_history'] = [("Bot", "File processed. You can now ask questions.")]
86
- logger.info(f"Processed file: {file_path}")
87
  return state['chat_history'], state
88
 
89
  # Handle user input (queries and feedback)
90
  def handle_input(user_input, state):
91
  if state['mode'] == 'waiting_for_upload':
92
  state['chat_history'].append(("Bot", "Please upload a file first."))
93
- logger.info("User attempted to interact without uploading a file.")
94
  return state['chat_history'], state
95
  elif state['mode'] == 'waiting_for_query':
96
  if user_input.lower() == "exit":
97
- logger.info("User entered 'exit'. Ending session.")
98
  state['mode'] = 'exited'
99
  state['chat_history'].append(("User", "exit"))
100
  state['chat_history'].append(("Bot", "Session ended. You can download the log file."))
@@ -114,10 +105,10 @@ def handle_input(user_input, state):
114
  state['chat_history'].append(("User", query))
115
  state['chat_history'].append(("Bot", f"Answer: {answer}\nPlease provide feedback: good, too vague, not helpful."))
116
  # Log the query and initial answer here:
117
- logger.info(f"Query: {query}, Answer: {answer}")
118
  elif state['mode'] == 'waiting_for_feedback':
119
  if user_input.lower() == "exit":
120
- logger.info("User entered 'exit'. Ending session.")
121
  state['mode'] = 'exited'
122
  state['chat_history'].append(("User", "exit"))
123
  state['chat_history'].append(("Bot", "Session ended. You can download the log file."))
@@ -125,15 +116,15 @@ def handle_input(user_input, state):
125
 
126
  feedback = user_input.lower()
127
  state['chat_history'].append(("User", feedback))
128
- logger.info(f"Feedback: {feedback}")
129
  if feedback == "good" or state['feedback_count'] >= 2:
130
  state['mode'] = 'waiting_for_query'
131
  if feedback == "good":
132
  state['chat_history'].append(("Bot", "Thank you for your feedback. You can ask another question."))
133
- logger.info("Feedback accepted as 'good'. Waiting for next query.")
134
  else:
135
  state['chat_history'].append(("Bot", "Maximum feedback iterations reached. You can ask another question."))
136
- logger.info("Max feedback iterations reached. Waiting for next query.")
137
  else:
138
  query = state['current_query']
139
  context = find_relevant_section(query, state['sections'], state['section_embeddings'])
@@ -143,15 +134,15 @@ def handle_input(user_input, state):
143
  adjusted_answer = qa_model(question=query + " Please provide more detailed information with examples.", context=context)['answer']
144
  else:
145
  state['chat_history'].append(("Bot", "Please provide valid feedback: good, too vague, not helpful."))
146
- logger.info(f"Invalid feedback received: {feedback}")
147
  return state['chat_history'], state
148
  state['last_answer'] = adjusted_answer
149
  state['feedback_count'] += 1
150
  state['chat_history'].append(("Bot", f"Updated answer: {adjusted_answer}\nPlease provide feedback: good, too vague, not helpful."))
151
- logger.info(f"Adjusted answer: {adjusted_answer}")
152
  elif state['mode'] == 'exited':
153
  state['chat_history'].append(("Bot", "Session is over. Please download the log."))
154
- logger.info("User interacted after exiting.")
155
  return state['chat_history'], state
156
 
157
  # Initial state
@@ -163,9 +154,22 @@ initial_state = {
163
  'feedback_count': 0,
164
  'mode': 'waiting_for_upload',
165
  'chat_history': [("Bot", "Please upload a PDF or TXT file to start.")],
166
- 'last_answer': None
 
167
  }
168
 
 
 
 
 
 
 
 
 
 
 
 
 
169
  # Gradio interface
170
  with gr.Blocks() as demo:
171
  state = gr.State(initial_state)
@@ -173,7 +177,7 @@ with gr.Blocks() as demo:
173
  chat = gr.Chatbot()
174
  user_input = gr.Textbox(label="Your query or feedback")
175
  submit_btn = gr.Button("Submit")
176
- log_file = gr.File(label="Download Log File", value="support_bot_log.txt") # Added for log download
177
 
178
  # Process file upload
179
  file_upload.upload(process_file, inputs=[file_upload, state], outputs=[chat, state])
@@ -181,4 +185,17 @@ with gr.Blocks() as demo:
181
  # Handle user input and clear the textbox
182
  submit_btn.click(handle_input, inputs=[user_input, state], outputs=[chat, state]).then(lambda: "", None, user_input)
183
 
 
 
 
 
 
 
 
 
 
 
 
 
 
184
  demo.launch(share=True)
 
3
  from transformers import pipeline
4
  from sentence_transformers import SentenceTransformer, util
5
  import PyPDF2
6
+ import os
+ import datetime  # required by log_message() for timestamps
 
 
 
 
 
 
 
 
7
 
8
  # Load models
9
  qa_model = pipeline("question-answering", model="distilbert-base-uncased-distilled-squad")
 
31
 
32
  SIMILARITY_THRESHOLD = 0.4
33
  if similarity_score >= SIMILARITY_THRESHOLD:
34
+ log_message(f"Found relevant section using embeddings for query: {query}")
35
  return best_section
36
 
37
+ log_message(f"Low similarity ({similarity_score}). Falling back to keyword search.")
38
 
39
  # Keyword-based fallback search with stopword filtering
40
+ query_words = {word for word in query.lower().split() if word not in stopwords}
+ for section in sections:
 
41
  section_words = {word for word in section.lower().split() if word not in stopwords}
42
  common_words = query_words.intersection(section_words)
43
  if len(common_words) >= 2:
44
+ log_message(f"Keyword match found for query: {query} with common words: {common_words}")
45
  return section
46
 
47
+ log_message(f"No good keyword match found. Returning default fallback response.")
48
  return "I don’t have enough information to answer that."
49
 
50
  # Process the uploaded file with detailed logging
51
  def process_file(file, state):
52
  if file is None:
53
+ log_message("No file uploaded.")
54
  return [("Bot", "Please upload a file.")], state
55
 
56
  file_path = file.name
57
  if file_path.lower().endswith(".pdf"):
58
+ log_message(f"Uploaded PDF file: {file_path}")
59
  text = extract_text_from_pdf(file_path)
60
  elif file_path.lower().endswith(".txt"):
61
+ log_message(f"Uploaded TXT file: {file_path}")
62
  with open(file_path, 'r', encoding='utf-8') as f:
63
  text = f.read()
64
  else:
65
+ log_message(f"Unsupported file format: {file_path}")
66
  return [("Bot", "Unsupported file format. Please upload a PDF or TXT file.")], state
67
 
68
  sections = text.split('\n\n')
 
74
  state['feedback_count'] = 0
75
  state['mode'] = 'waiting_for_query'
76
  state['chat_history'] = [("Bot", "File processed. You can now ask questions.")]
77
+ log_message(f"Processed file: {file_path}")
78
  return state['chat_history'], state
79
 
80
  # Handle user input (queries and feedback)
81
  def handle_input(user_input, state):
82
  if state['mode'] == 'waiting_for_upload':
83
  state['chat_history'].append(("Bot", "Please upload a file first."))
84
+ log_message("User attempted to interact without uploading a file.")
85
  return state['chat_history'], state
86
  elif state['mode'] == 'waiting_for_query':
87
  if user_input.lower() == "exit":
88
+ log_message("User entered 'exit'. Ending session.")
89
  state['mode'] = 'exited'
90
  state['chat_history'].append(("User", "exit"))
91
  state['chat_history'].append(("Bot", "Session ended. You can download the log file."))
 
105
  state['chat_history'].append(("User", query))
106
  state['chat_history'].append(("Bot", f"Answer: {answer}\nPlease provide feedback: good, too vague, not helpful."))
107
  # Log the query and initial answer here:
108
+ log_message(f"Query: {query}, Answer: {answer}")
109
  elif state['mode'] == 'waiting_for_feedback':
110
  if user_input.lower() == "exit":
111
+ log_message("User entered 'exit'. Ending session.")
112
  state['mode'] = 'exited'
113
  state['chat_history'].append(("User", "exit"))
114
  state['chat_history'].append(("Bot", "Session ended. You can download the log file."))
 
116
 
117
  feedback = user_input.lower()
118
  state['chat_history'].append(("User", feedback))
119
+ log_message(f"Feedback: {feedback}")
120
  if feedback == "good" or state['feedback_count'] >= 2:
121
  state['mode'] = 'waiting_for_query'
122
  if feedback == "good":
123
  state['chat_history'].append(("Bot", "Thank you for your feedback. You can ask another question."))
124
+ log_message("Feedback accepted as 'good'. Waiting for next query.")
125
  else:
126
  state['chat_history'].append(("Bot", "Maximum feedback iterations reached. You can ask another question."))
127
+ log_message("Max feedback iterations reached. Waiting for next query.")
128
  else:
129
  query = state['current_query']
130
  context = find_relevant_section(query, state['sections'], state['section_embeddings'])
 
134
  adjusted_answer = qa_model(question=query + " Please provide more detailed information with examples.", context=context)['answer']
135
  else:
136
  state['chat_history'].append(("Bot", "Please provide valid feedback: good, too vague, not helpful."))
137
+ log_message(f"Invalid feedback received: {feedback}")
138
  return state['chat_history'], state
139
  state['last_answer'] = adjusted_answer
140
  state['feedback_count'] += 1
141
  state['chat_history'].append(("Bot", f"Updated answer: {adjusted_answer}\nPlease provide feedback: good, too vague, not helpful."))
142
+ log_message(f"Adjusted answer: {adjusted_answer}")
143
  elif state['mode'] == 'exited':
144
  state['chat_history'].append(("Bot", "Session is over. Please download the log."))
145
+ log_message("User interacted after exiting.")
146
  return state['chat_history'], state
147
 
148
  # Initial state
 
154
  'feedback_count': 0,
155
  'mode': 'waiting_for_upload',
156
  'chat_history': [("Bot", "Please upload a PDF or TXT file to start.")],
157
+ 'last_answer': None,
158
+ 'log_messages': []  # Initialize an empty list for log messages
159
  }
160
 
161
# Logging function to store messages in memory
def log_message(message):
    """Append *message*, prefixed with a timestamp, to the in-memory log buffer.

    Entries accumulate in initial_state['log_messages'] and are only flushed
    to disk by save_logs_to_file(), so logging stays cheap during a session.

    Parameters:
        message: human-readable text to record.
    """
    # Local import: this change set only adds `import os` at the top of the
    # file, so `datetime` may not be in scope there — importing here keeps
    # the function self-contained either way.
    import datetime

    timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    # NOTE(review): this appends to the module-level initial_state dict rather
    # than the per-session gr.State copy, so logs from concurrent Gradio users
    # will interleave — confirm that is acceptable for this demo.
    initial_state['log_messages'].append(f"{timestamp} - {message}")
166
+
167
# Function to save logs to file
def save_logs_to_file():
    """Flush the in-memory log buffer to support_bot_log.txt.

    Overwrites the file with one line per buffered entry.

    Returns:
        The path of the written file, so a Gradio event handler can feed it
        directly into a gr.File output. (Returning None, as before, left the
        download component empty; returning the path is backward-compatible.)
    """
    log_path = "support_bot_log.txt"
    # `entry`/`fh` avoid the original names, which shadowed the log_message()
    # helper and collided with the module-level `log_file` Gradio component.
    with open(log_path, "w") as fh:
        for entry in initial_state['log_messages']:
            fh.write(entry + "\n")
    return log_path
172
+
173
  # Gradio interface
174
  with gr.Blocks() as demo:
175
  state = gr.State(initial_state)
 
177
  chat = gr.Chatbot()
178
  user_input = gr.Textbox(label="Your query or feedback")
179
  submit_btn = gr.Button("Submit")
180
+ log_file = gr.File(label="Download Log File") # Changed: No initial value
181
 
182
  # Process file upload
183
  file_upload.upload(process_file, inputs=[file_upload, state], outputs=[chat, state])
 
185
  # Handle user input and clear the textbox
186
  submit_btn.click(handle_input, inputs=[user_input, state], outputs=[chat, state]).then(lambda: "", None, user_input)
187
 
188
+ # Update the log file just before download
189
+ log_file.click(save_logs_to_file, None, [log_file]) # Trigger save on click
190
+
191
+ # Also save logs when user exits
192
+ user_input.submit(
193
+ lambda user_input, state: (
194
+ save_logs_to_file() if user_input.lower() == "exit" else None,
195
+ state
196
+ ),
197
+ [user_input, state],
198
+ [log_file, state]
199
+ )
200
+
201
  demo.launch(share=True)