Zasha1 committed (verified)
Commit 6143436 · Parent: 00695de

Update app.py
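Remove debugging comments and redundant inline comments from app.py; no functional changes.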

Files changed (1)
  1. app.py +9 -26
app.py CHANGED
@@ -15,8 +15,8 @@ import queue
 import threading
 
 # Initialize components
-objection_handler = ObjectionHandler("objections.csv") # Use relative path
-product_recommender = ProductRecommender("recommendations.csv") # Use relative path
+objection_handler = ObjectionHandler("objections.csv")
+product_recommender = ProductRecommender("recommendations.csv")
 model = SentenceTransformer('all-MiniLM-L6-v2')
 
 # Queue to hold transcribed text
@@ -41,7 +41,7 @@ def calculate_overall_sentiment(sentiment_scores):
 def handle_objection(text):
     query_embedding = model.encode([text])
     distances, indices = objection_handler.index.search(query_embedding, 1)
-    if distances[0][0] < 1.5: # Adjust similarity threshold as needed
+    if distances[0][0] < 1.5:
         responses = objection_handler.handle_objection(text)
         return "\n".join(responses) if responses else "No objection response found."
     return "No objection response found."
@@ -55,23 +55,20 @@ class AudioProcessor(AudioProcessorBase):
         audio_data = frame.to_ndarray()
         audio_bytes = (audio_data * 32767).astype(np.int16).tobytes() # Convert to int16 format
 
-        # Debugging: Check audio data
         print(f"Audio data shape: {audio_data.shape}")
         print(f"Audio data sample: {audio_data[:10]}")
 
-        # Transcribe the audio
         text = self.transcribe_audio(audio_bytes)
         if text:
-            self.q.put(text) # Add transcribed text to the queue
+            self.q.put(text)
 
         return frame
 
     def transcribe_audio(self, audio_bytes):
        try:
-            # Use the transcribe_with_chunks function from sentiment_analysis.py
-            chunks = transcribe_with_chunks({}) # Pass an empty objections_dict for now
+            chunks = transcribe_with_chunks({})
             if chunks:
-                return chunks[-1][0] # Return the latest transcribed text
+                return chunks[-1][0]
         except Exception as e:
             print(f"Error transcribing audio: {e}")
         return None
@@ -79,7 +76,6 @@ class AudioProcessor(AudioProcessorBase):
 def real_time_analysis():
     st.info("Listening... Say 'stop' to end the process.")
 
-    # Start WebRTC audio stream
     webrtc_ctx = webrtc_streamer(
         key="real-time-audio",
         mode=WebRtcMode.SENDONLY,
@@ -88,26 +84,22 @@ def real_time_analysis():
     )
 
     if webrtc_ctx.state.playing:
-        # Display transcribed text from the queue
         while not transcription_queue.empty():
             text = transcription_queue.get()
             st.write(f"*Recognized Text:* {text}")
 
-            # Analyze sentiment
             sentiment, score = analyze_sentiment(text)
             st.write(f"*Sentiment:* {sentiment} (Score: {score})")
 
-            # Handle objection
             objection_response = handle_objection(text)
             st.write(f"*Objection Response:* {objection_response}")
 
-            # Get product recommendation
             recommendations = []
             if is_valid_input(text) and is_relevant_sentiment(score):
                 query_embedding = model.encode([text])
                 distances, indices = product_recommender.index.search(query_embedding, 1)
 
-                if distances[0][0] < 1.5: # Similarity threshold
+                if distances[0][0] < 1.5:
                     recommendations = product_recommender.get_recommendations(text)
 
             if recommendations:
@@ -120,16 +112,13 @@ def fetch_data_and_display():
         st.header("Call Summaries and Sentiment Analysis")
         data = fetch_call_data(config["google_sheet_id"])
 
-        # Debugging: Log fetched data
-        print(f"Fetched data: {data}")
+        print(f"Fetched data: {data}") # Log fetched data
 
         if data.empty:
             st.warning("No data available in the Google Sheet.")
         else:
-            # Sentiment Visualizations
             sentiment_counts = data['Sentiment'].value_counts()
 
-            # Pie Chart
             col1, col2 = st.columns(2)
             with col1:
                 st.subheader("Sentiment Distribution")
@@ -145,7 +134,6 @@ def fetch_data_and_display():
                 )
                 st.plotly_chart(fig_pie)
 
-            # Bar Chart
             with col2:
                 st.subheader("Sentiment Counts")
                 fig_bar = px.bar(
@@ -162,37 +150,32 @@ def fetch_data_and_display():
                 )
                 st.plotly_chart(fig_bar)
 
-            # Existing Call Details Section
             st.subheader("All Calls")
             display_data = data.copy()
             display_data['Summary Preview'] = display_data['Summary'].str[:100] + '...'
             st.dataframe(display_data[['Call ID', 'Chunk', 'Sentiment', 'Summary Preview', 'Overall Sentiment']])
 
-            # Dropdown to select Call ID
             unique_call_ids = data[data['Call ID'] != '']['Call ID'].unique()
             call_id = st.selectbox("Select a Call ID to view details:", unique_call_ids)
 
-            # Display selected Call ID details
             call_details = data[data['Call ID'] == call_id]
             if not call_details.empty:
                 st.subheader("Detailed Call Information")
                 st.write(f"**Call ID:** {call_id}")
                 st.write(f"**Overall Sentiment:** {call_details.iloc[0]['Overall Sentiment']}")
 
-                # Expand summary section
                 st.subheader("Full Call Summary")
                 st.text_area("Summary:",
                              value=call_details.iloc[0]['Summary'],
                              height=200,
                              disabled=True)
 
-                # Show all chunks for the selected call
                 st.subheader("Conversation Chunks")
                 for _, row in call_details.iterrows():
                     if pd.notna(row['Chunk']):
                         st.write(f"**Chunk:** {row['Chunk']}")
                         st.write(f"**Sentiment:** {row['Sentiment']}")
-                        st.write("---") # Separator between chunks
+                        st.write("---")
             else:
                 st.error("No details available for the selected Call ID.")
     except Exception as e: