Update app.py
app.py
CHANGED
@@ -1,3 +1,4 @@
+from streamlit_webrtc import webrtc_streamer, WebRtcMode
 from sentiment_analysis import analyze_sentiment, transcribe_with_chunks
 from product_recommender import ProductRecommender
 from objection_handler import ObjectionHandler
@@ -9,6 +10,9 @@ import uuid
 import pandas as pd
 import plotly.express as px
 import streamlit as st
+import numpy as np
+from io import BytesIO
+import wave

 # Initialize components
 objection_handler = ObjectionHandler("objections.csv")  # Use relative path
@@ -113,32 +117,68 @@ def handle_objection(text):
     return "\n".join(responses) if responses else "No objection response found."
     return "No objection response found."

+def transcribe_audio(audio_bytes):
+    """Transcribe audio using the transcribe_with_chunks function from sentiment_analysis.py."""
+    try:
+        # Save audio bytes to a temporary WAV file
+        with BytesIO() as wav_buffer:
+            with wave.open(wav_buffer, 'wb') as wf:
+                wf.setnchannels(1)  # Mono audio
+                wf.setsampwidth(2)  # 2 bytes for int16
+                wf.setframerate(16000)  # Sample rate
+                wf.writeframes(audio_bytes)
+
+        # Use the transcribe_with_chunks function from sentiment_analysis.py
+        chunks = transcribe_with_chunks({})  # Pass an empty objections_dict for now
+        if chunks:
+            return chunks[-1][0]  # Return the latest transcribed text
+    except Exception as e:
+        print(f"Error transcribing audio: {e}")
+    return None
+
 def real_time_analysis():
     st.info("Listening... Say 'stop' to end the process.")

-    … (previous implementation of real_time_analysis; 23 lines not shown in this view)
+    def audio_frame_callback(audio_frame):
+        # Convert audio frame to bytes
+        audio_bytes = audio_frame.to_ndarray().tobytes()
+
+        # Transcribe the audio
+        text = transcribe_audio(audio_bytes)
+        if text:
+            st.write(f"*Recognized Text:* {text}")
+
+            # Analyze sentiment
+            sentiment, score = analyze_sentiment(text)
+            st.write(f"*Sentiment:* {sentiment} (Score: {score})")
+
+            # Handle objection
+            objection_response = handle_objection(text)
+            st.write(f"*Objection Response:* {objection_response}")
+
+            # Get product recommendation
+            recommendations = []
+            if is_valid_input(text) and is_relevant_sentiment(score):
+                query_embedding = model.encode([text])
+                distances, indices = product_recommender.index.search(query_embedding, 1)
+
+                if distances[0][0] < 1.5:  # Similarity threshold
+                    recommendations = product_recommender.get_recommendations(text)
+
+                if recommendations:
+                    st.write("*Product Recommendations:*")
+                    for rec in recommendations:
+                        st.write(rec)
+
+        return audio_frame
+
+    # Start WebRTC audio stream
+    webrtc_ctx = webrtc_streamer(
+        key="real-time-audio",
+        mode=WebRtcMode.SENDONLY,
+        audio_frame_callback=audio_frame_callback,
+        media_stream_constraints={"audio": True, "video": False},
+    )

 def run_app():
     st.set_page_config(page_title="Sales Call Assistant", layout="wide")
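The new transcribe_audio helper wraps raw PCM bytes in a WAV container with wave and BytesIO before calling transcribe_with_chunks. As a rough, standalone illustration of that packaging step (not part of the commit), the sketch below builds the same kind of in-memory WAV payload from a synthetic int16 tone; the 16 kHz mono parameters mirror the values hard-coded above, and pcm16_to_wav_bytes is a hypothetical helper name. A real WebRTC frame may arrive at a different sample rate and would need resampling before this step.

import wave
import numpy as np
from io import BytesIO

def pcm16_to_wav_bytes(samples, sample_rate=16000):
    """Wrap mono int16 samples in a WAV container and return the whole file as bytes."""
    wav_buffer = BytesIO()
    with wave.open(wav_buffer, "wb") as wf:
        wf.setnchannels(1)        # mono, as in transcribe_audio above
        wf.setsampwidth(2)        # int16 -> 2 bytes per sample
        wf.setframerate(sample_rate)
        wf.writeframes(samples.astype(np.int16).tobytes())
    return wav_buffer.getvalue()  # grab the finished payload before the buffer is discarded

# One second of a 440 Hz test tone standing in for a captured audio frame.
t = np.linspace(0, 1, 16000, endpoint=False)
tone = (0.3 * 32767 * np.sin(2 * np.pi * 440 * t)).astype(np.int16)
print(f"WAV payload: {len(pcm16_to_wav_bytes(tone))} bytes")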
@@ -149,8 +189,7 @@ def run_app():

     if app_mode == "Real-Time Call Analysis":
         st.header("Real-Time Sales Call Analysis")
-        …
-        real_time_analysis()
+        real_time_analysis()

     elif app_mode == "Dashboard":
         st.header("Call Summaries and Sentiment Analysis")
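For quick experimentation, the WebRTC capture path added in this commit can be exercised on its own. The sketch below (not part of the commit) reuses the same webrtc_streamer configuration as real_time_analysis but stubs the analysis out and only logs each incoming frame to the server console; print_frame_info and the key value are illustrative names, and it assumes streamlit-webrtc and its PyAV dependency are installed. Run it with streamlit run.

import streamlit as st
from streamlit_webrtc import webrtc_streamer, WebRtcMode

def print_frame_info(frame):
    samples = frame.to_ndarray()   # PyAV AudioFrame -> NumPy array of samples
    print(f"got {samples.shape} samples at {frame.sample_rate} Hz")
    return frame                   # frame callbacks are expected to return a frame

st.title("Audio capture smoke test")
webrtc_streamer(
    key="audio-smoke-test",
    mode=WebRtcMode.SENDONLY,      # capture browser audio only, nothing is played back
    audio_frame_callback=print_frame_info,
    media_stream_constraints={"audio": True, "video": False},
)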