Build error
Upload 9 files
- app.py +298 -0
- credentials.json +13 -0
- main.py +58 -0
- objection_handling.py +42 -0
- recommendations.py +19 -0
- requirements.txt +0 -0
- sentiment.py +177 -0
- setup.py +13 -0
- sheets.py +77 -0
app.py
ADDED
@@ -0,0 +1,298 @@
import speech_recognition as sr
from sentiment import analyze_sentiment
from recommendations import ProductRecommender
from objection_handling import ObjectionHandler
from sheets import fetch_call_data, store_data_in_sheet
from sentence_transformers import SentenceTransformer
from setup import config
import re
import uuid
from google.oauth2 import service_account
from googleapiclient.discovery import build
import pandas as pd
import plotly.express as px
import plotly.graph_objs as go
import streamlit as st

# Initialize components
product_recommender = ProductRecommender(r"C:\Users\Gowri Shankar\Downloads\AI-Sales-Call-Assistant--main\Sales_Calls_Transcriptions_Sheet2.csv")
objection_handler = ObjectionHandler(r"C:\Users\Gowri Shankar\Downloads\AI-Sales-Call-Assistant--main\Sales_Calls_Transcriptions_Sheet3.csv")
model = SentenceTransformer('all-MiniLM-L6-v2')

def generate_comprehensive_summary(chunks):
    """
    Generate a comprehensive summary from conversation chunks.
    """
    # Extract full text from chunks
    full_text = " ".join([chunk[0] for chunk in chunks])

    # Perform basic analysis
    total_chunks = len(chunks)
    sentiments = [chunk[1] for chunk in chunks]

    # Determine overall conversation context
    context_keywords = {
        'product_inquiry': ['dress', 'product', 'price', 'stock'],
        'pricing': ['cost', 'price', 'budget'],
        'negotiation': ['installment', 'payment', 'manage']
    }

    # Detect conversation themes
    themes = []
    for keyword_type, keywords in context_keywords.items():
        if any(keyword.lower() in full_text.lower() for keyword in keywords):
            themes.append(keyword_type)

    # Basic sentiment analysis
    positive_count = sentiments.count('POSITIVE')
    negative_count = sentiments.count('NEGATIVE')
    neutral_count = sentiments.count('NEUTRAL')

    # Key interaction highlights
    key_interactions = []
    for chunk in chunks:
        if any(keyword.lower() in chunk[0].lower() for keyword in ['price', 'dress', 'stock', 'installment']):
            key_interactions.append(chunk[0])

    # Construct summary
    summary = "Conversation Summary:\n"

    # Context and themes
    if 'product_inquiry' in themes:
        summary += "• Customer initiated a product inquiry about items.\n"

    if 'pricing' in themes:
        summary += "• Price and budget considerations were discussed.\n"

    if 'negotiation' in themes:
        summary += "• Customer and seller explored flexible payment options.\n"

    # Sentiment insights
    summary += "\nConversation Sentiment:\n"
    summary += f"• Positive Interactions: {positive_count}\n"
    summary += f"• Negative Interactions: {negative_count}\n"
    summary += f"• Neutral Interactions: {neutral_count}\n"

    # Key highlights
    summary += "\nKey Conversation Points:\n"
    for interaction in key_interactions[:3]:  # Limit to top 3 key points
        summary += f"• {interaction}\n"

    # Conversation outcome
    if positive_count > negative_count:
        summary += "\nOutcome: Constructive and potentially successful interaction."
    elif negative_count > positive_count:
        summary += "\nOutcome: Interaction may require further follow-up."
    else:
        summary += "\nOutcome: Neutral interaction with potential for future engagement."

    return summary

def is_valid_input(text):
    text = text.strip().lower()
    if len(text) < 3 or re.match(r'^[a-zA-Z\s]*$', text) is None:
        return False
    return True

def is_relevant_sentiment(sentiment_score):
    return sentiment_score > 0.4

def calculate_overall_sentiment(sentiment_scores):
    if sentiment_scores:
        average_sentiment = sum(sentiment_scores) / len(sentiment_scores)
        overall_sentiment = (
            "POSITIVE" if average_sentiment > 0 else
            "NEGATIVE" if average_sentiment < 0 else
            "NEUTRAL"
        )
    else:
        overall_sentiment = "NEUTRAL"
    return overall_sentiment

def real_time_analysis():
    recognizer = sr.Recognizer()
    mic = sr.Microphone()

    st.info("Say 'stop' to end the process.")

    sentiment_scores = []
    transcribed_chunks = []
    total_text = ""

    try:
        while True:
            with mic as source:
                st.write("Listening...")
                recognizer.adjust_for_ambient_noise(source)
                audio = recognizer.listen(source)

            try:
                st.write("Recognizing...")
                text = recognizer.recognize_google(audio)
                st.write(f"*Recognized Text:* {text}")

                if 'stop' in text.lower():
                    st.write("Stopping real-time analysis...")
                    break

                # Append to the total conversation
                total_text += text + " "
                sentiment, score = analyze_sentiment(text)
                sentiment_scores.append(score)

                # Handle objection
                objection_response = handle_objection(text)

                # Get product recommendation
                recommendations = []
                if is_valid_input(text) and is_relevant_sentiment(score):
                    query_embedding = model.encode([text])
                    distances, indices = product_recommender.index.search(query_embedding, 1)

                    if distances[0][0] < 1.5:  # Similarity threshold
                        recommendations = product_recommender.get_recommendations(text)

                transcribed_chunks.append((text, sentiment, score))

                st.write(f"*Sentiment:* {sentiment} (Score: {score})")
                st.write(f"*Objection Response:* {objection_response}")

                if recommendations:
                    st.write("*Product Recommendations:*")
                    for rec in recommendations:
                        st.write(rec)

            except sr.UnknownValueError:
                st.error("Speech Recognition could not understand the audio.")
            except sr.RequestError as e:
                st.error(f"Error with the Speech Recognition service: {e}")
            except Exception as e:
                st.error(f"Error during processing: {e}")

        # After the conversation ends, calculate and display the overall sentiment and summary
        overall_sentiment = calculate_overall_sentiment(sentiment_scores)
        call_summary = generate_comprehensive_summary(transcribed_chunks)

        st.subheader("Conversation Summary:")
        st.write(total_text.strip())
        st.subheader("Overall Sentiment:")
        st.write(overall_sentiment)

        # Store data in Google Sheets
        store_data_in_sheet(
            config["google_sheet_id"],
            transcribed_chunks,
            call_summary,
            overall_sentiment
        )
        st.success("Conversation data stored successfully in Google Sheets!")

    except Exception as e:
        st.error(f"Error in real-time analysis: {e}")

def handle_objection(text):
    query_embedding = model.encode([text])
    distances, indices = objection_handler.index.search(query_embedding, 1)
    if distances[0][0] < 1.5:  # Adjust similarity threshold as needed
        responses = objection_handler.handle_objection(text)
        return "\n".join(responses) if responses else "No objection response found."
    return "No objection response found."

def run_app():
    st.set_page_config(page_title="Vocalytics", layout="wide")
    st.title("AI Sales Call Assistant")

    st.sidebar.title("Navigation")
    app_mode = st.sidebar.radio("Menu", ["Home", "Real-Time Recommendations", "Analysis", "Full Call Summary"])

    if app_mode == "Home":
        st.title("Welcome to the AI Sales Assistant Dashboard!")
        st.markdown("""
        ### Features:
        - Real-Time Transcription: Live transcription with sentiment analysis.
        - Product Recommendations: Relevant suggestions based on customer conversations.
        - Objection Handling: Automatic detection and response to objections.
        - Data Summary: Historical insights stored in Google Sheets.
        - Analytics: Visualize trends and sentiment distribution.
        """)

    elif app_mode == "Real-Time Recommendations":
        st.header("Real-Time Recommendations")
        if st.button("Start Listening"):
            real_time_analysis()

    elif app_mode == "Analysis":
        st.header("Call Summary and Analysis")
        try:
            data = fetch_call_data(config["google_sheet_id"])
            if data.empty:
                st.warning("No data available in the Google Sheet.")
            else:
                # Sentiment visualizations
                sentiment_counts = data['Sentiment'].value_counts()

                # Pie chart
                col1, col2 = st.columns(2)
                with col1:
                    st.subheader("Sentiment Distribution")
                    fig_pie = px.pie(
                        values=sentiment_counts.values,
                        names=sentiment_counts.index,
                        title='Call Sentiment Breakdown',
                        color_discrete_map={
                            'POSITIVE': 'green',
                            'NEGATIVE': 'red',
                            'NEUTRAL': 'pink'
                        }
                    )
                    st.plotly_chart(fig_pie)

                # Line chart for sentiment over time
                with col2:
                    st.subheader("Sentiment Over Time")
                    if 'Sentiment' in data.columns:
                        data['Index'] = range(1, len(data) + 1)  # Generate indices as a time proxy
                        fig_line = px.line(
                            data,
                            x='Index',
                            y='Sentiment',
                            title='Sentiment Trend During Calls',
                            markers=True,
                            labels={'Index': 'Call Progress (Sequential)', 'Sentiment': 'Sentiment'}
                        )
                        st.plotly_chart(fig_line)
                    else:
                        st.warning("Sentiment data is not available for trend visualization.")

                # Call details section
                st.subheader("All Calls")
                display_data = data.copy()
                display_data['Summary Preview'] = display_data['Summary'].str[:100] + '...'
                st.dataframe(display_data[['Chunk', 'Sentiment', 'Summary Preview', 'Overall Sentiment']])

        except Exception as e:
            st.error(f"Error loading dashboard: {e}")

    elif app_mode == "Full Call Summary":
        st.header("Full Call Summary")
        try:
            data = fetch_call_data(config["google_sheet_id"])
            if data.empty:
                st.warning("No data available in the Google Sheet.")
            else:
                data = data.dropna(subset=['Chunk', 'Summary'])

                for index, row in data.iterrows():
                    st.text_area(
                        label=f"Call Summary {index + 1}",
                        value=row['Summary'],
                        height=200,
                        key=f"summary_{index}"
                    )
        except Exception as e:
            st.error(f"Error loading full call summary: {e}")

if __name__ == "__main__":
    run_app()
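
Note on the matching logic above: faiss.IndexFlatL2 returns squared L2 distances, and both real_time_analysis() and handle_objection() accept a nearest neighbour only when its distance is below 1.5. A minimal, self-contained sketch of that pattern (the toy corpus and query are illustrative, not from this repo):

import faiss
from sentence_transformers import SentenceTransformer

model = SentenceTransformer('all-MiniLM-L6-v2')
corpus = ["Do you have this dress in stock?", "The weather is nice today."]
embeddings = model.encode(corpus)  # float32 array, shape (2, 384) for this model

index = faiss.IndexFlatL2(embeddings.shape[1])
index.add(embeddings)

query = model.encode(["Is that dress available in medium?"])
distances, indices = index.search(query, 1)  # nearest neighbour only
if distances[0][0] < 1.5:  # same gate app.py uses before surfacing a match
    print("match:", corpus[indices[0][0]])
else:
    print("no close match; skip recommendations")
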
credentials.json
ADDED
@@ -0,0 +1,13 @@
{
  "type": "service_account",
  "project_id": "teak-citadel-438407-i8",
  "private_key_id": "fe971d8de6089e8c54f1c853d4485602d5f2c642",
  "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQChc0kgGN1xbuf9\niQ7q8175856U+lkwosOuTLsnzzHnNwJmgxwMUU0OTJCWtNystcVXdwZng21/Gl9E\nJIYbrgeW7WURwz0mCtYvfRFzILrGkOqOKBgAoRbKzesAm7x22TlgIXChPY6sdpFM\nVeiaeXEWwB3CLgoo0C24wQbkLvGhEgoOISaXko90wnm4SJUlPBTK4nSeaw6/Kljz\nFvidvspCfwOz/P3Cs2sPrVQzIZW96Kz4GStMApCentFAfPFU5rKc8r/Nn6cdepTZ\nGwEFM41thmvJo+sENme0Lmu4GLX1DlaZmgBe8df8L0NgknDdAZ4wULXg5U2Higmb\ndVlQVCxBAgMBAAECggEAEMKqlzntly6Hx+K4GTz9oZ4gWt/obWugO9J8ZWQVjqa/\nyYuX2FGCWdcp9XM+K3HNVEincgyLS/QdarIN+AanhAwN/3uQuAfM924Y5yZBLYby\nyqIv57TRbGlXGZ+RP5dKZQfjrOF6k73VVKLgfFTaIB3+mN91r4n5DSKAyCR1ch6G\neBom4Q+5571Z/eYCRUZeMf0fVjw/eGRenMK01d1MQYt59G+PZLm0ZmBAOZmDh6yT\nkaCgpZlQnK6DEnhSHnPTWn3V8D1BUHF30/qypBER2GY9NToRFQ3mk5/BZ8b8JQsU\nOyyzFokSnhIKcYiBwwTXOjeDjlsFu4cBw8/0cx/mKQKBgQDa7+gvydpFf7XRTT+P\nLFoBD0cr2XX+xJ5GbQKhdPlgtZZ15YSGW5ogElRRRlXhewCbkCDGWjNBfqjfjdCe\nVQeF2gqRDyg47OkhK+yKgfk52g++BzuupNgMIsxpSehLHQmuqPVVQFDuTiCQc79s\nZ/h+K6SmtKhb2cfo9te8OOioZQKBgQC8yBHBuMHdzDa3iPfSNGLXFReTOJpQxLBr\nqKqHHPN2zSFQuKVc2Hx0vAoG8B3lny82ewV1qVvvRLfi8tUXKUOquhhPoLbs1knp\nUkp0E18hMZc90XxMb1xqORO0hWQG1A8RjSiBiD5Sj8x+Ev/bx9mvpBdwPQReFQ5e\neN1+35rgrQKBgQCwuJ3OlTxehOuZ+qSnSMGNwWtNdcbM0qB/cFaDRQxWh7zfhVgn\nq6EI6kKtJ/0CeGrRJtGNyhk9cKOXb71JUJE19f45gOaSrIixgeb5Fli53c5/MQ93\ntQ+/HAerpUCgANxj43XETpoFAYM/lm6ZboMiNNA+z+xl17CJ11XkVHtYEQKBgD8B\ngzOlp0aQbr6ovxVwGIDycDDZll9Ylg0IeQmpNvSyl+jqB37KXhdT3wDbyQ6nJ0nG\nm1be1GkyBTGAnHAjfIsRmyWQXR0ElByEM4c0I+7fLzwZm6OjEzP8osAAORcyVWkl\nNJyuzSYfCkAVtUE0HUpKuOvj4X5JBsevDTXYCUftAoGBALvQglLh1Bt7gXGheOMO\ncJQ794nfHMTgOl5Zag/hgDxuSc5sLzJ49bfHg5EkMSbZlEBfHrX9PPBYKHucbZQJ\nWO9ljQCN1cpoyyoyTPzldqkahjpiLBx13+VfqXvKE7jxuU4Aj8PPkf/G7eCVkxDh\n9+X9F5KX0FBt672aGr2C/C3O\n-----END PRIVATE KEY-----\n",
  "client_email": "[email protected]",
  "client_id": "111664813751492591578",
  "auth_uri": "https://accounts.google.com/o/oauth2/auth",
  "token_uri": "https://oauth2.googleapis.com/token",
  "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
  "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/akshaya%40teak-citadel-438407-i8.iam.gserviceaccount.com",
  "universe_domain": "googleapis.com"
}
main.py
ADDED
@@ -0,0 +1,58 @@
from sentiment import transcribe_with_chunks
import streamlit as st
from sheets import store_data_in_sheet
from setup import config
from recommendations import ProductRecommender
from objection_handling import load_objections, ObjectionHandler

def main():
    # Load objections and products
    product_recommender = ProductRecommender(r"C:\Users\Gowri Shankar\Downloads\AI-Sales-Call-Assistant--main\Sales_Calls_Transcriptions_Sheet2.csv")
    objection_handler = ObjectionHandler(r"C:\Users\Gowri Shankar\Downloads\AI-Sales-Call-Assistant--main\Sales_Calls_Transcriptions_Sheet3.csv")

    # Load objections at the start of the script
    objections_file_path = r"C:\Users\Gowri Shankar\Downloads\AI-Sales-Call-Assistant--main\Sales_Calls_Transcriptions_Sheet3.csv"
    objections_dict = load_objections(objections_file_path)

    # Call the transcription function, which now includes objection handling
    transcribed_chunks = transcribe_with_chunks(objections_dict)

    total_text = ""
    sentiment_scores = []

    for chunk, sentiment, score in transcribed_chunks:
        if chunk.strip():
            total_text += chunk + " "  # Accumulate the conversation text
            sentiment_scores.append(score if sentiment == "POSITIVE" else -score)

            # Check for product recommendations
            recommendations = product_recommender.get_recommendations(chunk)
            if recommendations:
                print(f"Recommendations for chunk: '{chunk}'")
                st.write(f"Recommendations for chunk: '{chunk}'")
                for idx, rec in enumerate(recommendations, 1):
                    print(f"{idx}. {rec}")
                    st.write(f"{idx}. {rec}")

            # Check for objections
            objection_responses = objection_handler.handle_objection(chunk)
            if objection_responses:
                for response in objection_responses:
                    print(f"Objection Response: {response}")
                    st.write(f"Objection Response: {response}")

    # Determine overall sentiment
    overall_sentiment = "POSITIVE" if sum(sentiment_scores) > 0 else "NEGATIVE"
    print(f"Overall Sentiment: {overall_sentiment}")
    st.write(f"Overall Sentiment: {overall_sentiment}")

    # Generate a summary of the conversation
    print(f"Conversation Summary: {total_text.strip()}")
    st.write(f"Conversation Summary: {total_text.strip()}")

    # Store data in Google Sheets
    store_data_in_sheet(config["google_sheet_id"], transcribed_chunks, total_text.strip(), overall_sentiment)

if __name__ == "__main__":
    main()
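
Note the sign convention in the loop above: a POSITIVE chunk contributes +score, any other label contributes -score, and the call is labelled POSITIVE only if the running sum ends above zero. A worked example with made-up scores:

chunks = [("love this dress", "POSITIVE", 0.9),
          ("too expensive for me", "NEGATIVE", 0.7)]
scores = [s if label == "POSITIVE" else -s for _, label, s in chunks]
overall = "POSITIVE" if sum(scores) > 0 else "NEGATIVE"
print(overall)  # 0.9 - 0.7 = 0.2 > 0, so POSITIVE
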
objection_handling.py
ADDED
@@ -0,0 +1,42 @@
import pandas as pd
import streamlit as st
from sentence_transformers import SentenceTransformer
import faiss

def load_objections(file_path):
    """Load objections from a CSV file into a dictionary."""
    try:
        objections_df = pd.read_csv(file_path)
        objections_dict = {}
        for index, row in objections_df.iterrows():
            objections_dict[row['Customer Objection']] = row['Salesperson Response']
        return objections_dict
    except Exception as e:
        print(f"Error loading objections: {e}")
        st.write(f"Error loading objections: {e}")
        return {}

def check_objections(text, objections_dict):
    """Check for objections in the given text and return responses."""
    responses = []
    for objection, response in objections_dict.items():
        if objection.lower() in text.lower():
            responses.append(response)
    return responses

class ObjectionHandler:
    def __init__(self, objection_data_path):
        self.data = pd.read_csv(objection_data_path, encoding='latin1')
        self.model = SentenceTransformer('all-MiniLM-L6-v2')
        self.embeddings = self.model.encode(self.data['Customer Objection'].tolist())
        self.index = faiss.IndexFlatL2(self.embeddings.shape[1])
        self.index.add(self.embeddings)

    def handle_objection(self, query, top_n=1):
        """Handle objections using embeddings."""
        query_embedding = self.model.encode([query])
        distances, indices = self.index.search(query_embedding, top_n)
        responses = []
        for i in indices[0]:
            responses.append(self.data.iloc[i]['Salesperson Response'])
        return responses
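
Both entry points in this module assume a CSV with 'Customer Objection' and 'Salesperson Response' columns. A hypothetical usage sketch (the file name and rows below are illustrative, not part of the repo):

# objections.csv (illustrative):
# Customer Objection,Salesperson Response
# It is too expensive,We offer installment plans to spread the cost.
handler = ObjectionHandler("objections.csv")
print(handler.handle_objection("That price is too high for me"))
# e.g. ['We offer installment plans to spread the cost.']

Note that handle_objection() itself always returns the nearest neighbours; the distance threshold that filters weak matches lives in the callers (app.py and sentiment.py).
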
recommendations.py
ADDED
@@ -0,0 +1,19 @@
import pandas as pd
from sentence_transformers import SentenceTransformer
import faiss

class ProductRecommender:
    def __init__(self, product_data_path):
        self.data = pd.read_csv(product_data_path, encoding='latin1')
        self.model = SentenceTransformer('all-MiniLM-L6-v2')
        self.embeddings = self.model.encode(self.data['product_description'].tolist())
        self.index = faiss.IndexFlatL2(self.embeddings.shape[1])
        self.index.add(self.embeddings)

    def get_recommendations(self, query, top_n=5):
        query_embedding = self.model.encode([query])
        distances, indices = self.index.search(query_embedding, top_n)
        recommendations = []
        for i in indices[0]:
            recommendations.append(self.data.iloc[i]['product_title'] + ": " + self.data.iloc[i]['product_description'])
        return recommendations
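
The recommender expects 'product_title' and 'product_description' columns in its CSV. A hypothetical usage sketch (file name and data are illustrative):

# products.csv (illustrative):
# product_title,product_description
# Floral Midi Dress,Lightweight summer dress with a floral print
recommender = ProductRecommender("products.csv")
for rec in recommender.get_recommendations("looking for a summer dress", top_n=1):
    print(rec)  # "Floral Midi Dress: Lightweight summer dress with a floral print"
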
requirements.txt
ADDED
Binary file (4.56 kB).
sentiment.py
ADDED
@@ -0,0 +1,177 @@
import os
import json
import time
import pyaudio
from vosk import Model, KaldiRecognizer
from transformers import pipeline, AutoModelForSequenceClassification, AutoTokenizer
from huggingface_hub import login
from recommendations import ProductRecommender
from objection_handling import load_objections, check_objections  # Ensure check_objections is imported
from objection_handling import ObjectionHandler
from setup import config
from sentence_transformers import SentenceTransformer
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

# Initialize the ProductRecommender
product_recommender = ProductRecommender(r"C:\Users\Gowri Shankar\Downloads\AI-Sales-Call-Assistant--main\Sales_Calls_Transcriptions_Sheet2.csv")

# Hugging Face API setup
huggingface_api_key = config["huggingface_api_key"]
login(token=huggingface_api_key)

# Sentiment analysis model
model_name = "tabularisai/multilingual-sentiment-analysis"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
sentiment_analyzer = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)

# Vosk speech recognition model
vosk_model_path = config["vosk_model_path"]

if not vosk_model_path:
    raise ValueError("Error: vosk_model_path is not set in the .env file.")

try:
    vosk_model = Model(vosk_model_path)
    print("Vosk model loaded successfully.")
except Exception as e:
    raise ValueError(f"Failed to load Vosk model: {e}")

recognizer = KaldiRecognizer(vosk_model, 16000)
audio = pyaudio.PyAudio()

stream = audio.open(format=pyaudio.paInt16,
                    channels=1,
                    rate=16000,
                    input=True,
                    frames_per_buffer=4000)
stream.start_stream()

def preprocess_text(text):
    """Preprocess text for better sentiment analysis."""
    # Strip whitespace and convert to lowercase
    return text.strip().lower()

def analyze_sentiment(text):
    """Analyze sentiment of the text using the Hugging Face model."""
    try:
        if not text.strip():
            return "NEUTRAL", 0.0

        processed_text = preprocess_text(text)
        result = sentiment_analyzer(processed_text)[0]

        print(f"Sentiment Analysis Result: {result}")

        # Map raw labels to sentiments
        sentiment_map = {
            'Very Negative': "NEGATIVE",
            'Negative': "NEGATIVE",
            'Neutral': "NEUTRAL",
            'Positive': "POSITIVE",
            'Very Positive': "POSITIVE"
        }

        sentiment = sentiment_map.get(result['label'], "NEUTRAL")
        return sentiment, result['score']

    except Exception as e:
        print(f"Error in sentiment analysis: {e}")
        return "NEUTRAL", 0.5

def transcribe_with_chunks(objections_dict):
    """Perform real-time transcription with sentiment analysis."""
    print("Say 'start listening' to begin transcription. Say 'stop listening' to stop.")
    is_listening = False
    chunks = []
    current_chunk = []
    chunk_start_time = time.time()

    # Initialize handlers with semantic search capabilities
    objection_handler = ObjectionHandler(r"C:\Users\Gowri Shankar\Downloads\AI-Sales-Call-Assistant--main\Sales_Calls_Transcriptions_Sheet3.csv")
    product_recommender = ProductRecommender(r"C:\Users\Gowri Shankar\Downloads\AI-Sales-Call-Assistant--main\Sales_Calls_Transcriptions_Sheet2.csv")

    # Load the embeddings model once
    model = SentenceTransformer('all-MiniLM-L6-v2')

    try:
        while True:
            data = stream.read(4000, exception_on_overflow=False)

            if recognizer.AcceptWaveform(data):
                result = recognizer.Result()
                text = json.loads(result)["text"]

                if "start listening" in text.lower():
                    is_listening = True
                    print("Listening started. Speak into the microphone.")
                    continue
                elif "stop listening" in text.lower():
                    is_listening = False
                    print("Listening stopped.")
                    if current_chunk:
                        chunk_text = " ".join(current_chunk)
                        sentiment, score = analyze_sentiment(chunk_text)
                        chunks.append((chunk_text, sentiment, score))
                        current_chunk = []
                    continue

                if is_listening and text.strip():
                    print(f"Transcription: {text}")
                    current_chunk.append(text)

                    if time.time() - chunk_start_time > 3:
                        if current_chunk:
                            chunk_text = " ".join(current_chunk)

                            # Always process sentiment
                            sentiment, score = analyze_sentiment(chunk_text)
                            chunks.append((chunk_text, sentiment, score))

                            # Get objection responses and check the similarity score
                            query_embedding = model.encode([chunk_text])
                            distances, indices = objection_handler.index.search(query_embedding, 1)

                            # If similarity is high enough, show the objection response
                            if distances[0][0] < 1.5:  # Threshold for similarity
                                responses = objection_handler.handle_objection(chunk_text)
                                if responses:
                                    print("\nSuggested Response:")
                                    for response in responses:
                                        print(f"→ {response}")

                            # Get product recommendations and check the similarity score
                            distances, indices = product_recommender.index.search(query_embedding, 1)

                            # If similarity is high enough, show recommendations
                            if distances[0][0] < 1.5:  # Threshold for similarity
                                recommendations = product_recommender.get_recommendations(chunk_text)
                                if recommendations:
                                    print("\nRecommendations for this response:")
                                    for idx, rec in enumerate(recommendations, 1):
                                        print(f"{idx}. {rec}")

                            print("\n")
                        current_chunk = []
                        chunk_start_time = time.time()

    except KeyboardInterrupt:
        print("\nExiting...")
        stream.stop_stream()

    return chunks

if __name__ == "__main__":
    objections_file_path = r"C:\Users\Gowri Shankar\Downloads\AI-Sales-Call-Assistant--main\Sales_Calls_Transcriptions_Sheet3.csv"
    objections_dict = load_objections(objections_file_path)
    transcribed_chunks = transcribe_with_chunks(objections_dict)
    print("Final transcriptions and sentiments:", transcribed_chunks)
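
For reference, transcribe_with_chunks() returns a list of (chunk_text, sentiment, score) tuples, which is the shape main.py and sheets.py consume downstream; a sketch with illustrative values:

chunks = [
    ("do you have this dress in medium", "NEUTRAL", 0.62),
    ("that price works for me", "POSITIVE", 0.91),
]
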
setup.py
ADDED
@@ -0,0 +1,13 @@
import os
from dotenv import load_dotenv

load_dotenv()

config = {
    "google_creds": os.getenv("google_creds"),
    "huggingface_api_key": os.getenv("huggingface_api_key"),
    "google_sheet_id": os.getenv("google_sheet_id"),
    "vosk_model_path": os.getenv("vosk_model_path"),
    "objections_file_path": os.getenv("objections_file_path"),
    "product_data_path": os.getenv("product_data_path")
}
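
Since setup.py only reads environment variables, the app needs a .env file alongside the code defining these six keys; a sketch with placeholder values (all paths and IDs below are illustrative):

google_creds=credentials.json
huggingface_api_key=hf_xxxxxxxxxxxxxxxxxxxx
google_sheet_id=your-google-sheet-id
vosk_model_path=vosk-model-small-en-us-0.15
objections_file_path=Sales_Calls_Transcriptions_Sheet3.csv
product_data_path=Sales_Calls_Transcriptions_Sheet2.csv
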
sheets.py
ADDED
@@ -0,0 +1,77 @@
import uuid
import streamlit as st
from google.oauth2 import service_account
from googleapiclient.discovery import build
from setup import config
import pandas as pd

SCOPES = ['https://www.googleapis.com/auth/spreadsheets']

def authenticate_google_account():
    service_account_file = config["google_creds"]
    if not service_account_file:
        raise ValueError("Service account credentials path is missing (set google_creds in the .env file read by setup.py).")
    return service_account.Credentials.from_service_account_file(service_account_file, scopes=SCOPES)

def store_data_in_sheet(sheet_id, chunks, summary, overall_sentiment):
    creds = authenticate_google_account()
    service = build('sheets', 'v4', credentials=creds)
    sheet = service.spreadsheets()

    call_id = str(uuid.uuid4())
    print(f"Call ID: {call_id}")

    values = []
    if chunks:
        first_chunk, first_sentiment, _ = chunks[0]
        values.append([call_id, first_chunk, first_sentiment, summary, overall_sentiment])
        for chunk, sentiment, _ in chunks[1:]:
            values.append(["", chunk, sentiment, "", ""])

    header = ["Call ID", "Chunk", "Sentiment", "Summary", "Overall Sentiment"]
    all_values = [header] + values

    body = {'values': all_values}
    try:
        result = sheet.values().append(
            spreadsheetId=sheet_id,
            range="Sheet1!A1",
            valueInputOption="RAW",
            body=body
        ).execute()
        print(f"{result.get('updates').get('updatedCells')} cells updated in Google Sheets.")
        st.write(f"{result.get('updates').get('updatedCells')} cells updated in Google Sheets.")
    except Exception as e:
        print(f"Error updating Google Sheets: {e}")
        st.write(f"Error updating Google Sheets: {e}")

def fetch_call_data(sheet_id, range_name="Sheet1!A1:E"):
    """
    Fetch previously saved call data from Google Sheets and return it as a pandas DataFrame.

    :param sheet_id: ID of the Google Sheet.
    :param range_name: The range to fetch data from.
    :return: pandas DataFrame containing the call data.
    """
    creds = authenticate_google_account()
    service = build('sheets', 'v4', credentials=creds)
    sheet = service.spreadsheets()

    try:
        result = sheet.values().get(spreadsheetId=sheet_id, range=range_name).execute()
        rows = result.get('values', [])
        if rows:
            # Convert rows to a pandas DataFrame
            header = rows[0]
            data = rows[1:]  # Skip the header row
            df = pd.DataFrame(data, columns=header)
            print(f"Fetched {len(df)} rows of data from Google Sheets.")
            st.write(f"Fetched {len(df)} rows of data from Google Sheets.")
            return df
        else:
            print("No data found in the sheet.")
            st.write("No data found in the sheet.")
            return pd.DataFrame()  # Return an empty DataFrame if no data
    except Exception as e:
        print(f"Error fetching data from Google Sheets: {e}")
        st.write(f"Error fetching data from Google Sheets: {e}")
        return pd.DataFrame()  # Return an empty DataFrame on error
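
The append logic above writes a header row plus one row per chunk, with Call ID, Summary, and Overall Sentiment filled only on the first row of each call; the resulting sheet looks roughly like this (values illustrative):

Call ID   Chunk                              Sentiment   Summary             Overall Sentiment
<uuid4>   do you have this dress in medium   NEUTRAL     Conversation ...    POSITIVE
          that price works for me            POSITIVE

Note that the header row is re-appended on every call to store_data_in_sheet(), so repeated calls interleave header rows with data rows.
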