import speech_recognition as sr
from sentiment_analysis import analyze_sentiment, transcribe_with_chunks
from product_recommender import ProductRecommender
from objection_handler import ObjectionHandler
from google_sheets import fetch_call_data, store_data_in_sheet
from sentence_transformers import SentenceTransformer
from env_setup import config
import re
import uuid
import pandas as pd
import plotly.express as px
import streamlit as st
import numpy as np
from io import BytesIO
import wave
import threading
import queue
from streamlit_webrtc import webrtc_streamer, WebRtcMode, AudioProcessorBase

# Initialize shared components once at import time.
objection_handler = ObjectionHandler("objections.csv")
product_recommender = ProductRecommender("recommendations.csv")
model = SentenceTransformer('all-MiniLM-L6-v2')

# Queue that carries transcribed text from the WebRTC audio thread to the UI loop.
transcription_queue = queue.Queue()


def generate_comprehensive_summary(chunks):
    """Build a multi-line, human-readable summary of a transcribed call.

    Parameters
    ----------
    chunks : sequence of tuples
        Each element is (text, sentiment, ...) where sentiment is one of
        'POSITIVE', 'NEGATIVE' or 'NEUTRAL'.

    Returns
    -------
    str
        Summary covering detected themes, sentiment counts, up to three
        key interaction snippets, and an overall outcome line.
    """
    # Lower-case the transcript once instead of once per keyword test.
    full_text_lower = " ".join(chunk[0] for chunk in chunks).lower()
    sentiments = [chunk[1] for chunk in chunks]

    context_keywords = {
        'product_inquiry': ['dress', 'product', 'price', 'stock'],
        'pricing': ['cost', 'price', 'budget'],
        'negotiation': ['installment', 'payment', 'manage'],
    }

    # A theme applies when any of its keywords appears anywhere in the call.
    themes = [
        theme
        for theme, keywords in context_keywords.items()
        if any(keyword in full_text_lower for keyword in keywords)
    ]

    positive_count = sentiments.count('POSITIVE')
    negative_count = sentiments.count('NEGATIVE')
    neutral_count = sentiments.count('NEUTRAL')

    key_terms = ('price', 'dress', 'stock', 'installment')
    key_interactions = [
        chunk[0]
        for chunk in chunks
        if any(term in chunk[0].lower() for term in key_terms)
    ]

    # Assemble with list + join instead of repeated str += (avoids
    # quadratic behavior and keeps each fragment readable).
    parts = ["Conversation Summary:\n"]
    if 'product_inquiry' in themes:
        parts.append("• Customer initiated a product inquiry about items.\n")
    if 'pricing' in themes:
        parts.append("• Price and budget considerations were discussed.\n")
    if 'negotiation' in themes:
        parts.append("• Customer and seller explored flexible payment options.\n")
    parts.append("\nConversation Sentiment:\n")
    parts.append(f"• Positive Interactions: {positive_count}\n")
    parts.append(f"• Negative Interactions: {negative_count}\n")
    parts.append(f"• Neutral Interactions: {neutral_count}\n")
    parts.append("\nKey Conversation Points:\n")
    for interaction in key_interactions[:3]:
        parts.append(f"• {interaction}\n")
    if positive_count > negative_count:
        parts.append("\nOutcome: Constructive and potentially successful interaction.")
    elif negative_count > positive_count:
        parts.append("\nOutcome: Interaction may require further follow-up.")
    else:
        parts.append("\nOutcome: Neutral interaction with potential for future engagement.")
    return "".join(parts)


def is_valid_input(text):
    """Return True for inputs at least 3 characters long that contain only
    letters and whitespace (checked after stripping and lower-casing)."""
    text = text.strip().lower()
    return len(text) >= 3 and re.match(r'^[a-zA-Z\s]*$', text) is not None


def is_relevant_sentiment(sentiment_score):
    """A score above 0.4 is treated as strong enough to act on."""
    return sentiment_score > 0.4


def calculate_overall_sentiment(sentiment_scores):
    """Collapse a list of numeric sentiment scores into a single label.

    Returns 'POSITIVE' when the mean is > 0, 'NEGATIVE' when < 0, and
    'NEUTRAL' otherwise (including for an empty list).
    """
    if not sentiment_scores:
        return "NEUTRAL"
    average_sentiment = sum(sentiment_scores) / len(sentiment_scores)
    if average_sentiment > 0:
        return "POSITIVE"
    if average_sentiment < 0:
        return "NEGATIVE"
    return "NEUTRAL"


def handle_objection(text):
    """Look up an objection-handling response for *text*.

    Encodes the text, searches the objection handler's nearest-neighbour
    index (search returns (distances, indices)), and only consults the
    handler when the closest match is within distance 1.5. Falls back to
    a fixed message otherwise.
    """
    query_embedding = model.encode([text])
    distances, indices = objection_handler.index.search(query_embedding, 1)
    if distances[0][0] < 1.5:
        responses = objection_handler.handle_objection(text)
        return "\n".join(responses) if responses else "No objection response found."
    return "No objection response found."
def transcribe_audio(audio_bytes, sample_rate=16000):
    """Wrap raw PCM bytes in a WAV container and transcribe them.

    Parameters
    ----------
    audio_bytes : bytes
        Mono 16-bit PCM samples.
    sample_rate : int
        Sample rate written to the WAV header (default 16000).
        NOTE(review): WebRTC audio frames are commonly 48 kHz — confirm
        incoming frames match this default, otherwise the WAV header is
        wrong and transcription quality will suffer.

    Returns
    -------
    str or None
        Text of the last transcribed chunk, or None on error / no result.
    """
    try:
        with BytesIO() as wav_buffer:
            with wave.open(wav_buffer, 'wb') as wf:
                wf.setnchannels(1)   # mono
                wf.setsampwidth(2)   # 16-bit samples
                wf.setframerate(sample_rate)
                wf.writeframes(audio_bytes)
            st.write("Audio saved, attempting transcription...")
            chunks = transcribe_with_chunks(wav_buffer.getvalue())
            if chunks:
                st.write(f"Transcribed chunks: {chunks}")
                # Only the most recent chunk's text is returned.
                return chunks[-1][0]
    except Exception as e:
        st.error(f"Error transcribing audio: {e}")
    return None


class AudioProcessor(AudioProcessorBase):
    """streamlit-webrtc processor that transcribes each incoming frame and
    pushes recognized text onto the shared module-level queue."""

    def __init__(self):
        # Shared queue; drained by real_time_analysis() on the UI side.
        self.transcription_queue = transcription_queue

    def recv(self, frame):
        """Process one audio frame and return it unchanged (pass-through)."""
        audio_data = frame.to_ndarray()
        st.write(f"Received audio frame: {audio_data.shape}")
        # NOTE(review): this assumes to_ndarray() yields floats in [-1, 1].
        # If the frame is already int16 PCM, multiplying by 32767 overflows —
        # confirm the frame format from streamlit-webrtc/PyAV.
        audio_bytes = (audio_data * 32767).astype(np.int16).tobytes()
        text = transcribe_audio(audio_bytes)
        if text:
            st.write(f"Transcribed text: {text}")
            self.transcription_queue.put(text)
        return frame


def real_time_analysis():
    """Run the live-call UI: stream microphone audio, transcribe, analyze.

    Starts a send-only WebRTC stream whose AudioProcessor feeds
    transcription_queue, then drains the queue and renders sentiment,
    objection responses, and (gated) product recommendations.
    """
    st.info("Listening... Say 'stop' to end the process.")
    webrtc_ctx = webrtc_streamer(
        key="real-time-audio",
        mode=WebRtcMode.SENDONLY,
        audio_processor_factory=AudioProcessor,
        media_stream_constraints={"audio": True, "video": False},
    )
    if webrtc_ctx.state.playing:
        # Drain whatever the audio thread has transcribed so far.
        while not transcription_queue.empty():
            text = transcription_queue.get()
            st.write(f"*Recognized Text:* {text}")
            sentiment, score = analyze_sentiment(text)
            st.write(f"*Sentiment:* {sentiment} (Score: {score})")
            objection_response = handle_objection(text)
            st.write(f"*Objection Response:* {objection_response}")
            recommendations = []
            if is_valid_input(text) and is_relevant_sentiment(score):
                query_embedding = model.encode([text])
                # Same nearest-neighbour gating as handle_objection: only
                # recommend when the closest product is within distance 1.5.
                distances, indices = product_recommender.index.search(query_embedding, 1)
                if distances[0][0] < 1.5:
                    recommendations = product_recommender.get_recommendations(text)
            if recommendations:
                st.write("*Product Recommendations:*")
                for rec in recommendations:
                    st.write(rec)


def load_google_sheets_data():
    """Fetch call data from the configured Google Sheet.

    Returns the DataFrame on success; returns None (after surfacing a
    Streamlit warning/error) when the sheet is empty or the fetch fails.
    """
    try:
        data = fetch_call_data(config["google_sheet_id"])
        if data.empty:
            st.warning("No data available in the Google Sheet.")
        else:
            return data
    except Exception as e:
        st.error(f"Error loading data from Google Sheets: {e}")
    return None


def run_app():
    """Top-level Streamlit app: real-time analysis mode and dashboard mode."""
    st.set_page_config(page_title="Sales Call Assistant", layout="wide")
    st.title("AI Sales Call Assistant")
    # TODO(review): the HTML/CSS content of the markdown blocks below was
    # lost when this file was mangled; the calls are kept (with empty or
    # minimal bodies) so the layout hooks remain. Restore the original
    # markup from version control.
    st.markdown(""" """, unsafe_allow_html=True)
    st.markdown("""

AI Sales Call Assistant

""", unsafe_allow_html=True)
    st.sidebar.title("Navigation")
    app_mode = st.sidebar.radio("Choose a mode:", ["Real-Time Call Analysis", "Dashboard"])

    if app_mode == "Real-Time Call Analysis":
        st.markdown('', unsafe_allow_html=True)  # TODO(review): lost HTML literal
        st.header("Real-Time Sales Call Analysis")
        st.markdown('', unsafe_allow_html=True)  # TODO(review): lost HTML literal
        if st.button("Start Listening"):
            real_time_analysis()

    elif app_mode == "Dashboard":
        st.markdown('', unsafe_allow_html=True)  # TODO(review): lost HTML literal
        st.header("Call Summaries and Sentiment Analysis")
        data = load_google_sheets_data()
        if data is not None:
            try:
                sentiment_counts = data['Sentiment'].value_counts()
                # NOTE(review): filter_product_mentions and product_titles are
                # not defined or imported anywhere in this file — this raises
                # NameError at runtime (swallowed by the except below as
                # "Error processing data"). Define or import both.
                product_mentions = filter_product_mentions(data[['Chunk']].values.tolist(), product_titles)
                product_mentions_df = pd.DataFrame(list(product_mentions.items()), columns=['Product', 'Count'])

                col1, col2 = st.columns(2)
                with col1:
                    st.subheader("Sentiment Distribution")
                    fig_bar = px.bar(
                        x=sentiment_counts.index,
                        y=sentiment_counts.values,
                        title='Number of Calls by Sentiment',
                        labels={'x': 'Sentiment', 'y': 'Number of Calls'},
                        color=sentiment_counts.index,
                        color_discrete_map={
                            'POSITIVE': 'green',
                            'NEGATIVE': 'red',
                            'NEUTRAL': 'blue'
                        }
                    )
                    st.plotly_chart(fig_bar)
                with col2:
                    st.subheader("Most Mentioned Products")
                    fig_products = px.pie(
                        values=product_mentions_df['Count'],
                        names=product_mentions_df['Product'],
                        title='Most Mentioned Products'
                    )
                    st.plotly_chart(fig_products)

                st.subheader("All Calls")
                display_data = data.copy()
                # Truncated preview; '...' is appended unconditionally, even
                # for summaries shorter than 100 characters.
                display_data['Summary Preview'] = display_data['Summary'].str[:100] + '...'
                st.dataframe(display_data[['Call ID', 'Chunk', 'Sentiment', 'Summary Preview', 'Overall Sentiment']])

                # Drill-down: pick one call (rows with an empty Call ID are skipped).
                unique_call_ids = data[data['Call ID'] != '']['Call ID'].unique()
                call_id = st.selectbox("Select a Call ID to view details:", unique_call_ids)
                call_details = data[data['Call ID'] == call_id]
                if not call_details.empty:
                    st.subheader("Detailed Call Information")
                    st.write(f"**Call ID:** {call_id}")
                    st.write(f"**Overall Sentiment:** {call_details.iloc[0]['Overall Sentiment']}")
                    st.subheader("Full Call Summary")
                    st.text_area("Summary:", value=call_details.iloc[0]['Summary'], height=200, disabled=True)
                    st.subheader("Conversation Chunks")
                    for _, row in call_details.iterrows():
                        if pd.notna(row['Chunk']):
                            st.write(f"**Chunk:** {row['Chunk']}")
                            st.write(f"**Sentiment:** {row['Sentiment']}")
                            st.write("---")
                else:
                    st.error("No details available for the selected Call ID.")
            except Exception as e:
                st.error(f"Error processing data: {e}")
    st.markdown('', unsafe_allow_html=True)  # TODO(review): lost HTML literal


if __name__ == "__main__":
    run_app()