Update app.py
app.py
CHANGED
@@ -1,6 +1,5 @@
 import streamlit as st
 from transformers import pipeline
-import re
 
 # Load models
 emotion_classifier = pipeline(
@@ -11,7 +10,6 @@ emotion_classifier = pipeline(
 intent_classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
 text_generator = pipeline("text2text-generation", model="declare-lab/flan-alpaca-base")
 
-# Predefined customer intents
 candidate_tasks = [
     "change mobile plan",
     "top up balance",
@@ -23,7 +21,6 @@ candidate_tasks = [
     "upgrade device"
 ]
 
-# Emotion refinement
 urgent_emotions = {"anger", "frustration", "anxiety", "urgency", "afraid", "annoyed"}
 moderate_emotions = {"confused", "sad", "tired", "concerned", "sadness"}
 
@@ -55,20 +52,20 @@ def get_emotion_score(emotion):
 
 def generate_response(intent, human=True):
     prompt = (
-        f"You are a telecom customer service assistant. For the customer intent '{intent}', generate a 3-part response
-
-        "[
+        f"You are a telecom customer service assistant. For the customer intent '{intent}', generate a 3-part response: "
+        "\n"
+        "[Greeting: polite welcome.] "
+        "\n"
+        "[Middle: mention the customer is currently on Plan X (¥X/month), and suggest switching to Plan Y with XXGB at ¥Y/month. Use fictional placeholder values.] "
+        "\n"
         "[End: ask if they'd like to proceed with the new plan or need more details.]"
     )
     result = text_generator(prompt, max_new_tokens=100, do_sample=False)
     return result[0]['generated_text'].strip()
 
-
-# Streamlit config
 st.set_page_config(page_title="Smart Customer Support Assistant", layout="wide")
 st.sidebar.title("📞 Customer Selector")
 
-# Session state setup
 if "customers" not in st.session_state:
     st.session_state.customers = {"Customer A": [], "Customer B": [], "Customer C": []}
 if "chat_sessions" not in st.session_state:
@@ -88,17 +85,14 @@ if selected_customer not in st.session_state.chat_sessions:
 
 session = st.session_state.chat_sessions[selected_customer]
 
-# Title
 st.title("Smart Customer Support Assistant (for Agents Only)")
 
-# Conversation display
 st.markdown("### Conversation")
 for msg in session["chat"]:
     avatar = "👤" if msg['role'] == 'user' else ("🤖" if msg.get("auto") else "👨‍💼")
     with st.chat_message(msg['role'], avatar=avatar):
         st.markdown(msg['content'])
 
-# Customer input and Analyze
 col1, col2 = st.columns([6, 1])
 with col1:
     user_input = st.text_input("Enter customer message:", key="customer_input")
@@ -124,7 +118,7 @@ if analyze_clicked and user_input.strip():
 
     if final_score < 0.5 and top_intents:
         intent = top_intents[0]
-        response = generate_response(intent, human=
+        response = generate_response(intent, human=True)
         session["chat"].append({"role": "assistant", "content": response, "auto": True})
         session["system_result"] = None
         session["support_required"] = "🟢 Automated response handled this request."
@@ -139,11 +133,9 @@ if analyze_clicked and user_input.strip():
 
     st.rerun()
 
-# Support judgment display
 if session["support_required"]:
     st.markdown(f"### {session['support_required']}")
 
-# Agent input
 st.subheader("Agent Response Console")
 session["agent_reply"] = st.text_area("Compose your reply:", value=session["agent_reply"], key="agent_reply_box")
 if st.button("Send Reply"):
@@ -154,7 +146,6 @@ if st.button("Send Reply"):
     session["support_required"] = ""
     st.experimental_rerun()
 
-# If human needed: show analysis & suggestions
 if session["system_result"] is not None:
     st.markdown("#### Customer Status")
     st.markdown(f"- **Emotion:** {session['system_result']['emotion'].capitalize()}")
@@ -166,7 +157,6 @@ if session["system_result"] is not None:
         st.markdown(f"**• {intent.capitalize()}**")
         st.code(suggestion)
 
-# End conversation button
 if st.button("End Conversation"):
     session["chat"] = []
     session["system_result"] = None
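
The triage logic that produces top_intents and final_score falls outside the hunks above; only its threshold is visible (final_score < 0.5 routes the message to the automated reply). Below is a minimal sketch of how that analysis step could be wired from the pieces the diff does show. The emotion model id, the analyze_message helper, and the scoring values are assumptions for illustration, not the app's actual code.

from transformers import pipeline

# Pipelines as declared at the top of app.py; the emotion model id here is an
# assumption, since the diff cuts off inside the emotion_classifier call.
emotion_classifier = pipeline("text-classification",
                              model="j-hartmann/emotion-english-distilroberta-base")
intent_classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")

# Subset of the intents listed in app.py.
candidate_tasks = ["change mobile plan", "top up balance", "upgrade device"]
urgent_emotions = {"anger", "frustration", "anxiety", "urgency", "afraid", "annoyed"}
moderate_emotions = {"confused", "sad", "tired", "concerned", "sadness"}

def get_emotion_score(emotion):
    # Stand-in for the app's scoring: urgent feelings push the score toward 1.0,
    # which keeps the request with a human agent.
    if emotion in urgent_emotions:
        return 1.0
    if emotion in moderate_emotions:
        return 0.5
    return 0.0

def analyze_message(text):  # hypothetical helper, not part of app.py
    emotion = emotion_classifier(text)[0]["label"].lower()
    ranking = intent_classifier(text, candidate_labels=candidate_tasks)
    top_intents = ranking["labels"][:3]       # zero-shot labels come back sorted by score
    final_score = get_emotion_score(emotion)  # how app.py combines signals is not shown
    return emotion, top_intents, final_score

emotion, top_intents, final_score = analyze_message("I'd like to change my mobile plan.")
print(emotion, top_intents[0], final_score)

Under this reading, calm messages score below 0.5 and go straight to generate_response, while urgent or ambiguous ones keep system_result populated so the agent console shows the emotion, intents, and suggested replies instead.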