omvishesh committed on
Commit
50b6ccf
·
verified ·
1 Parent(s): 16a11cb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +183 -36
app.py CHANGED
@@ -1,5 +1,126 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
 
4
  """
5
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
@@ -7,58 +128,84 @@ For more information on `huggingface_hub` Inference API support, please check th
7
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
 
9
 
 
 
10
  def respond(
11
- message,
12
- history: list[tuple[str, str]],
13
- system_message,
14
- max_tokens,
15
- temperature,
16
- top_p,
17
  ):
18
  messages = [{"role": "system", "content": system_message}]
19
-
20
  for val in history:
21
  if val[0]:
22
  messages.append({"role": "user", "content": val[0]})
23
  if val[1]:
24
  messages.append({"role": "assistant", "content": val[1]})
25
-
26
  messages.append({"role": "user", "content": message})
27
-
28
  response = ""
29
-
 
30
  for message in client.chat_completion(
31
- messages,
32
- max_tokens=max_tokens,
33
- stream=True,
34
- temperature=temperature,
35
- top_p=top_p,
36
  ):
37
  token = message.choices[0].delta.content
38
-
39
  response += token
40
  yield response
41
 
42
-
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
- demo = gr.ChatInterface(
47
- respond,
48
- additional_inputs=[
49
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
50
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
51
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
52
- gr.Slider(
53
- minimum=0.1,
54
- maximum=1.0,
55
- value=0.95,
56
- step=0.05,
57
- label="Top-p (nucleus sampling)",
58
- ),
59
- ],
60
- )
61
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
 
63
  if __name__ == "__main__":
64
  demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
+ from typing import TypedDict, Dict
4
+ from langgraph.graph import StateGraph, END
5
+ from langchain_core.prompts import ChatPromptTemplate
6
+ from langchain_core.runnables.graph import MermaidDrawMethod
7
+ from IPython.display import display, Image
8
+
9
class State(TypedDict):
    """Shared LangGraph state passed between workflow nodes.

    Each node returns a partial update dict (e.g. ``{"category": ...}``)
    that LangGraph merges into this state.
    """
    query: str      # raw customer query text supplied by the caller
    category: str   # label produced by `categorize` (prompt asks for Technical/Billing/General)
    sentiment: str  # label produced by `analyze_sentiment` (prompt asks for Positive/Neutral/Negative)
    response: str   # final answer produced by one of the handler nodes
14
+
15
import os

from langchain_groq import ChatGroq

# NOTE(security): the API key used to be hardcoded here. A key committed to a
# public repo must be considered leaked and revoked. Read it from the
# environment instead; the Space/host must define GROQ_API_KEY.
llm = ChatGroq(
    temperature=0,  # deterministic-ish outputs so routing labels stay stable
    groq_api_key=os.environ.get("GROQ_API_KEY"),
    model_name="llama-3.3-70b-versatile",
)
22
+
23
+
24
def categorize(state: State) -> State:
    """Classify the customer query and return a partial state update.

    Returns only the ``category`` key; LangGraph merges it into the state.
    """
    classification_prompt = ChatPromptTemplate.from_template(
        "Categorize the following customer query into one of these categories: "
        "Technical, Billing, General. Query: {query}"
    )
    result = (classification_prompt | llm).invoke({"query": state["query"]})
    return {"category": result.content}
33
+
34
def analyze_sentiment(state: State) -> State:
    """Classify the query's sentiment and return a partial state update.

    Returns only the ``sentiment`` key; downstream routing compares it
    against the exact label "Negative".
    """
    prompt = ChatPromptTemplate.from_template(
        "Analyze the sentiment of the following customer query. "
        # Fixed wording: the original read "Response with either ...", which is
        # ungrammatical and weakens the formatting instruction to the model.
        "Respond with either 'Positive', 'Neutral', or 'Negative'. Query: {query}"
    )
    chain = prompt | llm
    sentiment = chain.invoke({"query": state["query"]}).content
    return {"sentiment": sentiment}
43
+
44
def handle_technical(state: State) -> State:
    """Produce a technical-support answer for the current query."""
    support_prompt = ChatPromptTemplate.from_template(
        "Provide a technical support response to the following query: {query}"
    )
    answer = (support_prompt | llm).invoke({"query": state["query"]}).content
    return {"response": answer}
52
+
53
def handle_billing(state: State) -> State:
    """Produce a billing-support answer for the current query."""
    template = "Provide a billing support response to the following query: {query}"
    chain = ChatPromptTemplate.from_template(template) | llm
    return {"response": chain.invoke({"query": state["query"]}).content}
61
+
62
def handle_general(state: State) -> State:
    """Produce a general-support answer for the current query."""
    general_prompt = ChatPromptTemplate.from_template(
        "Provide a general support response to the following query: {query}"
    )
    reply = (general_prompt | llm).invoke({"query": state["query"]})
    return {"response": reply.content}
70
+
71
def escalate(state: "State") -> "State":
    """Terminal node for negative-sentiment queries: hand off to a human."""
    escalation_notice = (
        "This query has been escalated to a human agent due to its negative sentiment."
    )
    return {"response": escalation_notice}
74
+
75
def route_query(state: "State") -> str:
    """Choose the next node name: sentiment first, then category.

    Fix: the return annotation was ``State`` in the original, but this
    function returns a node *name* for ``add_conditional_edges``, not a
    state update — corrected to ``str``. Comparisons are exact matches, so
    the LLM must answer with the bare label (NOTE(review): consider
    stripping/normalizing the LLM output before comparing).
    """
    if state["sentiment"] == "Negative":
        return "escalate"
    if state["category"] == "Technical":
        return "handle_technical"
    if state["category"] == "Billing":
        return "handle_billing"
    return "handle_general"
85
+
86
# Assemble the support graph: categorize -> analyze_sentiment, then branch to
# exactly one handler chosen by route_query; every handler terminates the run.
workflow = StateGraph(State)

_nodes = {
    "categorize": categorize,
    "analyze_sentiment": analyze_sentiment,
    "handle_technical": handle_technical,
    "handle_billing": handle_billing,
    "handle_general": handle_general,
    "escalate": escalate,
}
for _name, _fn in _nodes.items():
    workflow.add_node(_name, _fn)

workflow.add_edge("categorize", "analyze_sentiment")

_branches = ("handle_technical", "handle_billing", "handle_general", "escalate")
workflow.add_conditional_edges(
    "analyze_sentiment",
    route_query,
    {branch: branch for branch in _branches},
)
for _branch in _branches:
    workflow.add_edge(_branch, END)

workflow.set_entry_point("categorize")

app = workflow.compile()
114
+
115
# Entry point that integrates the compiled workflow.
def run_customer_support(query: str) -> Dict[str, str]:
    """Run the workflow on *query* and repackage the final state for display."""
    final_state = app.invoke({"query": query})
    return {
        "Category": final_state["category"],
        "Sentiment": final_state["sentiment"],
        "Response": final_state["response"],
    }
123
+
124
 
125
  """
126
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 
128
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
129
 
130
 
131
+ import gradio as gr
132
+
133
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p
):
    """Stream a chat completion for *message* given prior *history*.

    Builds an OpenAI-style message list (system + alternating user/assistant
    turns) and yields the progressively accumulated response text so Gradio
    can render tokens as they arrive.
    """
    messages = [{"role": "system", "content": system_message}]

    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # Fixes: (1) the loop variable no longer shadows the `message` parameter
    # (the original reused the name); (2) streamed deltas can carry a None
    # `content` (e.g. final chunk), which would make `response += token`
    # raise TypeError — skip empty deltas.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p
    ):
        token = chunk.choices[0].delta.content
        if token:
            response += token
        yield response
164
 
165
# Define a custom Gradio Chat Interface.
# NOTE: components are created inside the Blocks context, so creation order
# determines on-page layout — do not reorder these calls casually.
with gr.Blocks() as demo:
    gr.Markdown("### AI-Powered Customer Support Assistant")

    # Streaming chat widget: `respond` is a generator, so partial responses
    # render as tokens arrive. The extra controls are passed through to
    # `respond` as additional inputs in this exact order.
    chatbot = gr.ChatInterface(
        respond,
        additional_inputs=[
            gr.Textbox(
                value="You are a friendly chatbot.",
                label="System Message",
                info="Customize how the assistant behaves in conversations."
            ),
            gr.Slider(
                minimum=1,
                maximum=2048,
                value=512,
                step=1,
                label="Max New Tokens",
                info="Maximum number of tokens for the assistant's response."
            ),
            gr.Slider(
                minimum=0.1,
                maximum=4.0,
                value=0.7,
                step=0.1,
                label="Temperature",
                info="Controls randomness in the assistant's response."
            ),
            gr.Slider(
                minimum=0.1,
                maximum=1.0,
                value=0.95,
                step=0.05,
                label="Top-p (Nucleus Sampling)",
                info="Limits sampling to a subset of tokens with cumulative probability top_p."
            ),
        ]
    )

    gr.Markdown("### Instructions")
    # Read-only helper text rendered below the chat area.
    gr.Textbox(
        value="Enter your query, select response settings, and start the conversation.",
        interactive=False,
    )
209
 
210
  if __name__ == "__main__":
211
  demo.launch()