omvishesh committed · verified
Commit 3ad83b0 · 1 Parent(s): 153539a

Create app.py

Files changed (1): app.py (+156, -0)
app.py ADDED
from typing import TypedDict, Dict
import os

from langgraph.graph import StateGraph, END
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables.graph import MermaidDrawMethod
from IPython.display import display, Image
from langchain_groq import ChatGroq


class State(TypedDict):
    query: str
    category: str
    sentiment: str
    response: str


llm = ChatGroq(
    temperature=0,
    # Read the key from the environment rather than hardcoding a secret in the source.
    groq_api_key=os.environ["GROQ_API_KEY"],
    model_name="llama-3.3-70b-versatile",
)

# Quick smoke test of the model connection:
# result = llm.invoke("what is langchain")
# print(result.content)
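One way to supply GROQ_API_KEY in a notebook session without writing it into the file (a suggested pattern, not part of the original commit):

import os, getpass

# Prompt once per session if the key is not already exported in the shell.
if "GROQ_API_KEY" not in os.environ:
    os.environ["GROQ_API_KEY"] = getpass.getpass("Groq API key: ")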
def categorize(state: State) -> State:
    """Classify the query as Technical, Billing, or General."""
    prompt = ChatPromptTemplate.from_template(
        "Categorize the following customer query into one of these categories: "
        "Technical, Billing, General. Query: {query}"
    )
    chain = prompt | llm
    category = chain.invoke({"query": state["query"]}).content
    # Nodes return partial updates; LangGraph merges them into the shared state.
    return {"category": category}
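Note that route_query below compares these labels with exact string equality, so any extra wording in the model's reply ("The category is Technical.") would silently fall through to the general handler. A minimal, hypothetical normalize_label helper (an assumption, not in the committed file) that could be applied to both the category and sentiment outputs:

# Hypothetical helper: collapse a free-form model reply onto one of the
# expected labels by substring match, with a fallback default.
def normalize_label(raw: str, labels: list, default: str) -> str:
    lowered = raw.lower()
    for label in labels:
        if label.lower() in lowered:
            return label
    return default

# normalize_label("The category is Technical.", ["Technical", "Billing", "General"], "General")
# -> "Technical"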
def analyze_sentiment(state: State) -> State:
    prompt = ChatPromptTemplate.from_template(
        "Analyze the sentiment of the following customer query. "
        "Respond with either 'Positive', 'Neutral', or 'Negative'. Query: {query}"
    )
    chain = prompt | llm
    sentiment = chain.invoke({"query": state["query"]}).content
    return {"sentiment": sentiment}
def handle_technical(state: State) -> State:
    prompt = ChatPromptTemplate.from_template(
        "Provide a technical support response to the following query: {query}"
    )
    chain = prompt | llm
    response = chain.invoke({"query": state["query"]}).content
    return {"response": response}


def handle_billing(state: State) -> State:
    prompt = ChatPromptTemplate.from_template(
        "Provide a billing support response to the following query: {query}"
    )
    chain = prompt | llm
    response = chain.invoke({"query": state["query"]}).content
    return {"response": response}


def handle_general(state: State) -> State:
    prompt = ChatPromptTemplate.from_template(
        "Provide a general support response to the following query: {query}"
    )
    chain = prompt | llm
    response = chain.invoke({"query": state["query"]}).content
    return {"response": response}
def escalate(state: State) -> State:
    return {"response": "This query has been escalated to a human agent due to its negative sentiment."}


def route_query(state: State) -> str:
    # Negative sentiment takes priority over the category.
    if state["sentiment"] == "Negative":
        return "escalate"
    elif state["category"] == "Technical":
        return "handle_technical"
    elif state["category"] == "Billing":
        return "handle_billing"
    else:
        return "handle_general"
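Since route_query is a pure function of the state dict, the routing rules can be sanity-checked without invoking the graph or the model (the states below are made up for illustration):

# Escalation should win even when a category handler would otherwise match.
assert route_query({"sentiment": "Negative", "category": "Billing"}) == "escalate"
assert route_query({"sentiment": "Neutral", "category": "Technical"}) == "handle_technical"
# Unrecognized categories fall back to the general handler.
assert route_query({"sentiment": "Positive", "category": "Unknown"}) == "handle_general"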
workflow = StateGraph(State)

workflow.add_node("categorize", categorize)
workflow.add_node("analyze_sentiment", analyze_sentiment)
workflow.add_node("handle_technical", handle_technical)
workflow.add_node("handle_billing", handle_billing)
workflow.add_node("handle_general", handle_general)
workflow.add_node("escalate", escalate)

workflow.add_edge("categorize", "analyze_sentiment")
workflow.add_conditional_edges(
    "analyze_sentiment",
    route_query,
    {
        "handle_technical": "handle_technical",
        "handle_billing": "handle_billing",
        "handle_general": "handle_general",
        "escalate": "escalate",
    },
)

workflow.add_edge("handle_technical", END)
workflow.add_edge("handle_billing", END)
workflow.add_edge("handle_general", END)
workflow.add_edge("escalate", END)

workflow.set_entry_point("categorize")

app = workflow.compile()
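The MermaidDrawMethod and IPython.display imports at the top of the file are otherwise unused; presumably they were intended for rendering the compiled graph in a notebook. A sketch of that step (assumes a Jupyter environment and network access, since MermaidDrawMethod.API calls a hosted renderer):

# Render the graph topology as a PNG via the hosted Mermaid API.
display(Image(app.get_graph().draw_mermaid_png(draw_method=MermaidDrawMethod.API)))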
def run_customer_support(query: str) -> Dict[str, str]:
    results = app.invoke({"query": query})
    return {
        "category": results["category"],
        "sentiment": results["sentiment"],
        "response": results["response"],
    }

# query = "my laptop is not charging what should i do?"
# result = run_customer_support(query)
# print(f"Query: {query}")
# print(f"Category: {result['category']}")
# print(f"Sentiment: {result['sentiment']}")
# print(f"Response: {result['response']}")
import gradio as gr
# Create the Gradio interface
def gradio_interface(query: str):
    result = run_customer_support(query)
    return (
        f"**Category:** {result['category']}\n\n"
        f"**Sentiment:** {result['sentiment']}\n\n"
        f"**Response:** {result['response']}"
    )
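The formatter can be spot-checked directly before launching the UI (a suggested check, not in the original commit; the query is made up and this calls the live model):

# print(gradio_interface("I was charged twice this month"))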
# Build the Gradio app
gui = gr.Interface(
    fn=gradio_interface,
    theme='Yntec/HaleyCH_Theme_Orange_Green',
    inputs=gr.Textbox(lines=2, placeholder="Enter your query here..."),
    outputs=gr.Markdown(),
    title="Customer Support Assistant",
    description="Provide a query and receive a categorized response. The system analyzes sentiment and routes to the appropriate support channel.",
)
# Launch the app
if __name__ == "__main__":
    gui.launch(share=True)