from typing import TypedDict, Dict
from langgraph.graph import StateGraph, END
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables.graph import MermaidDrawMethod
from IPython.display import display, Image
class State(TypedDict):
    """Shared state passed between the graph's nodes."""
    query: str      # raw customer query
    category: str   # Technical | Billing | General
    sentiment: str  # Positive | Neutral | Negative
    response: str   # final answer returned to the customer
import os

from langchain_groq import ChatGroq

# Read the Groq API key from the environment instead of hardcoding it;
# never commit secrets to source control.
llm = ChatGroq(
    temperature=0,
    groq_api_key=os.environ["GROQ_API_KEY"],
    model_name="llama-3.3-70b-versatile",
)
# Quick smoke test that the model is reachable.
result = llm.invoke("what is langchain")
print(result.content)
def categorize(state: State) -> State:
    """Classify the query as Technical, Billing, or General."""
    prompt = ChatPromptTemplate.from_template(
        "Categorize the following customer query into one of these categories: "
        "Technical, Billing, General. Respond with the category name only. "
        "Query: {query}"
    )
    chain = prompt | llm
    # strip() guards the exact string match done later in route_query
    category = chain.invoke({"query": state["query"]}).content.strip()
    return {"category": category}
def analyze_sentiment(state: State) -> State:
    """Label the query's sentiment so negative queries can be escalated."""
    prompt = ChatPromptTemplate.from_template(
        "Analyze the sentiment of the following customer query. "
        "Respond with either 'Positive', 'Neutral', or 'Negative'. Query: {query}"
    )
    chain = prompt | llm
    sentiment = chain.invoke({"query": state["query"]}).content.strip()
    return {"sentiment": sentiment}
def handle_technical(state: State) -> State:
    """Answer the query as a technical-support agent."""
    prompt = ChatPromptTemplate.from_template(
        "Provide a technical support response to the following query: {query}"
    )
    chain = prompt | llm
    response = chain.invoke({"query": state["query"]}).content
    return {"response": response}

def handle_billing(state: State) -> State:
    """Answer the query as a billing-support agent."""
    prompt = ChatPromptTemplate.from_template(
        "Provide a billing support response to the following query: {query}"
    )
    chain = prompt | llm
    response = chain.invoke({"query": state["query"]}).content
    return {"response": response}

def handle_general(state: State) -> State:
    """Answer the query as a general-support agent."""
    prompt = ChatPromptTemplate.from_template(
        "Provide a general support response to the following query: {query}"
    )
    chain = prompt | llm
    response = chain.invoke({"query": state["query"]}).content
    return {"response": response}
def escalate(state: State) -> State:
    """Terminal node for negative-sentiment queries."""
    return {"response": "This query has been escalated to a human agent due to its negative sentiment."}
def route_query(state: State) -> str:
    """Pick the next node: escalate on negative sentiment, else route by category."""
    if state["sentiment"] == "Negative":
        return "escalate"
    elif state["category"] == "Technical":
        return "handle_technical"
    elif state["category"] == "Billing":
        return "handle_billing"
    else:
        return "handle_general"
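# Sanity check on hand-built states (hypothetical values; no LLM call needed,
# since route_query is a pure function of the category and sentiment fields).
assert route_query(
    {"query": "", "category": "Billing", "sentiment": "Neutral", "response": ""}
) == "handle_billing"
assert route_query(
    {"query": "", "category": "Billing", "sentiment": "Negative", "response": ""}
) == "escalate"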
# Assemble the graph: categorize -> analyze_sentiment -> route to a handler.
workflow = StateGraph(State)
workflow.add_node("categorize", categorize)
workflow.add_node("analyze_sentiment", analyze_sentiment)
workflow.add_node("handle_technical", handle_technical)
workflow.add_node("handle_billing", handle_billing)
workflow.add_node("handle_general", handle_general)
workflow.add_node("escalate", escalate)
workflow.add_edge("categorize", "analyze_sentiment")
workflow.add_conditional_edges(
    "analyze_sentiment",
    route_query,
    {
        "handle_technical": "handle_technical",
        "handle_billing": "handle_billing",
        "handle_general": "handle_general",
        "escalate": "escalate",
    },
)
workflow.add_edge("handle_technical", END)
workflow.add_edge("handle_billing", END)
workflow.add_edge("handle_general", END)
workflow.add_edge("escalate", END)
workflow.set_entry_point("categorize")
app = workflow.compile()
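# Optional: render the compiled graph so the routing is easy to inspect.
# A hedged sketch: draw_mermaid_png(draw_method=MermaidDrawMethod.API) renders
# via the public mermaid.ink service, so it needs network access, and display()
# only shows the image inside a notebook; skip gracefully otherwise.
try:
    display(Image(app.get_graph().draw_mermaid_png(draw_method=MermaidDrawMethod.API)))
except Exception as exc:
    print(f"Skipping graph rendering: {exc}")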
def run_customer_support(query: str) -> Dict[str, str]:
    """Run one query through the graph and collect the fields we care about."""
    results = app.invoke({"query": query})
    return {
        "category": results["category"],
        "sentiment": results["sentiment"],
        "response": results["response"],
    }
# query = "my laptop is not charging what should i do?"
# result = run_customer_support(query)
# print(f"Query: {query}")
# print(f"Category: {result['category']}")
# print(f"Sentiment: {result['sentiment']}")
# print(f"Response: {result['response']}")
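# A second (hypothetical) example exercising the escalation path: a clearly
# negative query should be routed to the `escalate` node instead of a handler.
# negative_query = "this is the third time my bill is wrong, I am furious!"
# print(run_customer_support(negative_query))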
import gradio as gr
# Create the Gradio interface
def gradio_interface(query: str):
    """Format the workflow's output as Markdown for the Gradio UI."""
    result = run_customer_support(query)
    return (
        f"**Category:** {result['category']}\n\n"
        f"**Sentiment:** {result['sentiment']}\n\n"
        f"**Response:** {result['response']}"
    )
# Build the Gradio app
gui = gr.Interface(
    fn=gradio_interface,
    theme="Yntec/HaleyCH_Theme_Orange_Green",
    inputs=gr.Textbox(lines=2, placeholder="Enter your query here..."),
    outputs=gr.Markdown(),
    title="Customer Support Assistant",
    description="Provide a query and receive a categorized response. The system analyzes sentiment and routes the query to the appropriate support channel.",
)
# Launch the app
if __name__ == "__main__":
    gui.launch(share=True)