# mcp-101 / app_tool.py
# Author: Yoon-gu Hwang (commit 0db572d, "์—…๋ฐ์ดํŠธ")
import asyncio
import operator
import os
import time
from pprint import pprint
from typing import Annotated, TypedDict

import gradio as gr
from gradio import ChatMessage
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI
from langgraph.graph import END, StateGraph
# ์ƒํƒœ ์ •์˜
class ChatState(TypedDict):
messages: Annotated[list[BaseMessage], operator.add]
current_response: str
agent_type: str
context: dict
step_info: str
# LLM ์ดˆ๊ธฐํ™”
llm = ChatOpenAI(
model="gpt-3.5-turbo",
temperature=0.7,
)
def step1_analyzer_node(state: ChatState) -> ChatState:
"""1๋‹จ๊ณ„: ๋ฉ”์‹œ์ง€ ๋ถ„์„ ๋…ธ๋“œ"""
last_message = state["messages"][-1].content
# ๋ถ„์„ ์‹œ๋ฎฌ๋ ˆ์ด์…˜ (์‹ค์ œ๋กœ๋Š” ๋” ๋ณต์žกํ•œ ๋กœ์ง)
analysis = {
"length": len(last_message),
"has_question": "?" in last_message,
"language": "korean" if any(ord(c) > 127 for c in last_message) else "english",
"sentiment": "positive" if any(word in last_message.lower() for word in ["์ข‹", "๊ฐ์‚ฌ", "๊ณ ๋งˆ์›Œ", "thanks", "good"]) else "neutral"
}
return {
"messages": [],
"current_response": "",
"agent_type": "",
"context": {"analysis": analysis},
"step_info": f"๐Ÿ“Š **1๋‹จ๊ณ„ ์™„๋ฃŒ** - ๋ฉ”์‹œ์ง€ ๋ถ„์„\n- ๊ธธ์ด: {analysis['length']}์ž\n- ์งˆ๋ฌธ ํฌํ•จ: {'์˜ˆ' if analysis['has_question'] else '์•„๋‹ˆ์˜ค'}\n- ์–ธ์–ด: {analysis['language']}\n- ๊ฐ์ •: {analysis['sentiment']}"
}
def step2_classifier_node(state: ChatState) -> ChatState:
"""2๋‹จ๊ณ„: ์˜๋„ ๋ถ„๋ฅ˜ ๋…ธ๋“œ"""
last_message = state["messages"][-1].content.lower()
analysis = state["context"]["analysis"]
# ์˜๋„ ๋ถ„๋ฅ˜ ๋กœ์ง
if any(word in last_message for word in ["์ฝ”๋“œ", "ํ”„๋กœ๊ทธ๋ž˜๋ฐ", "python", "๊ฐœ๋ฐœ", "ํ•จ์ˆ˜", "ํด๋ž˜์Šค"]):
agent_type = "programmer"
confidence = 0.9
elif any(word in last_message for word in ["๋‚ ์”จ", "๋‰ด์Šค", "์ •๋ณด", "๊ฒ€์ƒ‰", "์ฐพ์•„"]):
agent_type = "informer"
confidence = 0.8
elif any(word in last_message for word in ["๊ณ„์‚ฐ", "์ˆ˜ํ•™", "๋”ํ•˜๊ธฐ", "๋นผ๊ธฐ", "๊ณฑํ•˜๊ธฐ", "๋‚˜๋ˆ„๊ธฐ"]):
agent_type = "calculator"
confidence = 0.95
elif any(word in last_message for word in ["์ฐฝ์ž‘", "์‹œ", "์†Œ์„ค", "์ด์•ผ๊ธฐ", "๊ธ€"]):
agent_type = "creative"
confidence = 0.85
else:
agent_type = "general"
confidence = 0.7
context = state["context"]
context["classification"] = {
"agent_type": agent_type,
"confidence": confidence
}
return {
"messages": [],
"current_response": "",
"agent_type": agent_type,
"context": context,
"step_info": f"๐ŸŽฏ **2๋‹จ๊ณ„ ์™„๋ฃŒ** - ์˜๋„ ๋ถ„๋ฅ˜\n- ๋ถ„๋ฅ˜ ๊ฒฐ๊ณผ: {agent_type}\n- ์‹ ๋ขฐ๋„: {confidence:.1%}\n- ๋‹ค์Œ ๋‹จ๊ณ„: {'์ „๋ฌธ ์ฒ˜๋ฆฌ' if confidence > 0.8 else '์ผ๋ฐ˜ ์ฒ˜๋ฆฌ'}"
}
def step3_context_enricher_node(state: ChatState) -> ChatState:
"""3๋‹จ๊ณ„: ์ปจํ…์ŠคํŠธ ๊ฐ•ํ™” ๋…ธ๋“œ"""
agent_type = state["agent_type"]
# ์—์ด์ „ํŠธ ํƒ€์ž…๋ณ„ ์ปจํ…์ŠคํŠธ ๊ฐ•ํ™”
enriched_context = {
"programmer": {
"system_prompt": "๋‹น์‹ ์€ ๊ฒฝํ—˜์ด ํ’๋ถ€ํ•œ ์‹œ๋‹ˆ์–ด ๊ฐœ๋ฐœ์ž์ž…๋‹ˆ๋‹ค. ์ฝ”๋“œ ์˜ˆ์‹œ์™€ ํ•จ๊ป˜ ๋ช…ํ™•ํ•˜๊ณ  ์‹ค์šฉ์ ์ธ ๋‹ต๋ณ€์„ ์ œ๊ณตํ•˜์„ธ์š”.",
"tools": ["์ฝ”๋“œ ์‹คํ–‰", "๋ฌธ์„œ ๊ฒ€์ƒ‰", "๋ฒ ์ŠคํŠธ ํ”„๋ž™ํ‹ฐ์Šค"],
"style": "๊ธฐ์ˆ ์ ์ด๊ณ  ์ •ํ™•ํ•œ"
},
"informer": {
"system_prompt": "๋‹น์‹ ์€ ์ •๋ณด ์ „๋ฌธ๊ฐ€์ž…๋‹ˆ๋‹ค. ์ •ํ™•ํ•˜๊ณ  ์ตœ์‹ ์˜ ์ •๋ณด๋ฅผ ๊ตฌ์กฐํ™”๋œ ํ˜•ํƒœ๋กœ ์ œ๊ณตํ•˜์„ธ์š”.",
"tools": ["์›น ๊ฒ€์ƒ‰", "ํŒฉํŠธ ์ฒดํฌ", "๋ฐ์ดํ„ฐ ๋ถ„์„"],
"style": "๊ฐ๊ด€์ ์ด๊ณ  ์ƒ์„ธํ•œ"
},
"calculator": {
"system_prompt": "๋‹น์‹ ์€ ์ˆ˜ํ•™ ์ „๋ฌธ๊ฐ€์ž…๋‹ˆ๋‹ค. ๊ณ„์‚ฐ ๊ณผ์ •์„ ๋‹จ๊ณ„๋ณ„๋กœ ์„ค๋ช…ํ•˜๊ณ  ์ •ํ™•ํ•œ ๋‹ต์„ ์ œ๊ณตํ•˜์„ธ์š”.",
"tools": ["์ˆ˜์‹ ๊ณ„์‚ฐ", "๊ทธ๋ž˜ํ”„ ์ƒ์„ฑ", "ํ†ต๊ณ„ ๋ถ„์„"],
"style": "๋…ผ๋ฆฌ์ ์ด๊ณ  ์ฒด๊ณ„์ ์ธ"
},
"creative": {
"system_prompt": "๋‹น์‹ ์€ ์ฐฝ์ž‘ ์ „๋ฌธ๊ฐ€์ž…๋‹ˆ๋‹ค. ์ƒ์ƒ๋ ฅ์ด ํ’๋ถ€ํ•˜๊ณ  ๊ฐ์„ฑ์ ์ธ ์ฝ˜ํ…์ธ ๋ฅผ ์ œ์ž‘ํ•˜์„ธ์š”.",
"tools": ["์Šคํ† ๋ฆฌํ…”๋ง", "์‹œ๊ฐ์  ๋ฌ˜์‚ฌ", "๊ฐ์ • ํ‘œํ˜„"],
"style": "์ฐฝ์˜์ ์ด๊ณ  ๊ฐ์„ฑ์ ์ธ"
},
"general": {
"system_prompt": "๋‹น์‹ ์€ ์นœ๊ทผํ•˜๊ณ  ๋„์›€์ด ๋˜๋Š” AI ์–ด์‹œ์Šคํ„ดํŠธ์ž…๋‹ˆ๋‹ค. ์ž์—ฐ์Šค๋Ÿฝ๊ณ  ์ดํ•ดํ•˜๊ธฐ ์‰ฌ์šด ๋‹ต๋ณ€์„ ์ œ๊ณตํ•˜์„ธ์š”.",
"tools": ["์ผ๋ฐ˜ ๋Œ€ํ™”", "์ •๋ณด ์ œ๊ณต", "๋ฌธ์ œ ํ•ด๊ฒฐ"],
"style": "์นœ๊ทผํ•˜๊ณ  ์ž์—ฐ์Šค๋Ÿฌ์šด"
}
}
context = state["context"]
context["enriched"] = enriched_context.get(agent_type, enriched_context["general"])
return {
"messages": [],
"current_response": "",
"agent_type": agent_type,
"context": context,
"step_info": f"๐Ÿ”ง **3๋‹จ๊ณ„ ์™„๋ฃŒ** - ์ปจํ…์ŠคํŠธ ๊ฐ•ํ™”\n- ์—์ด์ „ํŠธ: {agent_type}\n- ์Šคํƒ€์ผ: {context['enriched']['style']}\n- ํ™œ์šฉ ๋„๊ตฌ: {', '.join(context['enriched']['tools'][:2])}"
}
def step4_response_generator_node(state: ChatState) -> ChatState:
"""4๋‹จ๊ณ„: ์‘๋‹ต ์ƒ์„ฑ ๋…ธ๋“œ"""
enriched_context = state["context"]["enriched"]
system_prompt = enriched_context["system_prompt"]
# ์‹œ์Šคํ…œ ํ”„๋กฌํ”„ํŠธ์™€ ํ•จ๊ป˜ ๋ฉ”์‹œ์ง€ ๊ตฌ์„ฑ
messages = [HumanMessage(content=system_prompt)] + state["messages"]
try:
response = llm.invoke(messages)
# ์—์ด์ „ํŠธ ํƒ€์ž…์— ๋”ฐ๋ฅธ ์•„์ด์ฝ˜ ์„ค์ •
icons = {
"programmer": "๐Ÿ’ป",
"informer": "๐Ÿ“š",
"calculator": "๐Ÿ”ข",
"creative": "๐ŸŽจ",
"general": "๐Ÿ’ฌ"
}
icon = icons.get(state["agent_type"], "๐Ÿ’ฌ")
final_response = f"{icon} **[{state['agent_type'].upper()}]**\n\n{response.content}"
return {
"messages": [response],
"current_response": final_response,
"agent_type": state["agent_type"],
"context": state["context"],
"step_info": f"โœ… **4๋‹จ๊ณ„ ์™„๋ฃŒ** - ์‘๋‹ต ์ƒ์„ฑ\n- ์ตœ์ข… ์‘๋‹ต ์ค€๋น„๋จ\n- ์‘๋‹ต ๊ธธ์ด: {len(response.content)}์ž"
}
except Exception as e:
error_msg = f"โŒ ์‘๋‹ต ์ƒ์„ฑ ์ค‘ ์˜ค๋ฅ˜ ๋ฐœ์ƒ: {str(e)}"
return {
"messages": [AIMessage(content=error_msg)],
"current_response": error_msg,
"agent_type": state["agent_type"],
"context": state["context"],
"step_info": f"โŒ **4๋‹จ๊ณ„ ์‹คํŒจ** - ์˜ค๋ฅ˜ ๋ฐœ์ƒ\n- ์˜ค๋ฅ˜: {str(e)}"
}
def should_continue_to_classifier(state: ChatState) -> str:
return "classifier"
def should_continue_to_enricher(state: ChatState) -> str:
return "enricher"
def should_continue_to_generator(state: ChatState) -> str:
return "generator"
def should_end(state: ChatState) -> str:
return END
# LangGraph ์›Œํฌํ”Œ๋กœ์šฐ ์ƒ์„ฑ
def create_enhanced_workflow():
workflow = StateGraph(ChatState)
# 4๊ฐœ ๋…ธ๋“œ ์ถ”๊ฐ€
workflow.add_node("analyzer", step1_analyzer_node)
workflow.add_node("classifier", step2_classifier_node)
workflow.add_node("enricher", step3_context_enricher_node)
workflow.add_node("generator", step4_response_generator_node)
# ์‹œ์ž‘์  ์„ค์ •
workflow.set_entry_point("analyzer")
# ์ˆœ์ฐจ์  ์—ฃ์ง€ ์ถ”๊ฐ€
workflow.add_conditional_edges(
"analyzer",
should_continue_to_classifier,
{"classifier": "classifier"}
)
workflow.add_conditional_edges(
"classifier",
should_continue_to_enricher,
{"enricher": "enricher"}
)
workflow.add_conditional_edges(
"enricher",
should_continue_to_generator,
{"generator": "generator"}
)
workflow.add_conditional_edges(
"generator",
should_end,
{END: END}
)
return workflow.compile()
# ๊ธ€๋กœ๋ฒŒ ์›Œํฌํ”Œ๋กœ์šฐ ์ธ์Šคํ„ด์Šค
enhanced_workflow = create_enhanced_workflow()
def stream_chatbot_response(message, history):
"""๊ฐ ๋‹จ๊ณ„๋ฅผ ๋ˆ„์ ํ•ด์„œ ์‹ค์‹œ๊ฐ„ ํ‘œ์‹œ"""
if not message.strip():
yield "", history
return
# ๋ฉ”์‹œ์ง€ ํžˆ์Šคํ† ๋ฆฌ๋ฅผ LangChain ๋ฉ”์‹œ์ง€๋กœ ๋ณ€ํ™˜
messages = []
for human_msg, ai_msg in history:
if human_msg and not "๐Ÿ“Š" in human_msg and not "๐ŸŽฏ" in human_msg:
messages.append(HumanMessage(content=human_msg))
if ai_msg and not "๐Ÿ“Š" in ai_msg and not "๐ŸŽฏ" in ai_msg and not "โšก" in ai_msg:
messages.append(AIMessage(content=ai_msg))
# ํ˜„์žฌ ๋ฉ”์‹œ์ง€ ์ถ”๊ฐ€
messages.append(HumanMessage(content=message))
# ์ดˆ๊ธฐ ์ƒํƒœ ์„ค์ •
initial_state = {
"messages": messages,
"current_response": "",
"agent_type": "",
"context": {},
"step_info": ""
}
# ์›Œํฌํ”Œ๋กœ์šฐ๋ฅผ ์ŠคํŠธ๋ฆผ์œผ๋กœ ์‹คํ–‰
current_history = history.copy()
yield "", current_history
time.sleep(0.3)
# ๊ฐ ๋…ธ๋“œ๋ฅผ ์ˆœ์ฐจ์ ์œผ๋กœ ์‹คํ–‰ํ•˜๋ฉด์„œ ์ค‘๊ฐ„ ๊ฒฐ๊ณผ๋ฅผ ๋ˆ„์  ํ‘œ์‹œ
for step_result in enhanced_workflow.stream(initial_state):
node_name = list(step_result.keys())[0]
node_result = step_result[node_name]
pprint(step_result)
if "step_info" in node_result and node_result["step_info"]:
# ํ˜„์žฌ ๋‹จ๊ณ„ ์ •๋ณด๋ฅผ ๋ˆ„์  ๋ฆฌ์ŠคํŠธ์— ์ถ”๊ฐ€
step_info = node_result["step_info"]
pprint(step_info)
# ๋ˆ„์ ๋œ ๋ชจ๋“  ๋‹จ๊ณ„ ์ •๋ณด๋ฅผ ํ‘œ์‹œ
current_history.append(ChatMessage( role="assistant",
content=step_info,
metadata={"title": f"{node_name}", "status": "done"}))
yield "", current_history
time.sleep(0.2) # ์‹œ๊ฐ์  ํšจ๊ณผ๋ฅผ ์œ„ํ•œ ์ง€์—ฐ
current_history.append(ChatMessage( role="assistant",
content=step_result["generator"]["current_response"]))
yield "", current_history
def clear_chat():
"""์ฑ„ํŒ… ํžˆ์Šคํ† ๋ฆฌ ์ดˆ๊ธฐํ™”"""
return []
# Gradio ์ธํ„ฐํŽ˜์ด์Šค ์ƒ์„ฑ
def create_enhanced_gradio_interface():
with gr.Blocks(title="Enhanced LangGraph ์ฑ—๋ด‡", theme=gr.themes.Soft()) as demo:
gr.Markdown(
"""
# ๐Ÿš€ Enhanced LangGraph + Gradio ์ฑ—๋ด‡
**4๋‹จ๊ณ„ ์ฒ˜๋ฆฌ ๊ณผ์ •์„ ์‹ค์‹œ๊ฐ„์œผ๋กœ ํ™•์ธํ•  ์ˆ˜ ์žˆ๋Š” AI ์ฑ—๋ด‡**
**์ฒ˜๋ฆฌ ๋‹จ๊ณ„:**
1. ๐Ÿ“Š **๋ฉ”์‹œ์ง€ ๋ถ„์„** - ์ž…๋ ฅ ๋ฉ”์‹œ์ง€์˜ ํŠน์„ฑ ๋ถ„์„
2. ๐ŸŽฏ **์˜๋„ ๋ถ„๋ฅ˜** - ์‚ฌ์šฉ์ž ์˜๋„์— ๋”ฐ๋ฅธ ์—์ด์ „ํŠธ ์„ ํƒ
3. ๐Ÿ”ง **์ปจํ…์ŠคํŠธ ๊ฐ•ํ™”** - ์ „๋ฌธ ๋„๋ฉ”์ธ๋ณ„ ์ปจํ…์ŠคํŠธ ์„ค์ •
4. โœ… **์‘๋‹ต ์ƒ์„ฑ** - ์ตœ์ ํ™”๋œ ๋‹ต๋ณ€ ์ƒ์„ฑ
**์ง€์› ์—์ด์ „ํŠธ:** ๐Ÿ’ป ํ”„๋กœ๊ทธ๋ž˜๋จธ | ๐Ÿ“š ์ •๋ณด์ „๋ฌธ๊ฐ€ | ๐Ÿ”ข ๊ณ„์‚ฐ๊ธฐ | ๐ŸŽจ ์ฐฝ์ž‘๊ฐ€ | ๐Ÿ’ฌ ์ผ๋ฐ˜๋Œ€ํ™”
"""
)
# ์ฑ—๋ด‡ ์ปดํฌ๋„ŒํŠธ
chatbot = gr.Chatbot(
value=[],
height=400,
show_label=False,
container=True,
type="messages"
)
with gr.Row():
msg = gr.Textbox(
placeholder="๋ฉ”์‹œ์ง€๋ฅผ ์ž…๋ ฅํ•˜์„ธ์š”... (๊ฐ ์ฒ˜๋ฆฌ ๋‹จ๊ณ„๊ฐ€ ์‹ค์‹œ๊ฐ„์œผ๋กœ ํ‘œ์‹œ๋ฉ๋‹ˆ๋‹ค)",
show_label=False,
scale=4,
container=False
)
submit_btn = gr.Button("์ „์†ก", scale=1, variant="primary")
clear_btn = gr.Button("์ดˆ๊ธฐํ™”", scale=1, variant="secondary")
# ์ƒํƒœ ํ‘œ์‹œ
with gr.Row():
gr.Markdown("๐Ÿ’ก **ํŒ**: ๋‹ค์–‘ํ•œ ์ฃผ์ œ๋กœ ๋Œ€ํ™”ํ•ด๋ณด์„ธ์š”. ๊ฐ ๋‹จ๊ณ„๋ณ„ ์ฒ˜๋ฆฌ ๊ณผ์ •์„ ํ™•์ธํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค!")
# ์ด๋ฒคํŠธ ํ•ธ๋“ค๋Ÿฌ - ์ŠคํŠธ๋ฆฌ๋ฐ ๋ฐฉ์‹์œผ๋กœ ๋ณ€๊ฒฝ
submit_btn.click(
stream_chatbot_response,
inputs=[msg, chatbot],
outputs=[msg, chatbot]
)
msg.submit(
stream_chatbot_response,
inputs=[msg, chatbot],
outputs=[msg, chatbot]
)
# ์ดˆ๊ธฐํ™” ๋ฒ„ํŠผ
clear_btn.click(
clear_chat,
outputs=[chatbot]
)
# ์นดํ…Œ๊ณ ๋ฆฌ๋ณ„ ์˜ˆ์ œ ์งˆ๋ฌธ๋“ค
with gr.Row():
with gr.Column():
gr.Markdown("### ๐Ÿ’ป ํ”„๋กœ๊ทธ๋ž˜๋ฐ")
gr.Examples(
examples=[
"Python์œผ๋กœ ํ”ผ๋ณด๋‚˜์น˜ ์ˆ˜์—ด ํ•จ์ˆ˜ ๋งŒ๋“œ๋Š” ๋ฐฉ๋ฒ•?",
"๋”•์…”๋„ˆ๋ฆฌ์™€ ๋ฆฌ์ŠคํŠธ์˜ ์ฐจ์ด์ ์„ ์•Œ๋ ค์ค˜",
"ํด๋ž˜์Šค์™€ ๊ฐ์ฒด์— ๋Œ€ํ•ด ์„ค๋ช…ํ•ด์ค˜"
],
inputs=msg
)
with gr.Column():
gr.Markdown("### ๐Ÿ”ข ๊ณ„์‚ฐ/์ˆ˜ํ•™")
gr.Examples(
examples=[
"25 ๊ณฑํ•˜๊ธฐ 37์€ ์–ผ๋งˆ์•ผ?",
"๋ณต๋ฆฌ ๊ณ„์‚ฐ ๋ฐฉ๋ฒ•์„ ์•Œ๋ ค์ค˜",
"์‚ผ๊ฐํ•จ์ˆ˜์— ๋Œ€ํ•ด ์„ค๋ช…ํ•ด์ค˜"
],
inputs=msg
)
with gr.Column():
gr.Markdown("### ๐ŸŽจ ์ฐฝ์ž‘")
gr.Examples(
examples=[
"๋ด„์— ๋Œ€ํ•œ ์งง์€ ์‹œ๋ฅผ ์จ์ค˜",
"์šฐ์ฃผ ์—ฌํ–‰ ์ด์•ผ๊ธฐ๋ฅผ ๋งŒ๋“ค์–ด์ค˜",
"์ฐฝ์˜์ ์ธ ์•„์ด๋””์–ด๋ฅผ ์ œ์•ˆํ•ด์ค˜"
],
inputs=msg
)
return demo
demo = create_enhanced_gradio_interface()
if __name__ == "__main__":
demo.launch()