SUMANA SUMANAKUL (ING)
committed on
Commit
·
a55a706
1
Parent(s):
474fb03
fix requirements
Browse files
app.py
CHANGED
@@ -5,217 +5,92 @@ import gradio as gr
|
|
5 |
import uuid
|
6 |
from utils.chat import ChatLaborLaw
|
7 |
|
8 |
-
# ==============================================================================
|
9 |
-
# 1. GLOBAL INITIALIZATION (ทำครั้งเดียวตอนแอปเริ่มทำงาน)
|
10 |
-
# ==============================================================================
|
11 |
-
# --- Langfuse Handler ---
|
12 |
-
LANGFUSE_HANDLER = CallbackHandler()
|
13 |
-
|
14 |
-
# --- LLM Initialization ---
|
15 |
-
# (ปรับแก้ส่วนนี้เพื่อเลือกว่าจะใช้โมเดลไหนเป็น default)
|
16 |
-
MODEL_NAME_LLM = "jai-chat-1-3-2"
|
17 |
-
TEMPERATURE = 0
|
18 |
-
|
19 |
-
if MODEL_NAME_LLM == "jai-chat-1-3-2":
|
20 |
-
LLM_MAIN = ChatOpenAI(
|
21 |
-
model=MODEL_NAME_LLM,
|
22 |
-
api_key=os.getenv("JAI_API_KEY"),
|
23 |
-
base_url=os.getenv("CHAT_BASE_URL"),
|
24 |
-
temperature=TEMPERATURE,
|
25 |
-
max_tokens=2048,
|
26 |
-
max_retries=2,
|
27 |
-
seed=13
|
28 |
-
)
|
29 |
-
elif MODEL_NAME_LLM == "gemini-2.0-flash":
|
30 |
-
LLM_MAIN = ChatGoogleGenerativeAI(
|
31 |
-
model="gemini-1.5-flash",
|
32 |
-
google_api_key=os.getenv("GOOGLE_API_KEY"),
|
33 |
-
temperature=TEMPERATURE,
|
34 |
-
max_output_tokens=2048,
|
35 |
-
convert_system_message_to_human=True,
|
36 |
-
)
|
37 |
-
else:
|
38 |
-
raise ValueError(f"Unsupported LLM model '{MODEL_NAME_LLM}'.")
|
39 |
-
|
40 |
-
# --- Database and Retriever Initialization ---
|
41 |
-
MONGO_CONNECTION_STR = os.getenv('MONGO_CONNECTION_STRING')
|
42 |
-
MONGO_DATABASE = os.getenv('MONGO_DATABASE')
|
43 |
-
MONGO_COLLECTION = os.getenv('MONGO_COLLECTION')
|
44 |
-
|
45 |
-
MONGO_CLIENT = MongoClient(MONGO_CONNECTION_STR)
|
46 |
-
DB = MONGO_CLIENT[MONGO_DATABASE]
|
47 |
-
MONGO_COLLECTION_INSTANCE = DB[MONGO_COLLECTION]
|
48 |
-
RETRIEVER = RerankRetriever()
|
49 |
-
|
50 |
-
print("Global objects initialized successfully.")
|
51 |
-
|
52 |
-
# ==============================================================================
|
53 |
-
# 2. HELPER FUNCTIONS (แปลงมาจากเมธอดใน Class)
|
54 |
-
# ==============================================================================
|
55 |
-
|
56 |
-
def format_main_context(list_of_documents):
|
57 |
-
formatted_docs = []
|
58 |
-
for i, doc in enumerate(list_of_documents):
|
59 |
-
metadata = doc.metadata
|
60 |
-
formatted = f"Doc{i}\n{metadata.get('law_name', '-')}\nมาตรา\t{metadata.get('section_number', '-')}\n{doc.page_content}\nประกาศ\t{metadata.get('publication_date', '-')}\nเริ่มใช้\t{metadata.get('effective_date', '-')}"
|
61 |
-
formatted_docs.append(formatted)
|
62 |
-
return "\n\n".join(formatted_docs)
|
63 |
-
|
64 |
-
def format_ref_context(list_of_docs):
|
65 |
-
formatted_ref_docs = []
|
66 |
-
for i, doc in enumerate(list_of_docs):
|
67 |
-
formatted = f"{doc.get('law_name', '-')}\nมาตรา\t{doc.get('section_number', '-')}\n{doc.get('text', '-')}"
|
68 |
-
formatted_ref_docs.append(formatted)
|
69 |
-
return "\n\n".join(formatted_ref_docs)
|
70 |
-
|
71 |
-
def get_main_context(user_query, **kwargs):
|
72 |
-
compression_retriever = RETRIEVER.get_compression_retriever(**kwargs)
|
73 |
-
return compression_retriever.invoke(user_query)
|
74 |
-
|
75 |
-
def get_ref_context(main_context_docs):
|
76 |
-
all_reference_ids = set()
|
77 |
-
for context in main_context_docs:
|
78 |
-
references_list = context.metadata.get('references', [])
|
79 |
-
if isinstance(references_list, list):
|
80 |
-
for ref_str in references_list:
|
81 |
-
all_reference_ids.add(ref_str.replace("มาตรา", "").strip())
|
82 |
-
|
83 |
-
if not all_reference_ids:
|
84 |
-
return []
|
85 |
-
|
86 |
-
mongo_query = {"law_type": "summary", "section_number": {"$in": list(all_reference_ids)}}
|
87 |
-
projection = {"text": 1, "law_name": 1, "section_number": 1}
|
88 |
-
return list(MONGO_COLLECTION_INSTANCE.find(mongo_query, projection))
|
89 |
-
|
90 |
-
# ==============================================================================
|
91 |
-
# 3. CORE LOGIC FUNCTIONS (RAG / Non-RAG)
|
92 |
-
# ==============================================================================
|
93 |
-
|
94 |
-
async def call_rag(user_input: str, langchain_history: list) -> str:
|
95 |
-
context_docs = get_main_context(user_input, law_type="summary")
|
96 |
-
main_context_str = format_main_context(context_docs)
|
97 |
-
|
98 |
-
ref_context_docs = get_ref_context(context_docs)
|
99 |
-
ref_context_str = format_ref_context(ref_context_docs) if ref_context_docs else "-"
|
100 |
-
|
101 |
-
rag_input_data = {
|
102 |
-
"question": user_input,
|
103 |
-
"main_context": main_context_str,
|
104 |
-
"ref_context": ref_context_str,
|
105 |
-
"history": langchain_history
|
106 |
-
}
|
107 |
-
|
108 |
-
try:
|
109 |
-
prompt_messages = RAG_CHAT_PROMPT.format_messages(**rag_input_data)
|
110 |
-
response = await LLM_MAIN.ainvoke(prompt_messages, config={"callbacks": [LANGFUSE_HANDLER]})
|
111 |
-
clean_response = re.sub(r"<[^>]+>|#+", "", response.content).strip()
|
112 |
-
return clean_response
|
113 |
-
except Exception as e:
|
114 |
-
print(f"Error during RAG LLM call: {e}")
|
115 |
-
return "ขออภัย ระบบขัดข้องขณะประมวลผลคำตอบ"
|
116 |
-
|
117 |
-
async def call_non_rag(user_input: str) -> str:
|
118 |
-
prompt_messages = NON_RAG_PROMPT.format(user_input=user_input)
|
119 |
-
response = await LLM_MAIN.ainvoke(prompt_messages, config={"callbacks": [LANGFUSE_HANDLER]})
|
120 |
-
return response.content.strip() if response and response.content else "ขออภัย ระบบไม่สามารถตอบคำถามได้ในขณะนี้"
|
121 |
-
|
122 |
-
# ==============================================================================
|
123 |
-
# 4. GRADIO EVENT HANDLERS
|
124 |
-
# ==============================================================================
|
125 |
|
126 |
def initialize_session():
|
127 |
-
"""
|
|
|
|
|
128 |
session_id = str(uuid.uuid4())[:8]
|
129 |
-
|
|
|
130 |
|
131 |
-
async def
|
132 |
"""
|
133 |
-
|
134 |
"""
|
135 |
-
if not prompt.strip():
|
136 |
-
return
|
137 |
-
|
138 |
-
|
139 |
-
|
140 |
-
|
141 |
-
# 2. จำแนกประเภทของ Input
|
142 |
-
try:
|
143 |
-
history_content_list = [msg.content for msg in langchain_history]
|
144 |
-
input_type = classify_input_type(prompt, history=history_content_list)
|
145 |
-
except Exception as e:
|
146 |
-
print(f"Error classifying input type: {e}. Defaulting to Non-RAG.")
|
147 |
-
input_type = "Non-RAG"
|
148 |
-
|
149 |
-
# 3. เรียกใช้ Flow ที่เหมาะสม
|
150 |
-
if input_type == "RAG":
|
151 |
-
ai_response = await call_rag(prompt, langchain_history)
|
152 |
-
else:
|
153 |
-
ai_response = await call_non_rag(prompt)
|
154 |
-
|
155 |
-
# 4. อัปเดต History ทั้งสองรูปแบบ
|
156 |
-
langchain_history.append(AIMessage(content=ai_response))
|
157 |
-
ui_history.append((prompt, ai_response))
|
158 |
-
|
159 |
-
# 5. ส่งค่ากลับไปอัปเดต UI และ State
|
160 |
-
return ui_history, langchain_history, "" # ui_history, langchain_history, user_input (ให้เป็นค่าว่าง)
|
161 |
|
162 |
-
def
|
163 |
-
"""
|
164 |
-
|
|
|
|
|
|
|
165 |
os.makedirs("feedback", exist_ok=True)
|
166 |
filename = f"feedback/feedback_{session_id}.txt"
|
167 |
with open(filename, "a", encoding="utf-8") as f:
|
168 |
-
f.write(
|
169 |
-
|
170 |
-
|
171 |
-
f.write("
|
|
|
|
|
|
|
|
|
|
|
172 |
gr.Info("ขอบคุณสำหรับข้อเสนอแนะ!")
|
173 |
return ""
|
174 |
|
175 |
-
#
|
176 |
-
#
|
177 |
-
#
|
178 |
-
|
179 |
with gr.Blocks(theme=gr.themes.Soft(primary_hue="amber")) as demo:
|
180 |
gr.Markdown("# สอบถามเรื่องกฎหมายแรงงาน")
|
181 |
|
182 |
-
# --- States ---
|
183 |
-
# session_id_state: เก็บ ID ของ session ปัจจุบัน
|
184 |
-
# langchain_history_state: เก็บประวัติการสนทนาในรูปแบบ Langchain Message (HumanMessage, AIMessage)
|
185 |
session_id_state = gr.State()
|
186 |
-
|
187 |
-
|
188 |
-
|
189 |
-
|
|
|
|
|
|
|
|
|
190 |
user_input = gr.Textbox(placeholder="พิมพ์คำถามของคุณที่นี่...", label="คำถาม", lines=2)
|
|
|
191 |
with gr.Row():
|
192 |
submit_button = gr.Button("ส่ง", variant="primary", scale=4)
|
193 |
clear_button = gr.Button("เริ่มการสนทนาใหม่", scale=1)
|
194 |
|
195 |
-
|
196 |
-
|
197 |
-
|
198 |
-
|
199 |
-
outputs=[chatbot_interface, langchain_history_state, user_input]
|
200 |
)
|
201 |
user_input.submit(
|
202 |
-
fn=
|
203 |
-
inputs=[user_input, chatbot_interface,
|
204 |
-
outputs=[chatbot_interface,
|
205 |
)
|
|
|
206 |
clear_button.click(
|
207 |
fn=initialize_session,
|
208 |
inputs=[],
|
209 |
-
outputs=[user_input, session_id_state,
|
210 |
queue=False
|
211 |
)
|
212 |
-
|
213 |
with gr.Accordion("ส่งข้อเสนอแนะ (Feedback)", open=False):
|
214 |
feedback_input = gr.Textbox(placeholder="ความคิดเห็นของคุณมีความสำคัญต่อการพัฒนาของเรา...", label="Feedback", lines=2, scale=4)
|
215 |
send_feedback_button = gr.Button("ส่ง Feedback")
|
216 |
-
|
217 |
send_feedback_button.click(
|
218 |
-
fn=
|
219 |
inputs=[feedback_input, chatbot_interface, session_id_state],
|
220 |
outputs=[feedback_input],
|
221 |
queue=False
|
@@ -224,7 +99,7 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="amber")) as demo:
|
|
224 |
demo.load(
|
225 |
fn=initialize_session,
|
226 |
inputs=[],
|
227 |
-
outputs=[user_input, session_id_state,
|
228 |
)
|
229 |
|
230 |
demo.queue().launch()
|
|
|
5 |
import uuid
|
6 |
from utils.chat import ChatLaborLaw
|
7 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
8 |
|
9 |
def initialize_session():
    """Reset all per-session state for a fresh conversation.

    Creates a short random session id and a brand-new ChatLaborLaw
    instance. Returns a 4-tuple in the order the Gradio output
    bindings expect: (cleared user input, session id, chatbot
    instance, empty chat history).
    """
    new_session_id = str(uuid.uuid4())[:8]
    fresh_bot = ChatLaborLaw()
    return "", new_session_id, fresh_bot, []
|
16 |
|
17 |
+
async def chat_function(prompt: str, history_ui: list, chatbot_instance: ChatLaborLaw):
    """Run one chat turn.

    Sends *prompt* to the chatbot instance, appends the
    (question, answer) pair to the UI history, and returns
    (history_ui, chatbot_instance, "") — the empty string clears
    the input textbox. A blank prompt or a missing chatbot
    instance leaves the history untouched.
    """
    # Only talk to the bot when a session exists and there is real input.
    if chatbot_instance is not None and prompt.strip():
        answer = await chatbot_instance.chat(prompt)
        history_ui.append((prompt, answer))
    return history_ui, chatbot_instance, ""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
27 |
|
28 |
+
def save_feedback(feedback: str, history_ui: list, session_id: str):
    """Append user feedback plus the full chat transcript to a
    per-session file under ./feedback, then clear the feedback box.

    Whitespace-only feedback is ignored. Always returns "" so the
    Gradio textbox is reset.
    """
    if not feedback.strip():
        return ""

    os.makedirs("feedback", exist_ok=True)
    filename = f"feedback/feedback_{session_id}.txt"
    # Build the whole record first, then write it in one batch.
    record = [
        "=== Feedback Received ===\n",
        f"Session ID: {session_id}\n",
        f"Feedback: {feedback}\n\n",
        "Chat History:\n",
    ]
    for user_msg, assistant_msg in history_ui:
        record.append(f"User: {user_msg}\n")
        record.append(f"Assistant: {assistant_msg}\n")
        record.append("-" * 20 + "\n")
    record.append("\n==========================\n\n")
    with open(filename, "a", encoding="utf-8") as f:
        f.writelines(record)
    gr.Info("ขอบคุณสำหรับข้อเสนอแนะ!")
    return ""
|
48 |
|
49 |
+
# --------------------------------------------------------------------------
|
50 |
+
# สร้าง Gradio Interface
|
51 |
+
# --------------------------------------------------------------------------
|
|
|
52 |
# --------------------------------------------------------------------------
# Gradio UI wiring (runs at import time; launches the app at the end)
# --------------------------------------------------------------------------
with gr.Blocks(theme=gr.themes.Soft(primary_hue="amber")) as demo:
    gr.Markdown("# สอบถามเรื่องกฎหมายแรงงาน")

    # Per-browser-session state: the session id string and the
    # ChatLaborLaw instance created by initialize_session().
    session_id_state = gr.State()
    chatbot_instance_state = gr.State()

    chatbot_interface = gr.Chatbot(
        label="ประวัติการสนทนา",
        height=550,
        # NOTE(review): `bubble_styling` is not a documented gr.Chatbot
        # parameter in mainline Gradio (older releases use
        # `bubble_full_width`) — confirm against the pinned gradio version.
        bubble_styling=False,
        show_copy_button=True
    )
    user_input = gr.Textbox(placeholder="พิมพ์คำถามของคุณที่นี่...", label="คำถาม", lines=2)

    with gr.Row():
        submit_button = gr.Button("ส่ง", variant="primary", scale=4)
        clear_button = gr.Button("เริ่มการสนทนาใหม่", scale=1)

    # The send button and pressing Enter in the textbox run the same
    # chat handler with identical inputs/outputs.
    # NOTE(review): submit_event is currently unused; it would only be
    # needed for chaining/cancelling events.
    submit_event = submit_button.click(
        fn=chat_function,
        inputs=[user_input, chatbot_interface, chatbot_instance_state],
        outputs=[chatbot_interface, chatbot_instance_state, user_input]
    )
    user_input.submit(
        fn=chat_function,
        inputs=[user_input, chatbot_interface, chatbot_instance_state],
        outputs=[chatbot_interface, chatbot_instance_state, user_input]
    )

    # "New conversation": regenerate all state; queue=False bypasses the
    # request queue so the reset is applied immediately.
    clear_button.click(
        fn=initialize_session,
        inputs=[],
        outputs=[user_input, session_id_state, chatbot_instance_state, chatbot_interface],
        queue=False
    )

    with gr.Accordion("ส่งข้อเสนอแนะ (Feedback)", open=False):
        feedback_input = gr.Textbox(placeholder="ความคิดเห็นของคุณมีความสำคัญต่อการพัฒนาของเรา...", label="Feedback", lines=2, scale=4)
        send_feedback_button = gr.Button("ส่ง Feedback")

    # Persist the feedback together with the visible chat history.
    send_feedback_button.click(
        fn=save_feedback,
        inputs=[feedback_input, chatbot_interface, session_id_state],
        outputs=[feedback_input],
        queue=False
    )

    # Populate session state (id, chatbot instance, empty history) as
    # soon as the page loads.
    demo.load(
        fn=initialize_session,
        inputs=[],
        outputs=[user_input, session_id_state, chatbot_instance_state, chatbot_interface]
    )

demo.queue().launch()
|