Update app.py
app.py
CHANGED
@@ -35,277 +35,277 @@ with open("./static/styles.css") as f:
    st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)

# ==== GIAO DIỆN CHÍNH - TABS ====
-tab1, tab2 = st.tabs(["🤖 ViBidLQA Chatbot", "🔐 Facebook OAuth"])
+# tab1, tab2 = st.tabs(["🤖 ViBidLQA Chatbot", "🔐 Facebook OAuth"])

# =============================
# TAB 1: VIBIDLQA CHATBOT
# =============================
-with tab1:
[... removed lines 44-298: essentially the same chatbot code shown in the added lines below (session-state setup, the classify/introduce/unrelated-response helpers, the retrieve, rerank and generate pipeline, and the streaming chat loop), previously indented one level inside the "with tab1:" block ...]
+# with tab1:
+if 'messages' not in st.session_state:
+    st.session_state.messages = [{'role': 'assistant', 'content': "Xin chào. Tôi là trợ lý AI văn bản luật Đấu thầu Việt Nam được phát triển bởi Nguyễn Trường Phúc và các cộng sự. Rất vui khi được hỗ trợ bạn trong các vấn đề pháp lý tại Việt Nam!"}]
+
+st.markdown(f"""
+    <div class=logo_area>
+        <img src="./app/static/ai.jpg"/>
+    </div>
+""", unsafe_allow_html=True)
+st.markdown("<h2 style='text-align: center;'>ViBidLQA</h2>", unsafe_allow_html=True)
+
+def classify_question(question):
+    data = {
+        "question": question
+    }
+
+    response = requests.post(url_api_question_classify_model, json=data)
+
+    if response.status_code == 200:
+        print(response)
+        return response
+    else:
+        return f"Lỗi: {response.status_code} - {response.text}"
+
+def introduce_system(question):
+    data = {
+        "question": question
+    }
+
+    response = requests.post(url_api_introduce_system_model, json=data, stream=True)
+
+    if response.status_code == 200:
+        return response
+    else:
+        return f"Lỗi: {response.status_code} - {response.text}"
+
+def response_unrelated_question(question):
+    data = {
+        "question": question
+    }
+
+    response = requests.post(url_api_unrelated_question_response_model, json=data, stream=True)
+
+    if response.status_code == 200:
+        return response
+    else:
+        return f"Lỗi: {response.status_code} - {response.text}"
+
+def retrieve_context(question, top_k=10):
+    data = {
+        "query": question,
+        "top_k": top_k
+    }
+
+    response = requests.post(url_api_retrieval_model, json=data)
+
+    if response.status_code == 200:
+        results = response.json()["results"]
+        return results
+    else:
+        return f"Lỗi tại Retrieval Module: {response.status_code} - {response.text}"
+
+def rerank_context(url_rerank_module, question, relevant_docs, top_k=5):
+    data = {
+        "question": question,
+        "relevant_docs": relevant_docs,
+        "top_k": top_k
+    }
+
+    response = requests.post(url_rerank_module, json=data)
+
+    if response.status_code == 200:
+        results = response.json()["reranked_docs"]
+        return results
+    else:
+        return f"Lỗi tại Rerank module: {response.status_code} - {response.text}"
+
+def get_abstractive_answer(question):
+    retrieved_context = retrieve_context(question=question)
+    retrieved_context = [item['text'] for item in retrieved_context]
+
+    reranked_context = rerank_context(url_rerank_module=url_api_reranker_model,
+                                      question=question,
+                                      relevant_docs=retrieved_context,
+                                      top_k=5)[0]
+
+    data = {
+        "context": reranked_context,
+        "question": question
+    }
+
+    response = requests.post(url_api_generation_model, json=data, stream=True)
+
+    if response.status_code == 200:
+        return response
+    else:
+        return f"Lỗi: {response.status_code} - {response.text}"
+
+def generate_text_effect(answer):
+    words = answer.split()
+    for i in range(len(words)):
+        time.sleep(0.03)
+        yield " ".join(words[:i+1])
+
+for message in st.session_state.messages:
+    if message['role'] == 'assistant':
+        avatar_class = "assistant-avatar"
+        message_class = "assistant-message"
+        avatar = './app/static/ai.jpg'
+    else:
+        avatar_class = ""
+        message_class = "user-message"
+        avatar = ''
+    st.markdown(f"""
+        <div class="{message_class}">
+            <img src="{avatar}" class="{avatar_class}" />
+            <div class="stMarkdown">{message['content']}</div>
+        </div>
+    """, unsafe_allow_html=True)
+
+if prompt := st.chat_input(placeholder='Tôi có thể giúp được gì cho bạn?'):
+    st.markdown(f"""
+        <div class="user-message">
+            <div class="stMarkdown">{prompt}</div>
+        </div>
+    """, unsafe_allow_html=True)
+    st.session_state.messages.append({'role': 'user', 'content': prompt})
+
+    message_placeholder = st.empty()
+
+    full_response = ""
+    try:
+        classify_result = classify_question(question=prompt).json()
+
+        print(f"The type of user query: {classify_result}")
+
+        if classify_result == "BIDDING_RELATED":
+            abs_answer = get_abstractive_answer(question=prompt)
+
+            if isinstance(abs_answer, str):
+                full_response = abs_answer
+                message_placeholder.markdown(f"""
+                    <div class="assistant-message">
+                        <img src="./app/static/ai.jpg" class="assistant-avatar" />
+                        <div class="stMarkdown">{full_response}</div>
+                    </div>
+                """, unsafe_allow_html=True)
+            else:
+                full_response = ""
+                for line in abs_answer.iter_lines():
+                    if line:
+                        line = line.decode('utf-8')
+                        if line.startswith('data: '):
+                            data_str = line[6:]
+                            if data_str == '[DONE]':
+                                break
+
+                            try:
+                                data = json.loads(data_str)
+                                token = data.get('token', '')
+                                full_response += token
+
+                                message_placeholder.markdown(f"""
+                                    <div class="assistant-message">
+                                        <img src="./app/static/ai.jpg" class="assistant-avatar" />
+                                        <div class="stMarkdown">{full_response}●</div>
+                                    </div>
+                                """, unsafe_allow_html=True)
+
+                            except json.JSONDecodeError:
+                                pass
+
+        elif classify_result == "ABOUT_CHATBOT":
+            answer = introduce_system(question=prompt)
+
+            if isinstance(answer, str):
+                full_response = answer
+                message_placeholder.markdown(f"""
+                    <div class="assistant-message">
+                        <img src="./app/static/ai.jpg" class="assistant-avatar" />
+                        <div class="stMarkdown">{full_response}</div>
+                    </div>
+                """, unsafe_allow_html=True)
+            else:
+                full_response = ""
+                for line in answer.iter_lines():
+                    if line:
+                        line = line.decode('utf-8')
+                        if line.startswith('data: '):
+                            data_str = line[6:]
+                            if data_str == '[DONE]':
+                                break
+
+                            try:
+                                data = json.loads(data_str)
+                                token = data.get('token', '')
+                                full_response += token
+
+                                message_placeholder.markdown(f"""
+                                    <div class="assistant-message">
+                                        <img src="./app/static/ai.jpg" class="assistant-avatar" />
+                                        <div class="stMarkdown">{full_response}●</div>
+                                    </div>
+                                """, unsafe_allow_html=True)
+
+                            except json.JSONDecodeError:
+                                pass
+
+        else:
+            answer = response_unrelated_question(question=prompt)
+
+            if isinstance(answer, str):
+                full_response = answer
+                message_placeholder.markdown(f"""
+                    <div class="assistant-message">
+                        <img src="./app/static/ai.jpg" class="assistant-avatar" />
+                        <div class="stMarkdown">{full_response}</div>
+                    </div>
+                """, unsafe_allow_html=True)
+            else:
+                full_response = ""
+                for line in answer.iter_lines():
+                    if line:
+                        line = line.decode('utf-8')
+                        if line.startswith('data: '):
+                            data_str = line[6:]
+                            if data_str == '[DONE]':
+                                break
+
+                            try:
+                                data = json.loads(data_str)
+                                token = data.get('token', '')
+                                full_response += token
+
+                                message_placeholder.markdown(f"""
+                                    <div class="assistant-message">
+                                        <img src="./app/static/ai.jpg" class="assistant-avatar" />
+                                        <div class="stMarkdown">{full_response}●</div>
+                                    </div>
+                                """, unsafe_allow_html=True)
+
+                            except json.JSONDecodeError:
+                                pass
+    except Exception as e:
+        full_response = "Hiện tại trợ lý AI đang nghỉ xíu để sạc pin 🔌. Bạn hãy quay lại sau nhé!"
+
+    message_placeholder.markdown(f"""
+        <div class="assistant-message">
+            <img src="./app/static/ai.jpg" class="assistant-avatar" />
+            <div class="stMarkdown">
+                {full_response}
+            </div>
+        </div>
+    """, unsafe_allow_html=True)
+
+    st.session_state.messages.append({'role': 'assistant', 'content': full_response})

# =============================
# TAB 2: FACEBOOK OAUTH
# =============================
-with tab2:
[... removed lines 304-311: the Facebook OAuth login flow for tab 2, reproduced commented out in the added lines below ...]
+# with tab2:
+# st.title("Đăng nhập Facebook để lấy Page Access Token")
+
+# # Tạo link login
+# login_url = f"{FB_BACKEND_URL}/facebook/login"
+
+# st.markdown(f"[👉 Bấm vào đây để đăng nhập Facebook]({login_url})")
+
+# st.info("Sau khi đăng nhập xong, bạn có thể quay lại ứng dụng này. Thông tin page đã được in ra ở backend.")