import gradio as gr
import pandas as pd
import logging
import asyncio
import os
import time
from uuid import uuid4
from datetime import datetime, timedelta
from pathlib import Path
from huggingface_hub import CommitScheduler, HfApi
from auditqa.sample_questions import QUESTIONS
from auditqa.reports import files, report_list, new_files, new_report_list
from auditqa.process_chunks import load_chunks, getconfig, get_local_qdrant
from auditqa.retriever import get_context
from auditqa.reader import nvidia_client, dedicated_endpoint, serverless_api, inf_provider
from auditqa.utils import make_html_source, parse_output_llm_with_sources, save_logs, get_message_template, get_client_location, get_client_ip, get_platform_info
from dotenv import load_dotenv
load_dotenv()
from threading import Lock
from gradio.routes import Request
import json
#import platform
#print(platform.python_version())
# fetch tokens and model config params
SPACES_LOG = os.environ["SPACES_LOG"]
#audit_space = os.environ["audit_space"]
model_config = getconfig("model_params.cfg")
# create the local logs repo
JSON_DATASET_DIR = Path("json_dataset")
JSON_DATASET_DIR.mkdir(parents=True, exist_ok=True)
JSON_DATASET_PATH = JSON_DATASET_DIR / f"logs-{uuid4()}.json"
# the logs are written to dataset repo periodically from local logs
# https://huggingface.co/spaces/Wauplin/space_to_dataset_saver
scheduler = CommitScheduler(
repo_id="GIZ/spaces_logs",
repo_type="dataset",
folder_path=JSON_DATASET_DIR,
path_in_repo="eudr_chatbot", # Changed for EUDR
token=SPACES_LOG )
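# A minimal sketch (not the actual implementation, which lives in
# auditqa.utils.save_logs) of the write pattern used with CommitScheduler,
# following the space_to_dataset_saver example linked above: append one JSON
# object per line while holding the scheduler lock, so a background commit
# never snapshots a half-written file:
#
#     with scheduler.lock:
#         with JSON_DATASET_PATH.open("a") as f:
#             f.write(json.dumps(record) + "\n")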
#####--------------- VECTOR STORE -------------------------------------------------
# reports contain the already created chunks from Markdown version of pdf reports
# document processing was done using : https://github.com/axa-group/Parsr
# We need to create the local vectorstore collection once using load_chunks
# vectorstore collections are stored on persistent storage, so this needs to be run only once
# hence, uncomment the line below only when creating the collections for the first time
#vectorstores = load_chunks()
# once the vector embeddings are created, we use the qdrant client to access them
#try:
vectorstores = get_local_qdrant()
#except Exception as e:
# api = HfApi()
# api.restart_space(repo_id = "GIZ/eudr_assistant", token=audit_space) # Changed for EUDR
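# Illustrative only (assuming get_local_qdrant returns LangChain-style
# vectorstore wrappers keyed by collection name, as the retriever code below
# suggests): a raw lookup against the "docling" collection would be
#
#     docs = vectorstores["docling"].similarity_search("EUDR due diligence", k=5)
#     print(docs[0].page_content, docs[0].metadata)
#
# In this app the equivalent query goes through auditqa.retriever.get_context,
# which also applies the country filter.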
#####---------------------CHAT-----------------------------------------------------
def start_chat(query,history):
history = history + [(query,None)]
history = [tuple(x) for x in history]
return (gr.update(interactive = False),gr.update(selected=1),history)
def finish_chat():
return (gr.update(interactive = True,value = ""))
def submit_feedback(feedback, logs_data):
"""Handle feedback submission"""
try:
if logs_data is None:
return gr.update(visible=False), gr.update(visible=True)
session_id = logs_data.get("session_id")
if session_id:
# Update session last_activity to now
session_manager.update_session(session_id)
# Compute duration from the session manager and update the log.
logs_data["session_duration_seconds"] = session_manager.get_session_duration(session_id)
# Now save the (feedback) log record
save_logs(scheduler, JSON_DATASET_PATH, logs_data, feedback)
return gr.update(visible=False), gr.update(visible=True)
    except Exception:
        # on any failure, still hide the feedback UI and show the thank-you note
        return gr.update(visible=False), gr.update(visible=True)
def handle_geojson_upload(file):
"""Handle GeoJSON file upload"""
if file is not None:
return "✅ Document submitted successfully! You can now ask questions about this GeoJSON file."
else:
return "❌ Please select a GeoJSON file to upload."
# Session Manager added (track session duration, location, and platform)
class SessionManager:
def __init__(self):
self.sessions = {}
def create_session(self, client_ip, user_agent):
session_id = str(uuid4())
self.sessions[session_id] = {
'start_time': datetime.now(),
'last_activity': datetime.now(),
'client_ip': client_ip,
'location_info': get_client_location(client_ip),
'platform_info': get_platform_info(user_agent)
}
return session_id
def update_session(self, session_id):
if session_id in self.sessions:
self.sessions[session_id]['last_activity'] = datetime.now()
def get_session_duration(self, session_id):
if session_id in self.sessions:
start = self.sessions[session_id]['start_time']
last = self.sessions[session_id]['last_activity']
return (last - start).total_seconds()
return 0
def get_session_data(self, session_id):
return self.sessions.get(session_id)
# Initialize session manager
session_manager = SessionManager()
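# Example usage (illustrative values):
#
#     sid = session_manager.create_session("203.0.113.7", "Mozilla/5.0 ...")
#     session_manager.update_session(sid)            # refresh last_activity
#     session_manager.get_session_duration(sid)      # seconds between start and last activity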
async def chat(query, history, method, country, uploaded_file, client_ip=None, session_id=None, request:gr.Request = None):
"""taking a query and a message history, use a pipeline (reformulation, retriever, answering)
to yield a tuple of:(messages in gradio format/messages in langchain format, source documents)
"""
if not session_id:
user_agent = request.headers.get('User-Agent','') if request else ''
session_id = session_manager.create_session(client_ip, user_agent)
else:
session_manager.update_session(session_id)
    # fetch session data and current duration for logging
session_data = session_manager.get_session_data(session_id)
session_duration = session_manager.get_session_duration(session_id)
print(f">> NEW QUESTION : {query}")
print(f"history:{history}")
print(f"method:{method}")
print(f"country:{country}")
print(f"uploaded_file:{uploaded_file}")
docs_html = ""
output_query = ""
# Handle different methods
if method == "Upload GeoJSON":
if uploaded_file is None:
warning_message = "⚠️ **No GeoJSON file uploaded.** Please upload a GeoJSON file first."
history[-1] = (query, warning_message)
            # Update logs with the warning instead of an answer
logs_data = {
"record_id": str(uuid4()),
"session_id": session_id,
"session_duration_seconds": session_duration,
"client_location": session_data['location_info'],
"platform": session_data['platform_info'],
"question": query,
"method": method,
"uploaded_file": str(uploaded_file) if uploaded_file else None,
"retriever": model_config.get('retriever','MODEL'),
"endpoint_type": model_config.get('reader','TYPE'),
"reader": model_config.get('reader','NVIDIA_MODEL'),
"answer": warning_message,
"no_results": True # Flag to indicate no results were found
}
yield [tuple(x) for x in history], "", logs_data, session_id
# Save log for the warning response
save_logs(scheduler, JSON_DATASET_PATH, logs_data)
return
else: # "Talk to Reports"
if not country:
warning_message = "⚠️ **No country selected.** Please select a country to analyze reports."
history[-1] = (query, warning_message)
            # Update logs with the warning instead of an answer
logs_data = {
"record_id": str(uuid4()),
"session_id": session_id,
"session_duration_seconds": session_duration,
"client_location": session_data['location_info'],
"platform": session_data['platform_info'],
"question": query,
"method": method,
"country": country,
"retriever": model_config.get('retriever','MODEL'),
"endpoint_type": model_config.get('reader','TYPE'),
"reader": model_config.get('reader','NVIDIA_MODEL'),
"answer": warning_message,
"no_results": True # Flag to indicate no results were found
}
yield [tuple(x) for x in history], "", logs_data, session_id
# Save log for the warning response
save_logs(scheduler, JSON_DATASET_PATH, logs_data)
return
##------------------------fetch collection from vectorstore------------------------------
vectorstore = vectorstores["docling"]
##------------------------------get context----------------------------------------------
    ### timing the retrieval step
start_time = time.time()
# Modified context retrieval for EUDR use case
if method == "Upload GeoJSON":
# For GeoJSON, use the uploaded file as context
# This is a placeholder - you'll need to implement GeoJSON processing
context_retrieved = [] # Implement GeoJSON processing here
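        # A minimal sketch of what that processing could look like, assuming the
        # stdlib json module (imported above) and the same Document class the
        # retriever returns (not imported in this file, hence kept commented):
        #
        #     with open(uploaded_file.name) as f:
        #         geojson = json.load(f)
        #     context_retrieved = [
        #         Document(page_content=json.dumps(feature.get("properties", {})),
        #                  metadata={"source": "geojson"})
        #         for feature in geojson.get("features", [])
        #     ]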
else: # "Talk to Reports"
# For reports, filter by country
context_retrieved = get_context(vectorstore=vectorstore, query=query,
country=country) # Modified to use country instead of reports/sources
end_time = time.time()
print("Time for retriever:",end_time - start_time)
    if not context_retrieved:
if method == "Upload GeoJSON":
warning_message = "⚠️ **No relevant information found in the uploaded GeoJSON file.** Please try rephrasing your question or check your file."
else:
warning_message = f"⚠️ **No relevant information found for {country}.** Please try rephrasing your question or selecting a different country."
history[-1] = (query, warning_message)
        # Update logs with the warning instead of an answer
logs_data = {
"record_id": str(uuid4()),
"session_id": session_id,
"session_duration_seconds": session_duration,
"client_location": session_data['location_info'],
"platform": session_data['platform_info'],
"question": query,
"method": method,
"country": country if method == "Talk to Reports" else None,
"uploaded_file": str(uploaded_file) if method == "Upload GeoJSON" else None,
"retriever": model_config.get('retriever','MODEL'),
"endpoint_type": model_config.get('reader','TYPE'),
"reader": model_config.get('reader','NVIDIA_MODEL'),
"answer": warning_message,
"no_results": True # Flag to indicate no results were found
}
yield [tuple(x) for x in history], "", logs_data, session_id
# Save log for the warning response
save_logs(scheduler, JSON_DATASET_PATH, logs_data)
return
context_retrieved_formatted = "||".join(doc.page_content for doc in context_retrieved)
context_retrieved_lst = [doc.page_content for doc in context_retrieved]
    ##--------------------------------Define Prompt-------------------------------------------
SYSTEM_PROMPT = """
You are EUDR Q&A, an AI Assistant for EU Deforestation Regulation analysis. \
You are given a question and extracted passages related to EUDR compliance and deforestation data.\
Provide a clear and structured answer based on the passages/context provided and the guidelines.
Guidelines:
- Passages are provided as a comma-separated list of strings
- If the passages have useful facts or numbers, use them in your answer.
- When you use information from a passage, mention where it came from by using [Doc i] at the end of the sentence. i stands for the number of the document.
- Do not use the sentence 'Doc i says ...' to say where information came from.
- If the same thing is said in more than one document, you can mention all of them like this: [Doc i, Doc j, Doc k]
- Do not just summarize each passage one by one. Group your summaries to highlight the key parts in the explanation.
- If it makes sense, use bullet points and lists to make your answers easier to understand.
- You do not need to use every passage. Only use the ones that help answer the question.
- If the documents do not have the information needed to answer the question, just say you do not have enough information.
"""
USER_PROMPT = """Passages:
{context}
-----------------------
Question: {question} - Explained for EUDR compliance expert
Answer in English with the passage citations:
""".format(context = context_retrieved_lst, question=query)
##-------------------- apply message template ------------------------------
messages = get_message_template(model_config.get('reader','TYPE'),SYSTEM_PROMPT,USER_PROMPT)
## -----------------Prepare HTML for displaying source documents --------------
docs_html = []
for i, d in enumerate(context_retrieved, 1):
docs_html.append(make_html_source(d, i))
docs_html = "".join(docs_html)
##-----------------------get answer from endpoints------------------------------
answer_yet = ""
logs_data = {
"record_id": str(uuid4()), # Add unique record ID
"session_id": session_id,
"session_duration_seconds": session_duration,
"client_location": session_data['location_info'],
"platform": session_data['platform_info'],
"system_prompt": SYSTEM_PROMPT,
"method": method,
"country": country if method == "Talk to Reports" else None,
"uploaded_file": str(uploaded_file) if method == "Upload GeoJSON" else None,
"question": query,
"retriever": model_config.get('retriever','MODEL'),
"endpoint_type": model_config.get('reader','TYPE'),
"reader": model_config.get('reader','NVIDIA_MODEL'),
"docs": [doc.page_content for doc in context_retrieved],
}
# Keep the same endpoint logic as the original...
if model_config.get('reader','TYPE') == 'NVIDIA':
chat_model = nvidia_client()
async def process_stream():
nonlocal answer_yet # Use the outer scope's answer_yet variable
response = chat_model.chat_completion(
model=model_config.get("reader","NVIDIA_MODEL"),
messages=messages,
stream=True,
max_tokens=int(model_config.get('reader','MAX_TOKENS')),
)
for message in response:
token = message.choices[0].delta.content
if token:
answer_yet += token
parsed_answer = parse_output_llm_with_sources(answer_yet)
history[-1] = (query, parsed_answer)
logs_data["answer"] = parsed_answer
yield [tuple(x) for x in history], docs_html, logs_data, session_id
# Stream the response updates
async for update in process_stream():
yield update
elif model_config.get('reader','TYPE') == 'INF_PROVIDERS':
chat_model = inf_provider()
start_time = time.time()
ai_prefix = "**AI-Generated Response:**\n\n"
async def process_stream():
nonlocal answer_yet
answer_yet += ai_prefix
response = chat_model.chat.completions.create(
model=model_config.get("reader","INF_PROVIDER_MODEL"),
messages = messages,
stream= True,
max_tokens=int(model_config.get('reader','MAX_TOKENS')),
)
for message in response:
token = message.choices[0].delta.content
if token:
answer_yet += token
parsed_answer = parse_output_llm_with_sources(answer_yet)
history[-1] = (query, parsed_answer)
logs_data["answer"] = parsed_answer
yield [tuple(x) for x in history], docs_html, logs_data, session_id
await asyncio.sleep(0.05)
# Stream the response updates
async for update in process_stream():
yield update
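    # Note: inf_provider() exposes an OpenAI-style client (chat.completions.create),
    # whereas nvidia_client() above uses the huggingface_hub-style chat_completion
    # call; apart from that, the streaming loops are identical.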
elif model_config.get('reader','TYPE') == 'DEDICATED':
chat_model = dedicated_endpoint()
        ### timing the reader step
start_time = time.time()
async def process_stream():
nonlocal answer_yet # Use the outer scope's answer_yet variable
# Iterate over the streaming response chunks
async for chunk in chat_model.astream(messages):
token = chunk.content
answer_yet += token
parsed_answer = parse_output_llm_with_sources(answer_yet)
history[-1] = (query, parsed_answer)
logs_data["answer"] = parsed_answer
yield [tuple(x) for x in history], docs_html, logs_data, session_id
end_time = time.time()
print("Time for reader:",end_time - start_time)
# Stream the response updates
async for update in process_stream():
yield update
else:
chat_model = serverless_api() # TESTING: ADAPTED FOR HF INFERENCE API (needs to be reverted for production version)
async def process_stream():
nonlocal answer_yet
try:
formatted_messages = [
{
"role": msg.type if hasattr(msg, 'type') else msg.role,
"content": msg.content
}
for msg in messages
]
response = chat_model.chat_completion(
messages=formatted_messages,
max_tokens= int(model_config.get('reader', 'MAX_TOKENS'))
)
response_text = response.choices[0].message.content
words = response_text.split()
for word in words:
answer_yet += word + " "
parsed_answer = parse_output_llm_with_sources(answer_yet)
history[-1] = (query, parsed_answer)
                    # update logs_data with the current answer
logs_data["answer"] = parsed_answer
yield [tuple(x) for x in history], docs_html, logs_data, session_id
await asyncio.sleep(0.05)
            except Exception:
                # propagate streaming errors to the caller
                raise
async for update in process_stream():
yield update
# logging the event
    try:
        save_logs(scheduler, JSON_DATASET_PATH, logs_data)
    except Exception:
        # surface logging failures instead of silently dropping the record
        raise
#####-------------------------- Gradio App--------------------------------------####
# Set up Gradio Theme
theme = gr.themes.Base(
primary_hue="blue",
secondary_hue="red",
font=[gr.themes.GoogleFont("Poppins"), "ui-sans-serif", "system-ui", "sans-serif"],
text_size = gr.themes.utils.sizes.text_sm,
)
init_prompt = """
Hello, I am EUDR Q&A, an AI-powered conversational assistant designed to help you understand EU Deforestation Regulation compliance and analysis. I will answer your questions by using **EUDR reports and uploaded GeoJSON files**.
💡 How to use (tabs on right)
- **Data Sources**: You can choose to either upload a GeoJSON file for analysis or talk to EUDR reports filtered by country.
- **Examples**: We have curated some example questions; select a question from a category to try it out.
- **Sources**: This tab displays the content relied upon from the reports or uploaded files, so you can assess or fact-check whether the answer provided by the EUDR Q&A assistant is correct.
⚠️ For the limitations of the tool and details on the collection of usage statistics and data, please check the **Disclaimer** tab.
⚠️ By using this app, you acknowledge that we collect usage statistics (such as questions asked, feedback given, session duration, device type and anonymized geo-location) to understand performance and continuously improve the tool, based on our legitimate interest in enhancing our services.
"""
with gr.Blocks(title="EUDR Q&A", css= "style.css", theme=theme,elem_id = "main-component") as demo:
#----------------------------------------------------------------------------------------------
# main tab where chat interaction happens
# ---------------------------------------------------------------------------------------------
with gr.Tab("EUDR Q&A"):
with gr.Row(elem_id="chatbot-row"):
# chatbot output screen
with gr.Column(scale=2):
chatbot = gr.Chatbot(
value=[(None,init_prompt)],
show_copy_button=True,show_label = False,elem_id="chatbot",layout = "panel",
avatar_images = (None,"data-collection.png"),
)
# feedback UI
with gr.Column(elem_id="feedback-container"):
with gr.Row(visible=False) as feedback_row:
gr.Markdown("Was this response helpful?")
with gr.Row():
okay_btn = gr.Button("👍 Okay", elem_classes="feedback-button")
not_okay_btn = gr.Button("👎 Not to expectations", elem_classes="feedback-button")
feedback_thanks = gr.Markdown("Thanks for the feedback!", visible=False)
feedback_state = gr.State()
with gr.Row(elem_id = "input-message"):
textbox=gr.Textbox(placeholder="Ask me anything here!",show_label=False,scale=7,
lines = 1,interactive = True,elem_id="input-textbox")
# second column with playground area for user to select values
with gr.Column(scale=1, variant="panel",elem_id = "right-panel"):
# creating tabs on right panel
with gr.Tabs() as tabs:
#---------------- tab for DATA SOURCES SELECTION ----------------------
with gr.Tab("Data Sources",elem_id = "tab-config",id = 2):
#---------------- SELECTION METHOD - RADIO BUTTON ------------
search_method = gr.Radio(
choices=["Upload GeoJSON", "Talk to Reports"],
label="Choose data source",
info= "Upload a GeoJSON file for analysis or select country-specific EUDR reports",
value="Upload GeoJSON",
)
#---------------- UPLOAD GEOJSON SECTION ------------
with gr.Group(visible=True) as geojson_section:
uploaded_file = gr.File(
label="Upload GeoJSON File",
file_types=[".geojson", ".json"],
file_count="single"
)
upload_status = gr.Markdown("", visible=False)
submit_file_btn = gr.Button("Submit GeoJSON", variant="primary")
#---------------- TALK TO REPORTS SECTION ------------
with gr.Group(visible=False) as reports_section:
#----- Country filter ----------
dropdown_country = gr.Dropdown(
["Ecuador", "Guatemala"],
label="Select Country",
value=None,
interactive=True,
)
# Toggle visibility based on search method
def toggle_search_method(method):
"""Toggle between GeoJSON upload and country selection"""
if method == "Upload GeoJSON":
return (
gr.update(visible=True), # geojson_section
gr.update(visible=False), # reports_section
gr.update(value=None), # dropdown_country
gr.update(value=None) # uploaded_file
)
else: # "Talk to Reports"
return (
gr.update(visible=False), # geojson_section
gr.update(visible=True), # reports_section
gr.update(), # dropdown_country
gr.update(value=None) # uploaded_file
)
# Pass to the event handler
search_method.change(
fn=toggle_search_method,
inputs=[search_method],
outputs=[
geojson_section,
reports_section,
dropdown_country,
uploaded_file
]
)
# Handle file upload submission
submit_file_btn.click(
fn=handle_geojson_upload,
inputs=[uploaded_file],
outputs=[upload_status]
).then(
lambda: gr.update(visible=True),
outputs=[upload_status]
)
############### tab for Question selection ###############
with gr.TabItem("Examples",elem_id = "tab-examples",id = 0):
examples_hidden = gr.Textbox(visible = False)
                        # get the default key to display
first_key = list(QUESTIONS.keys())[0]
# create the question category dropdown
dropdown_samples = gr.Dropdown(QUESTIONS.keys(),value = first_key,
interactive = True,show_label = True,
label = "Select a category of sample questions",
elem_id = "dropdown-samples")
# iterate through the questions list
samples = []
for i,key in enumerate(QUESTIONS.keys()):
                            examples_visible = (i == 0)
with gr.Row(visible = examples_visible) as group_examples:
examples_questions = gr.Examples(
QUESTIONS[key],
[examples_hidden],
examples_per_page=8,
run_on_click=False,
elem_id=f"examples{i}",
api_name=f"examples{i}",
)
samples.append(group_examples)
##------------------- tab for Sources reporting ##------------------
with gr.Tab("Sources",elem_id = "tab-citations",id = 1):
sources_textbox = gr.HTML(show_label=False, elem_id="sources-textbox")
docs_textbox = gr.State("")
def change_sample_questions(key):
# update the questions list based on key selected
index = list(QUESTIONS.keys()).index(key)
visible_bools = [False] * len(samples)
visible_bools[index] = True
return [gr.update(visible=visible_bools[i]) for i in range(len(samples))]
dropdown_samples.change(change_sample_questions,dropdown_samples,samples)
# ---- Guidelines Tab ----
with gr.Tab("Guidelines", elem_classes="max-height other-tabs"):
gr.Markdown("""
#### Welcome to EUDR Q&A, your AI-powered assistant for exploring and understanding EU Deforestation Regulation compliance. This tool leverages advanced language models to help you get clear and structured answers based on EUDR data and reports. To get you started, here are a few tips on how to use the tool:
## 💬 Crafting Effective Prompts
Clear, specific questions will give you the best results. Here are some examples:
| ❌ Less Effective | ✅ More Effective |
|------------------|-------------------|
| "What is deforestation?" | "What are the main deforestation hotspots identified in Ecuador's latest assessment?" |
| "Tell me about compliance" | "What specific EUDR compliance requirements apply to coffee imports from Guatemala?" |
| "Show me data" | "What is the deforestation rate trend in the selected region between 2020-2023?" |
## ⭐ Best Practices
- **Be Clear and Specific**: Frame your questions clearly and focus on what you want to learn.
- **One Topic at a Time**: Break complex queries into simpler, focused questions.
- **Provide Context**: Mentioning specific regions, commodities, or time periods helps narrow the focus.
- **Follow Up**: Ask follow-up questions to explore a topic more deeply.
## 🔍 Utilizing Data Sources
- **GeoJSON Upload**: Upload your GeoJSON files containing geographic data for specific analysis of deforestation patterns in your area of interest.
- **Country Reports**: Select from Ecuador or Guatemala to access country-specific EUDR compliance reports and analysis.
## 📚 Useful Resources
- <ins>[**EU Deforestation Regulation Official Guide**](https://environment.ec.europa.eu/topics/forests/deforestation_en)</ins>
- <ins>[**EUDR Implementation Timeline**](https://ec.europa.eu/environment/forests/deforestation.htm)</ins>
- <ins>[**Commodity-Specific Guidelines**](https://ec.europa.eu/info/food-farming-fisheries/key-policies/common-agricultural-policy/market-measures/agri-food-chain_en)</ins>
Enjoy using EUDR Q&A and happy analyzing!
""")
# static tab 'about us'
with gr.Tab("About",elem_classes = "max-height other-tabs"):
with gr.Row():
with gr.Column(scale=1):
gr.Markdown("""The <ins>[**EU Deforestation Regulation (EUDR)**](https://environment.ec.europa.eu/topics/forests/deforestation_en)</ins> \
represents a landmark piece of legislation aimed at minimizing the EU's contribution to global deforestation \
and forest degradation. The regulation requires companies to ensure that specific commodities placed on the EU market \
are deforestation-free and legally produced.
However, understanding and implementing EUDR compliance can be complex, involving detailed geographic data analysis, \
supply chain tracking, and regulatory interpretation. The current format of compliance reports and geographic data \
can be challenging for stakeholders to navigate effectively, potentially hindering proper implementation \
and compliance monitoring.
In response to this challenge, modern advancements in Artificial Intelligence (AI), \
particularly Retrieval Augmented Generation (RAG) technology and geographic data processing, \
emerge as promising solutions. By harnessing the capabilities of such AI tools, \
there is an opportunity to improve the accessibility and understanding \
of EUDR requirements and geographic risk assessments, ensuring that compliance insights are effectively \
translated into actionable outcomes for businesses and regulators.
To address these implementation challenges, the **GIZ** has initiated this prototype project \
to enhance how EUDR compliance data and geographic information are processed and understood. \
Leveraging AI, the tool offers critical capabilities such as summarizing complex compliance requirements, \
analyzing geographic data and extracting insights from GeoJSON files, \
and enabling interactive, user-friendly analysis through a chatbot interface.
By making EUDR compliance information more accessible, it aims to increase understanding \
and proper implementation among stakeholders, which can lead to better environmental outcomes \
and regulatory compliance.
""")
# static tab for disclaimer
with gr.Tab("Disclaimer",elem_classes = "max-height other-tabs"):
with gr.Row():
with gr.Column(scale=1):
gr.Markdown("""
- This chatbot is intended for specific use of answering questions based on EUDR compliance reports and uploaded GeoJSON data. For any use beyond this scope we have no liability for responses provided by the chatbot.
- The functionality and scope of this chatbot is limited to the context contained in EUDR reports and uploaded geographic data files.
- We have implemented measures to ensure the technical robustness and security of our AI system, minimizing unexpected behaviour; however, we do not guarantee the full reliability or completeness of any information provided by the chatbot, and we disclaim any liability or responsibility for actions taken based on its responses.
- The chatbot may occasionally provide inaccurate or inappropriate responses, and it is important to exercise judgment and critical thinking when interpreting its output, especially for compliance decisions.
- The use of AI within this application is transparent. When interacting with the AI, users are informed that they are engaging with an AI system.
- The chatbot responses should not be considered professional legal or compliance advice and are generated based on patterns in the data it has been trained on.
- The chatbot's responses do not reflect the official positions or policies of the EU, our organization or its affiliates regarding EUDR implementation.
- Any personal or sensitive information shared with the chatbot is at the user's own risk, and we cannot guarantee complete privacy or confidentiality.
- Uploaded GeoJSON files are processed locally and temporarily for analysis purposes. We recommend not uploading sensitive or proprietary geographic data.
- The chatbot is not deterministic, so answers to the same question may vary when asked by different users or multiple times.
- When you use this app, we collect certain information to understand its usage, analyze performance, and continuously improve the tool for future use. This includes:
- The questions you ask
- The AI-generated answers
- Information about uploaded files (file names, not content)
- Feedback given towards each response (in the form of thumbs-up and thumbs-down), if any.
- Usage statistics such as session duration, device type and anonymized geo-location information.
We process this data based on our legitimate interest in continually enhancing the quality, security, and usability of the EUDR Q&A assistant.
- By using this chatbot, you agree to these terms and acknowledge that you are solely responsible for any reliance on or actions taken based on its responses.
- Users can find more technical information about the tool in its [**Readme**](https://huggingface.co/spaces/GIZ/eudr_assistant/blob/main/README.md).
- **This is just a prototype and being tested and worked upon, so it's not perfect and may sometimes give irrelevant answers**. If you are not satisfied with the answer, please ask a more specific question or report your feedback to help us improve the system.
""")
def show_feedback(logs):
"""Show feedback buttons and store logs in state"""
return gr.update(visible=True), gr.update(visible=False), logs
def submit_feedback_okay(logs_data):
"""Handle 'okay' feedback submission"""
return submit_feedback("okay", logs_data)
def submit_feedback_not_okay(logs_data):
"""Handle 'not okay' feedback submission"""
return submit_feedback("not_okay", logs_data)
okay_btn.click(
submit_feedback_okay,
[feedback_state],
[feedback_row, feedback_thanks]
)
not_okay_btn.click(
submit_feedback_not_okay,
[feedback_state],
[feedback_row, feedback_thanks]
)
#-------------------- Session Management + Geolocation -------------------------
# Add these state components at the top level of the Blocks
session_id = gr.State(None)
client_ip = gr.State(None)
@demo.load(api_name="get_client_ip")
def get_client_ip_handler(dummy_input="", request: gr.Request = None):
"""Handler for getting client IP in Gradio context"""
return get_client_ip(request)
#-------------------- Gradio voodoo -------------------------
# Update the event handlers
(textbox
.submit(get_client_ip_handler, [textbox], [client_ip], api_name="get_ip_textbox")
.then(start_chat, [textbox, chatbot], [textbox, tabs, chatbot], queue=False, api_name="start_chat_textbox")
.then(chat,
[textbox, chatbot, search_method, dropdown_country, uploaded_file, client_ip, session_id],
[chatbot, sources_textbox, feedback_state, session_id],
queue=True, concurrency_limit=8, api_name="chat_textbox")
.then(show_feedback, [feedback_state], [feedback_row, feedback_thanks, feedback_state], api_name="show_feedback_textbox")
.then(finish_chat, None, [textbox], api_name="finish_chat_textbox"))
(examples_hidden
.change(start_chat, [examples_hidden, chatbot], [textbox, tabs, chatbot], queue=False, api_name="start_chat_examples")
.then(get_client_ip_handler, [examples_hidden], [client_ip], api_name="get_ip_examples")
.then(chat,
[examples_hidden, chatbot, search_method, dropdown_country, uploaded_file, client_ip, session_id],
[chatbot, sources_textbox, feedback_state, session_id],
concurrency_limit=8, api_name="chat_examples")
.then(show_feedback, [feedback_state], [feedback_row, feedback_thanks, feedback_state], api_name="show_feedback_examples")
.then(finish_chat, None, [textbox], api_name="finish_chat_examples"))
demo.queue()
demo.launch() |