Spaces: Running on CPU Upgrade
change retriever name
- app.py (+7 -7)
- utils/generator.py (+56 -0)
app.py
CHANGED
@@ -123,12 +123,12 @@ def format_whisp_statistics(df):
 
     output = f"""🌍 **Geographic Analysis Results**
 
-
-- **Country**: {country}
-- **Administrative Region**: {admin_level}
+📍 **Location Details**
+- **Country**: {country} \n
+- **Administrative Region**: {admin_level} \n
 - **Total Area**: {area_text}
 
-
+⚠️ **Deforestation Risk Assessment**
 *Risk levels are based on historical patterns, environmental factors, and land use data*
 
 - **Overall Risk**: {format_risk(risk_level)}
@@ -139,7 +139,7 @@ def format_whisp_statistics(df):
 - **Timber Extraction**: {format_risk(risk_timber)}
 *Logging and wood harvesting*
 
-
+🌳 **EUDR Compliance Analysis**
 *Based on Tropical Moist Forest satellite monitoring*
 
 **Recent Deforestation (2020-Present):** {deforestation_formatted}
@@ -147,7 +147,7 @@ def format_whisp_statistics(df):
 **EUDR Compliance Status:** {compliance_status}
 
 ---
-
+💡 **Key Insights**
 
 - **For Suppliers**: {compliance_status.split(' - ')[1] if ' - ' in compliance_status else 'Review compliance requirements carefully'}
 - **Risk Factors**: Focus on {', '.join([t.split('*')[1].strip('*') for t in [risk_pcrop, risk_acrop, risk_timber] if 'High' in format_risk(t)])} if any high-risk activities detected
@@ -200,7 +200,7 @@ def retrieve_paragraphs(query):
     """Connect to retriever and retrieve paragraphs"""
     try:
         # Call the API with the uploaded file
-        client = Client("https://giz-
+        client = Client("https://giz-eudr-retriever.hf.space/")
         result = client.predict(
             query=query,
             reports_filter="",
utils/generator.py
ADDED
@@ -0,0 +1,56 @@
+import configparser
+import logging
+
+
+def getconfig(configfile_path: str):
+    """
+    Read the config file.
+
+    Params
+    ----------------
+    configfile_path: file path of the .cfg file
+    """
+    config = configparser.ConfigParser()
+    try:
+        with open(configfile_path) as f:
+            config.read_file(f)
+        return config
+    except FileNotFoundError:
+        logging.warning("config file not found")
+
+
+config = getconfig("model_params.cfg")
+PROVIDER = config.get("generator", "PROVIDER")
+MODEL = config.get("generator", "MODEL")
+MAX_TOKENS = int(config.get("generator", "MAX_TOKENS"))
+TEMPERATURE = float(config.get("generator", "TEMPERATURE"))
+
+
+def generate_response(chunks, user_query, model):
+    """
+    Generator function to produce a response text to the initial user query
+    using the retrieved document chunks and a model.
+
+    Args:
+        chunks (list of dict): Retrieved chunks, each with 'answer' and 'score'.
+        user_query (str): The initial query text from the user.
+        model (callable or object): Language model or interface to generate text (stub).
+
+    Yields:
+        str: Generated text response to the query.
+    """
+    # Aggregate text from the retrieved chunks for context
+    context_text = "\n\n".join(chunk['answer'] for chunk in chunks)
+
+    # Compose the prompt for the model
+    prompt = (
+        f"You are an assistant responding to the query:\n\"{user_query}\"\n\n"
+        f"Use the following information to answer:\n{context_text}\n\n"
+        f"Provide a clear, concise, and informative answer."
+    )
+
+    # Placeholder for the actual model call, e.g.:
+    # response = model.generate_text(prompt)
+    # For now, yield the prompt itself as a stub response.
+    yield f"Simulated response based on retrieved info:\n\n{prompt}"
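
The new module reads model_params.cfg at import time, so that file has to exist before utils.generator is imported. Below is a hypothetical usage sketch; only the [generator] keys and the chunk shape come from the code above, while the config values, chunk texts, and query are invented placeholders.

from pathlib import Path

# Write a placeholder config first, since utils/generator.py reads it at
# import time; the keys match the ones the module reads, the values do not
# come from the repository.
Path("model_params.cfg").write_text(
    "[generator]\n"
    "PROVIDER = huggingface\n"
    "MODEL = some-org/some-instruct-model\n"
    "MAX_TOKENS = 512\n"
    "TEMPERATURE = 0.2\n"
)

from utils.generator import generate_response

# Chunks shaped as the docstring describes: an 'answer' text plus a 'score'
chunks = [
    {"answer": "No tree cover loss detected on the plot after 2020.", "score": 0.91},
    {"answer": "The region's overall deforestation risk is classified as low.", "score": 0.87},
]

# The stub never calls the model, so None is acceptable for now
for text in generate_response(chunks, "Is this plot EUDR compliant?", model=None):
    print(text)

Because generate_response is a generator, app.py can later stream its output chunk by chunk into the UI, even though the stub currently yields only once.
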