remove additional providers
- app.py +6 -2
- utils/__pycache__/generator.cpython-310.pyc +0 -0
- utils/generator.py +22 -25
app.py
CHANGED
@@ -6,8 +6,10 @@ from uuid import uuid4
 from gradio_client import Client, handle_file
 from utils.whisp_api import handle_geojson_upload
 from utils.retriever import retrieve_paragraphs
+from utils.generator import generate
 
 # Sample questions for examples
+
 SAMPLE_QUESTIONS = {
     "Deforestation Analysis": [
         "What are the main deforestation hotspots in Ecuador?",
@@ -26,6 +28,7 @@ SAMPLE_QUESTIONS = {
     ]
 }
 
+# Initialize Chat
 
 def start_chat(query, history):
     """Start a new chat interaction"""
@@ -67,8 +70,9 @@ async def chat_response(query, history, method, country, uploaded_file):
     else:
         try:
             response = f"Based on EUDR reports for {country}, I can help you understand deforestation patterns and compliance requirements. Your question: '{query}' is being analyzed..."
-
-            response =
+            retrieved_paragraphs = retrieve_paragraphs(query)
+            response = generate(query=query, context=retrieved_paragraphs)
+
         except Exception as e:
             response = f"Error retrieving information: {str(e)}"
 
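The replacement wires a minimal retrieve-then-generate flow into chat_response: fetch the paragraphs most relevant to the question, then hand them to the generator as grounding context. A self-contained sketch of that flow, using the two helpers imported above (their call shapes are taken from this diff; their internals are the project's own):

from utils.retriever import retrieve_paragraphs
from utils.generator import generate

def answer(query: str) -> str:
    try:
        # 1. Retrieve the paragraphs most relevant to the user's question
        retrieved_paragraphs = retrieve_paragraphs(query)
        # 2. Generate a grounded answer; note that the retrieved *result*,
        #    not the retrieve_paragraphs function itself, goes in as context
        return generate(query=query, context=retrieved_paragraphs)
    except Exception as e:
        return f"Error retrieving information: {str(e)}"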
utils/__pycache__/generator.cpython-310.pyc
ADDED
Binary file (5.9 kB)
utils/generator.py
CHANGED
@@ -6,9 +6,6 @@ from typing import List, Dict, Any, Union
 from dotenv import load_dotenv
 
 # LangChain imports
-from langchain_openai import ChatOpenAI
-from langchain_anthropic import ChatAnthropic
-from langchain_cohere import ChatCohere
 from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
 from langchain_core.messages import SystemMessage, HumanMessage
 
@@ -37,10 +34,10 @@ def getconfig(configfile_path: str):
 def get_auth(provider: str) -> dict:
     """Get authentication configuration for different providers"""
     auth_configs = {
-        "openai": {"api_key": os.getenv("OPENAI_API_KEY")},
+        #"openai": {"api_key": os.getenv("OPENAI_API_KEY")},
         "huggingface": {"api_key": os.getenv("HF_TOKEN")},
-        "anthropic": {"api_key": os.getenv("ANTHROPIC_API_KEY")},
-        "cohere": {"api_key": os.getenv("COHERE_API_KEY")},
+        #"anthropic": {"api_key": os.getenv("ANTHROPIC_API_KEY")},
+        #"cohere": {"api_key": os.getenv("COHERE_API_KEY")},
     }
 
     if provider not in auth_configs:
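With the three entries commented out, get_auth now only resolves "huggingface"; anything else falls through to the `if provider not in auth_configs:` guard. A minimal sketch of the resulting behavior, assuming the guard raises ValueError (the guard itself is in the diff; the exception type and the return line are assumptions):

import os

def get_auth(provider: str) -> dict:
    """Get authentication configuration for different providers"""
    auth_configs = {
        # openai, anthropic, and cohere entries were commented out by this commit
        "huggingface": {"api_key": os.getenv("HF_TOKEN")},
    }
    if provider not in auth_configs:
        # Assumed: unsupported providers raise instead of returning None
        raise ValueError(f"Unsupported provider: {provider}")
    return auth_configs[provider]

get_auth("huggingface")  # -> {"api_key": <value of HF_TOKEN>}
get_auth("openai")       # raises ValueError now that the entry is commented out

The get_chat_model branches change accordingly: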
@@ -76,25 +73,25 @@ def get_chat_model():
     }
     logging.info(f"provider is {PROVIDER}")
 
-    if PROVIDER == "openai":
-        return ChatOpenAI(
-            model=MODEL,
-            openai_api_key=auth_config["api_key"],
-            **common_params
-        )
-    elif PROVIDER == "anthropic":
-        return ChatAnthropic(
-            model=MODEL,
-            anthropic_api_key=auth_config["api_key"],
-            **common_params
-        )
-    elif PROVIDER == "cohere":
-        return ChatCohere(
-            model=MODEL,
-            cohere_api_key=auth_config["api_key"],
-            **common_params
-        )
-    elif PROVIDER == "huggingface":
+    # if PROVIDER == "openai":
+    #     return ChatOpenAI(
+    #         model=MODEL,
+    #         openai_api_key=auth_config["api_key"],
+    #         **common_params
+    #     )
+    # elif PROVIDER == "anthropic":
+    #     return ChatAnthropic(
+    #         model=MODEL,
+    #         anthropic_api_key=auth_config["api_key"],
+    #         **common_params
+    #     )
+    # elif PROVIDER == "cohere":
+    #     return ChatCohere(
+    #         model=MODEL,
+    #         cohere_api_key=auth_config["api_key"],
+    #         **common_params
+    #     )
+    if PROVIDER == "huggingface":
         # Initialize HuggingFaceEndpoint with explicit parameters
         llm = HuggingFaceEndpoint(
             repo_id=MODEL,