Create app.py
app.py ADDED
@@ -0,0 +1,537 @@
import gradio as gr
import os
import uuid
import threading
import pandas as pd
import torch
from langchain.document_loaders import CSVLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import HuggingFacePipeline
from langchain.chains import LLMChain
from transformers import AutoTokenizer, AutoModelForCausalLM, T5Tokenizer, T5ForConditionalGeneration, pipeline
from langchain.prompts import PromptTemplate

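# Dependency note (an assumption based on the imports above, not pinned by
# this diff): roughly the following install set is needed, including
# sentencepiece for T5Tokenizer and accelerate for device_map="auto".
#
#   pip install gradio pandas torch transformers sentencepiece accelerate \
#       langchain faiss-cpu sentence-transformers
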
# Global model cache, shared across sessions and guarded by a lock
MODEL_CACHE = {
    "model": None,
    "tokenizer": None,
    "init_lock": threading.Lock(),
    "model_name": None
}

# Create directory for per-user data
os.makedirs("user_data", exist_ok=True)

# Use half precision on GPU, full precision on CPU
DTYPE = torch.float16 if torch.cuda.is_available() else torch.float32

# Model configuration dictionary.
# NOTE: the "-GGUF" repos below host quantized GGUF weights; transformers'
# AutoModelForCausalLM cannot load those files directly (it needs the original
# checkpoints, or the `gguf_file` argument available in recent transformers
# releases).
MODEL_CONFIG = {
    "Llama 2 Chat": {
        "name": "TheBloke/Llama-2-7B-Chat-GGUF",
        "description": "Llama 2 7B Chat model with good general performance",
        "dtype": DTYPE
    },
    "TinyLlama Chat": {
        "name": "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF",
        "description": "Compact 1.1B-parameter model, fast but less powerful",
        "dtype": DTYPE
    },
    "Mistral Instruct": {
        "name": "TheBloke/Mistral-7B-Instruct-v0.2-GGUF",
        "description": "7B instruction-tuned model with excellent reasoning",
        "dtype": DTYPE
    },
    "Phi-4 Mini Instruct": {
        "name": "microsoft/Phi-4-mini-instruct",
        "description": "Compact Microsoft model with strong instruction following",
        "dtype": DTYPE
    },
    "DeepSeek Coder Instruct": {
        "name": "deepseek-ai/deepseek-coder-1.3b-instruct",
        "description": "1.3B model specialized for code understanding",
        "dtype": DTYPE
    },
    "DeepSeek Lite Chat": {
        "name": "deepseek-ai/DeepSeek-V2-Lite-Chat",
        "description": "Light but powerful chat model from DeepSeek",
        "dtype": DTYPE
    },
    "Qwen2.5 Coder Instruct": {
        "name": "Qwen/Qwen2.5-Coder-3B-Instruct-GGUF",
        "description": "3B model specialized for code and technical applications",
        "dtype": DTYPE
    },
    "DeepSeek Distill Qwen": {
        "name": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
        "description": "1.5B distilled model with a good balance of speed and quality",
        "dtype": DTYPE
    },
    "Flan T5 Small": {
        "name": "google/flan-t5-small",
        "description": "Lightweight T5 model optimized for instruction following",
        "dtype": DTYPE,
        "is_t5": True
    }
}

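# Adding another backend is a one-entry change. A hypothetical example (the
# repo id "org/my-model" is a placeholder, not a real checkpoint):
#
#   MODEL_CONFIG["My Model"] = {
#       "name": "org/my-model",
#       "description": "Placeholder entry for a custom causal LM",
#       "dtype": DTYPE,
#   }
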
def initialize_model_once(model_key):
    """Initialize the model once and cache it"""
    with MODEL_CACHE["init_lock"]:
        # Look up the config first so the cache-hit path can also report is_t5
        model_info = MODEL_CONFIG[model_key]
        model_name = model_info["name"]

        current_model = MODEL_CACHE["model_name"]
        if MODEL_CACHE["model"] is None or current_model != model_key:
            # Clear the previous model from memory, if any
            if MODEL_CACHE["model"] is not None:
                MODEL_CACHE["model"] = None
                MODEL_CACHE["tokenizer"] = None
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()

            MODEL_CACHE["model_name"] = model_key

            # T5 models use a seq2seq head and their own tokenizer class
            if model_info.get("is_t5", False):
                MODEL_CACHE["tokenizer"] = T5Tokenizer.from_pretrained(model_name)
                MODEL_CACHE["model"] = T5ForConditionalGeneration.from_pretrained(
                    model_name,
                    torch_dtype=model_info["dtype"],
                    device_map="auto",
                    low_cpu_mem_usage=True
                )
            else:
                # Load the tokenizer and causal LM with the configured dtype
                MODEL_CACHE["tokenizer"] = AutoTokenizer.from_pretrained(model_name)
                MODEL_CACHE["model"] = AutoModelForCausalLM.from_pretrained(
                    model_name,
                    torch_dtype=model_info["dtype"],
                    device_map="auto",
                    low_cpu_mem_usage=True,
                    trust_remote_code=True
                )

    return MODEL_CACHE["tokenizer"], MODEL_CACHE["model"], model_info.get("is_t5", False)

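# A minimal sketch of the caching contract above: the first call per model key
# downloads and loads weights, repeat calls with the same key are cache hits,
# and a call with a different key evicts the old model and loads the new one.
#
#   tok1, mdl1, _ = initialize_model_once("Flan T5 Small")  # loads from the Hub
#   tok2, mdl2, _ = initialize_model_once("Flan T5 Small")  # cache hit
#   assert mdl1 is mdl2
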
def create_llm_pipeline(model_key):
    """Create a new pipeline using the specified model"""
    tokenizer, model, is_t5 = initialize_model_once(model_key)

    # Create the appropriate pipeline for the model type
    if is_t5:
        pipe = pipeline(
            "text2text-generation",
            model=model,
            tokenizer=tokenizer,
            max_new_tokens=256,
            do_sample=True,  # sampling must be enabled for temperature/top_p to apply
            temperature=0.3,
            top_p=0.9,
            # return_full_text is only valid for the causal "text-generation"
            # pipeline, so it is omitted here
        )
    else:
        pipe = pipeline(
            "text-generation",
            model=model,
            tokenizer=tokenizer,
            max_new_tokens=256,
            do_sample=True,  # sampling must be enabled for temperature/top_p to apply
            temperature=0.3,
            top_p=0.9,
            top_k=30,
            repetition_penalty=1.2,
            return_full_text=False,
        )

    # Wrap the pipeline in HuggingFacePipeline for LangChain compatibility
    return HuggingFacePipeline(pipeline=pipe)

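# Usage sketch (assuming the chosen model fits on the current hardware): the
# returned HuggingFacePipeline is a LangChain LLM, so it can be called on a
# prompt string directly.
#
#   llm = create_llm_pipeline("Flan T5 Small")
#   print(llm("Sebutkan tiga warna primer."))
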
def create_conversational_chain(db, file_path, model_key):
    llm = create_llm_pipeline(model_key)

    # Load the file into pandas to enable direct computation for data analysis
    df = pd.read_csv(file_path)

    # Prompt template that asks for direct answers rather than code
    template = """
Berikut ini adalah informasi tentang file CSV:

Kolom-kolom dalam file: {columns}

Beberapa baris pertama:
{sample_data}

Konteks tambahan dari vector database:
{context}

Pertanyaan: {question}

INSTRUKSI PENTING:
1. Jangan tampilkan kode Python, berikan jawaban langsung dalam Bahasa Indonesia.
2. Jika pertanyaan terkait statistik data (rata-rata, maksimum dll), lakukan perhitungan dan berikan hasilnya.
3. Jawaban harus singkat, jelas dan akurat berdasarkan data yang ada.
4. Gunakan format yang sesuai untuk angka (desimal 2 digit untuk nilai non-integer).
5. Jangan menyebutkan proses perhitungan, fokus pada hasil akhir.

Jawaban:
"""

    PROMPT = PromptTemplate(
        template=template,
        input_variables=["columns", "sample_data", "context", "question"]
    )

    # Create retriever (k reduced to 3 for better performance)
    retriever = db.as_retriever(search_kwargs={"k": 3})

    # Process a query, with error handling
    def process_query(query, chat_history):
        try:
            # Describe the dataframe for prompt context
            columns_str = ", ".join(df.columns.tolist())
            sample_data = df.head(2).to_string()  # limited to 2 rows for performance

            # Retrieve context from the vector database
            docs = retriever.get_relevant_documents(query)
            context = "\n\n".join([doc.page_content for doc in docs])

            # Answer common statistical queries directly from pandas
            def preprocess_query():
                query_lower = query.lower()
                result = None

                if "rata-rata" in query_lower or "mean" in query_lower or "average" in query_lower:
                    for col in df.columns:
                        if col.lower() in query_lower and pd.api.types.is_numeric_dtype(df[col]):
                            try:
                                result = f"Rata-rata {col} adalah {df[col].mean():.2f}"
                            except Exception:
                                pass

                elif "maksimum" in query_lower or "max" in query_lower or "tertinggi" in query_lower:
                    for col in df.columns:
                        if col.lower() in query_lower and pd.api.types.is_numeric_dtype(df[col]):
                            try:
                                result = f"Nilai maksimum {col} adalah {df[col].max():.2f}"
                            except Exception:
                                pass

                elif "minimum" in query_lower or "min" in query_lower or "terendah" in query_lower:
                    for col in df.columns:
                        if col.lower() in query_lower and pd.api.types.is_numeric_dtype(df[col]):
                            try:
                                result = f"Nilai minimum {col} adalah {df[col].min():.2f}"
                            except Exception:
                                pass

                elif "total" in query_lower or "jumlah" in query_lower or "sum" in query_lower:
                    for col in df.columns:
                        if col.lower() in query_lower and pd.api.types.is_numeric_dtype(df[col]):
                            try:
                                result = f"Total {col} adalah {df[col].sum():.2f}"
                            except Exception:
                                pass

                elif "baris" in query_lower or "jumlah data" in query_lower or "row" in query_lower:
                    result = f"Jumlah baris data adalah {len(df)}"

                elif "kolom" in query_lower or "field" in query_lower:
                    if "nama" in query_lower or "list" in query_lower or "sebutkan" in query_lower:
                        result = f"Kolom dalam data: {', '.join(df.columns.tolist())}"

                return result

            # Try a direct pandas calculation first
            direct_answer = preprocess_query()
            if direct_answer:
                return {"answer": direct_answer}

            # Otherwise fall back to the LLM
            chain = LLMChain(llm=llm, prompt=PROMPT)
            raw_result = chain.run(
                columns=columns_str,
                sample_data=sample_data,
                context=context,
                question=query
            )

            # Clean the result
            cleaned_result = raw_result.strip()

            # Fall back to a canned message if the model returned nothing
            if not cleaned_result:
                return {"answer": "Tidak dapat memproses jawaban. Silakan coba pertanyaan lain."}

            return {"answer": cleaned_result}
        except Exception as e:
            import traceback
            print(f"Error in process_query: {str(e)}")
            print(traceback.format_exc())
            return {"answer": f"Terjadi kesalahan saat memproses pertanyaan: {str(e)}"}

    return process_query

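# create_conversational_chain returns a closure rather than a LangChain chain
# object. A hedged usage sketch (the path is a placeholder):
#
#   ask = create_conversational_chain(db, "user_data/<session>/uploaded.csv",
#                                     "Flan T5 Small")
#   print(ask("Berapa rata-rata kolom harga?", [])["answer"])
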
class ChatBot:
    def __init__(self, session_id, model_key="DeepSeek Coder Instruct"):
        self.session_id = session_id
        self.chat_history = []
        self.chain = None
        self.user_dir = f"user_data/{session_id}"
        self.csv_file_path = None
        self.model_key = model_key
        os.makedirs(self.user_dir, exist_ok=True)

    def process_file(self, file, model_key=None):
        if model_key:
            self.model_key = model_key

        if file is None:
            return "Mohon upload file CSV terlebih dahulu."

        try:
            # Handle the file object from Gradio
            file_path = file.name if hasattr(file, 'name') else str(file)
            self.csv_file_path = file_path

            # Target path for the per-user copy
            user_file_path = f"{self.user_dir}/uploaded.csv"

            # Verify the CSV can be loaded
            try:
                df = pd.read_csv(file_path)
                print(f"CSV verified: {df.shape[0]} rows, {len(df.columns)} columns")

                # Save a copy in the user directory
                df.to_csv(user_file_path, index=False)
                self.csv_file_path = user_file_path
            except Exception as e:
                return f"Error membaca CSV: {str(e)}"

            # Load documents for the vector store
            try:
                loader = CSVLoader(file_path=file_path, encoding="utf-8",
                                   csv_args={'delimiter': ','})
                data = loader.load()
                print(f"Documents loaded: {len(data)}")
            except Exception as e:
                return f"Error loading documents: {str(e)}"

            # Create the vector database
            try:
                db_path = f"{self.user_dir}/db_faiss"

                # CPU-friendly embeddings with small dimensions
                embeddings = HuggingFaceEmbeddings(
                    model_name='sentence-transformers/all-MiniLM-L6-v2',
                    model_kwargs={'device': 'cpu'}
                )

                db = FAISS.from_documents(data, embeddings)
                db.save_local(db_path)
                print(f"Vector database created at {db_path}")
            except Exception as e:
                return f"Error creating vector database: {str(e)}"

            # Create the custom chain
            try:
                self.chain = create_conversational_chain(db, self.csv_file_path, self.model_key)
                print(f"Chain created successfully using model: {self.model_key}")
            except Exception as e:
                return f"Error creating chain: {str(e)}"

            # Add basic file info to the chat history for context
            file_info = (f"CSV berhasil dimuat dengan {df.shape[0]} baris dan {len(df.columns)} kolom "
                         f"menggunakan model {self.model_key}. Kolom: {', '.join(df.columns.tolist())}")
            self.chat_history.append(("System", file_info))

            return f"File CSV berhasil diproses dengan model {self.model_key}! Anda dapat mulai chat dengan model untuk analisis data."
        except Exception as e:
            import traceback
            print(traceback.format_exc())
            return f"Error pemrosesan file: {str(e)}"

    def change_model(self, model_key):
        """Change the active model and recreate the chain if a file is loaded"""
        if model_key == self.model_key:
            return f"Model {model_key} sudah digunakan."

        self.model_key = model_key

        # If a file is already loaded, recreate the chain with the new model
        if self.csv_file_path:
            try:
                # Load the existing vector database
                db_path = f"{self.user_dir}/db_faiss"
                embeddings = HuggingFaceEmbeddings(
                    model_name='sentence-transformers/all-MiniLM-L6-v2',
                    model_kwargs={'device': 'cpu'}
                )
                # NOTE: recent langchain releases also require
                # allow_dangerous_deserialization=True in FAISS.load_local
                db = FAISS.load_local(db_path, embeddings)

                # Create a new chain with the selected model
                self.chain = create_conversational_chain(db, self.csv_file_path, self.model_key)

                return f"Model berhasil diubah ke {model_key}."
            except Exception as e:
                return f"Error mengubah model: {str(e)}"
        else:
            return f"Model diubah ke {model_key}. Silakan upload file CSV untuk memulai."

    def chat(self, message, history):
        if self.chain is None:
            return "Mohon upload file CSV terlebih dahulu."

        try:
            # Process the question with the chain
            result = self.chain(message, self.chat_history)

            # Get the answer, with a fallback
            answer = result.get("answer", "Maaf, tidak dapat menghasilkan jawaban. Silakan coba pertanyaan lain.")

            # Never return an empty string
            if not answer or answer.strip() == "":
                answer = "Maaf, tidak dapat menghasilkan jawaban yang sesuai. Silakan coba pertanyaan lain."

            # Update the internal chat history
            self.chat_history.append((message, answer))

            # Return just the answer for Gradio
            return answer
        except Exception as e:
            import traceback
            print(traceback.format_exc())
            return f"Error: {str(e)}"

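# Headless usage sketch for ChatBot outside of Gradio (the CSV filename is
# hypothetical):
#
#   bot = ChatBot(session_id=str(uuid.uuid4()), model_key="Flan T5 Small")
#   print(bot.process_file("sales.csv"))
#   print(bot.chat("Sebutkan nama kolom dalam data", []))
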
# UI code
def create_gradio_interface():
    with gr.Blocks(title="Chat with CSV using AI Models") as interface:
        session_id = gr.State(lambda: str(uuid.uuid4()))
        chatbot_state = gr.State(lambda: None)

        # Get model choices
        model_choices = list(MODEL_CONFIG.keys())
        default_model = "DeepSeek Coder Instruct"  # Default model

        gr.HTML("<h1 style='text-align: center;'>Chat with CSV using AI Models</h1>")
        gr.HTML("<h3 style='text-align: center;'>Asisten analisis CSV untuk berbagai kebutuhan</h3>")

        with gr.Row():
            with gr.Column(scale=1):
                file_input = gr.File(
                    label="Upload CSV Anda",
                    file_types=[".csv"]
                )

                # Model selection accordion, placed before the process button
                with gr.Accordion("Pilih Model AI", open=True):
                    model_dropdown = gr.Dropdown(
                        label="Model",
                        choices=model_choices,
                        value=default_model
                    )
                    model_info = gr.Markdown(
                        value=f"**{default_model}**: {MODEL_CONFIG[default_model]['description']}"
                    )

                # Process button after the accordion
                process_button = gr.Button("Proses CSV")

            with gr.Column(scale=2):
                chatbot_interface = gr.Chatbot(
                    label="Riwayat Chat",
                    height=400
                )
                message_input = gr.Textbox(
                    label="Ketik pesan Anda",
                    placeholder="Tanyakan tentang data CSV Anda...",
                    lines=2
                )
                submit_button = gr.Button("Kirim")
                clear_button = gr.Button("Bersihkan Chat")

        # Update the model description when the selection changes
        def update_model_info(model_key):
            return f"**{model_key}**: {MODEL_CONFIG[model_key]['description']}"

        model_dropdown.change(
            fn=update_model_info,
            inputs=[model_dropdown],
            outputs=[model_info]
        )

        # Process-file handler
        def handle_process_file(file, model_key, sess_id):
            chatbot = ChatBot(sess_id, model_key)
            result = chatbot.process_file(file)
            return chatbot, [(None, result)]

        process_button.click(
            fn=handle_process_file,
            inputs=[file_input, model_dropdown, session_id],
            outputs=[chatbot_state, chatbot_interface]
        )

        # Change-model handler (a second listener on the same dropdown event)
        def handle_model_change(model_key, chatbot, sess_id):
            if chatbot is None:
                chatbot = ChatBot(sess_id, model_key)
                return chatbot, [(None, f"Model diatur ke {model_key}. Silakan upload file CSV.")]

            result = chatbot.change_model(model_key)
            return chatbot, chatbot.chat_history + [(None, result)]

        model_dropdown.change(
            fn=handle_model_change,
            inputs=[model_dropdown, chatbot_state, session_id],
            outputs=[chatbot_state, chatbot_interface]
        )

        # Chat handlers
        def user_message_submitted(message, history, chatbot, sess_id):
            history = history + [(message, None)]
            return history, "", chatbot, sess_id

        def bot_response(history, chatbot, sess_id):
            if chatbot is None:
                chatbot = ChatBot(sess_id)
                history[-1] = (history[-1][0], "Mohon upload file CSV terlebih dahulu.")
                return chatbot, history

            user_message = history[-1][0]
            response = chatbot.chat(user_message, history[:-1])
            history[-1] = (user_message, response)
            return chatbot, history

        submit_button.click(
            fn=user_message_submitted,
            inputs=[message_input, chatbot_interface, chatbot_state, session_id],
            outputs=[chatbot_interface, message_input, chatbot_state, session_id]
        ).then(
            fn=bot_response,
            inputs=[chatbot_interface, chatbot_state, session_id],
            outputs=[chatbot_state, chatbot_interface]
        )

        message_input.submit(
            fn=user_message_submitted,
            inputs=[message_input, chatbot_interface, chatbot_state, session_id],
            outputs=[chatbot_interface, message_input, chatbot_state, session_id]
        ).then(
            fn=bot_response,
            inputs=[chatbot_interface, chatbot_state, session_id],
            outputs=[chatbot_state, chatbot_interface]
        )

        # Clear-chat handler
        def handle_clear_chat(chatbot):
            if chatbot is not None:
                chatbot.chat_history = []
            return chatbot, []

        clear_button.click(
            fn=handle_clear_chat,
            inputs=[chatbot_state],
            outputs=[chatbot_state, chatbot_interface]
        )

    return interface

# Launch the interface
demo = create_gradio_interface()
demo.launch(share=True)
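# Note: share=True asks Gradio to open a temporary public *.gradio.live tunnel.
# On Hugging Face Spaces the app is already served publicly, so demo.launch()
# without arguments is usually sufficient there.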