Raffaele Terribile
committed on
Modify application to use a local model
- app.py +62 -18
- requirements.txt +4 -2
app.py
CHANGED
@@ -1,10 +1,14 @@
 import os
+import json
 import gradio as gr
 import requests
 import inspect
 import pandas as pd
 
-from smolagents import CodeAgent,
+from smolagents import CodeAgent, InferenceClientModel, VisitWebpageTool, PythonInterpreterTool, WebSearchTool, WikipediaSearchTool, FinalAnswerTool, Tool, tool  # GoogleSearchTool (uses SERPAPI_API_KEY), DuckDuckGoSearchTool
+from transformers import pipeline
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from litellm import LiteLLM
 
 # (Keep Constants as is)
 # --- Constants ---
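Note that some of the new imports never survive to runtime use: the result of pipeline is immediately reassigned below, and from litellm import LiteLLM pulls in litellm's SDK client class rather than a smolagents-compatible model (smolagents ships its own LiteLLMModel, used in the sketch after the next hunk). A quick smoke test, a minimal sketch with a hypothetical check_imports helper, catches a missing dependency before the Gradio app boots:

def check_imports() -> list[str]:
    """Return the heavyweight dependencies that fail to import."""
    missing = []
    for mod in ("gradio", "requests", "pandas", "smolagents", "transformers", "litellm"):
        try:
            __import__(mod)
        except ImportError as exc:
            missing.append(f"{mod}: {exc}")
    return missing

if __name__ == "__main__":
    print("\n".join(check_imports()) or "all imports OK")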
@@ -35,25 +39,29 @@ class FirstAgent:
 
         # Configuration with multiple fallbacks
         model = None
-
+
         # Try 1: Local model via Transformers
         try:
-
-
-
+            model_id = "microsoft/DialoGPT-small"
+            tokenizer = AutoTokenizer.from_pretrained(model_id)
+            model = AutoModelForCausalLM.from_pretrained(model_id)  # ~500MB
+            model = pipeline(
+                task="text-generation",
+                tokenizer=tokenizer,
+                model=model
             )
-            print("Using local
+            print(f"Using local {model_id} model")
         except Exception as e:
             print(f"Local model failed: {e}")
-
+
         # Try 2: Free remote model
         try:
-            model =
+            model = LiteLLM(
                 model_id="groq/mixtral-8x7b-32768"  # Free with registration
             )
             print("Using Groq remote model")
-        except Exception as
-            print(f"Remote model failed: {
+        except Exception as ex:
+            print(f"Remote model failed: {ex}")
             raise Exception("No working model configuration found")
 
         self.agent = CodeAgent(
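As committed, the first branch leaves model holding a raw transformers pipeline, and the second calls litellm's LiteLLM client with a model_id keyword it does not define; CodeAgent expects one of smolagents' model wrappers in both cases. A minimal sketch of the same fallback chain using smolagents' TransformersModel and LiteLLMModel (assuming a recent smolagents release and a GROQ_API_KEY in the Space secrets):

from smolagents import TransformersModel, LiteLLMModel

model = None
try:
    # Local attempt: smolagents wraps the transformers model internally.
    model = TransformersModel(model_id="microsoft/DialoGPT-small")
    print("Using local microsoft/DialoGPT-small model")
except Exception as e:
    print(f"Local model failed: {e}")
    try:
        # Remote fallback routed through LiteLLM (free tier with registration).
        model = LiteLLMModel(model_id="groq/mixtral-8x7b-32768")
        print("Using Groq remote model")
    except Exception as ex:
        print(f"Remote model failed: {ex}")
        raise Exception("No working model configuration found")

Nesting the fallback inside the first except also fixes a control-flow flaw in the committed version: there the two try blocks run sequentially, so a failing remote attempt reaches the raise even when the local model loaded successfully.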
@@ -125,19 +133,55 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
         response.raise_for_status()
         questions_data = response.json()
         if not questions_data:
-
-
+            print("Fetched questions list is empty.")
+            return "Fetched questions list is empty or invalid format.", None
         print(f"Fetched {len(questions_data)} questions.")
     except requests.exceptions.RequestException as e:
-        print(f"Error fetching questions: {e}")
-
+        print(f"Error fetching questions from API: {e}")
+        print("Attempting to load questions from local file 'questions.json'...")
+        try:
+            with open("questions.json", "r", encoding="utf-8") as f:
+                questions_data = json.load(f)
+            if not questions_data:
+                return "Both API and local questions file are empty.", None
+            print(f"Successfully loaded {len(questions_data)} questions from local file.")
+        except FileNotFoundError:
+            return "Error: Could not fetch questions from API and 'questions.json' file not found.", None
+        except json.JSONDecodeError as json_e:
+            return f"Error: Could not fetch questions from API and local file has invalid JSON: {json_e}", None
+        except Exception as file_e:
+            return f"Error: Could not fetch questions from API and failed to read local file: {file_e}", None
     except requests.exceptions.JSONDecodeError as e:
-
-
-
+        print(f"Error decoding JSON response from questions endpoint: {e}")
+        print(f"Response text: {response.text[:500]}")
+        print("Attempting to load questions from local file 'questions.json'...")
+        try:
+            with open("questions.json", "r", encoding="utf-8") as f:
+                questions_data = json.load(f)
+            if not questions_data:
+                return "Both API response is invalid and local questions file is empty.", None
+            print(f"Successfully loaded {len(questions_data)} questions from local file.")
+        except FileNotFoundError:
+            return "Error: Could not decode API response and 'questions.json' file not found.", None
+        except json.JSONDecodeError as json_e:
+            return f"Error: Could not decode API response and local file has invalid JSON: {json_e}", None
+        except Exception as file_e:
+            return f"Error: Could not decode API response and failed to read local file: {file_e}", None
     except Exception as e:
         print(f"An unexpected error occurred fetching questions: {e}")
-
+        print("Attempting to load questions from local file 'questions.json'...")
+        try:
+            with open("questions.json", "r", encoding="utf-8") as f:
+                questions_data = json.load(f)
+            if not questions_data:
+                return "Unexpected API error occurred and local questions file is empty.", None
+            print(f"Successfully loaded {len(questions_data)} questions from local file.")
+        except FileNotFoundError:
+            return "Error: Unexpected API error occurred and 'questions.json' file not found.", None
+        except json.JSONDecodeError as json_e:
+            return f"Error: Unexpected API error occurred and local file has invalid JSON: {json_e}", None
+        except Exception as file_e:
+            return f"Error: Unexpected API error occurred and failed to read local file: {file_e}", None
 
     # 3. Run your Agent
     results_log = []
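The three except branches above repeat the same questions.json fallback almost verbatim. A hypothetical helper (the name load_local_questions and its message strings are illustrative, not part of the commit) would reduce each branch to a single call:

import json

def load_local_questions(context: str):
    """Shared fallback: try 'questions.json' and return (data, error_message)."""
    print("Attempting to load questions from local file 'questions.json'...")
    try:
        with open("questions.json", "r", encoding="utf-8") as f:
            questions_data = json.load(f)
    except FileNotFoundError:
        return None, f"{context} and 'questions.json' file not found."
    except json.JSONDecodeError as json_e:
        return None, f"{context} and local file has invalid JSON: {json_e}"
    except Exception as file_e:
        return None, f"{context} and failed to read local file: {file_e}"
    if not questions_data:
        return None, f"{context} and local questions file is empty."
    print(f"Successfully loaded {len(questions_data)} questions from local file.")
    return questions_data, None

# Usage inside run_and_submit_all, e.g. in the RequestException branch:
#     questions_data, err = load_local_questions("Error: Could not fetch questions from API")
#     if err:
#         return err, None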
requirements.txt
CHANGED
@@ -9,9 +9,11 @@ torch
 tokenizers
 # Dependencies for LiteLLM (multiple models)
 litellm
-# Hugging Face Hub for model downloads
+# Hugging Face Hub for model downloads and API
 huggingface_hub
+# Dependencies for HfApiModel and full HF integration
+datasets
+accelerate
 # Optional but useful
 # duckduckgo-search
-# accelerate  # For larger models
 # bitsandbytes  # For quantization
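The commit promotes accelerate (and adds datasets) to hard dependencies while leaving bitsandbytes commented out. For reference, a sketch of what those two extras enable together, assuming a CUDA-capable machine; the model id is the commit's placeholder, and quantization only pays off on larger checkpoints:

from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "microsoft/DialoGPT-small"  # placeholder from the commit

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",  # accelerate spreads layers across available devices
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),  # bitsandbytes 4-bit weights
)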