"""
Task to run evaluation using lighteval
"""
import os
import time
import subprocess
import tempfile
from pathlib import Path
import concurrent.futures
from dotenv import load_dotenv
from datetime import datetime
import json
import shutil
from typing import List, Dict
from tasks.get_model_providers import get_model_providers
from huggingface_hub import HfApi
import asyncio
# Valeur par défaut du timeout
DEFAULT_EVALUATION_TIMEOUT = 60.0 # 1 minute par défaut
class EvaluationTask:
    """
    Task to run evaluation using lighteval
    """

    def __init__(self, session_uid: str, dataset_name: str, clean_old_results: bool = False, timeout: float = None):
        """
        Initialize the evaluation task

        Args:
            session_uid: Session ID for this task
            dataset_name: Name of the dataset to evaluate
            clean_old_results: If True, clean old results before evaluation
            timeout: Timeout in seconds for each model evaluation (if None, uses default)
        """
        self.session_uid = session_uid
        self.dataset_name = dataset_name
        self.is_completed = False
        self.results = []
        self.hf_api = HfApi()
        self.timeout = timeout if timeout is not None else DEFAULT_EVALUATION_TIMEOUT

        # Clean old results if requested
        if clean_old_results:
            self.clean_old_results()
    def clean_old_results(self) -> None:
        """
        Clean old evaluation results to avoid any confusion
        """
        print(f"[{datetime.now().strftime('%H:%M:%S')}] Checking for and cleaning old results...")

        # Path to the LightEval results
        results_dir = Path(f"uploaded_files/{self.session_uid}/lighteval_results")

        # Delete them if they exist
        if results_dir.exists():
            print(f"[{datetime.now().strftime('%H:%M:%S')}] Deleting old LightEval results")
            shutil.rmtree(results_dir)
            print(f"[{datetime.now().strftime('%H:%M:%S')}] Cleanup finished")
        else:
            print(f"[{datetime.now().strftime('%H:%M:%S')}] No old results found")

        # Also check lighteval's intermediate results
        if os.path.exists("data/lighteval_results"):
            print(f"[{datetime.now().strftime('%H:%M:%S')}] Cleaning intermediate results")
            try:
                shutil.rmtree("data/lighteval_results", ignore_errors=True)
            except Exception as e:
                print(f"[{datetime.now().strftime('%H:%M:%S')}] Error while cleaning intermediate results: {str(e)}")
    def _save_results_to_hub(self) -> None:
        """
        Save evaluation results directly to the dataset on the Hub without persisting locally
        """
        try:
            # Sort the results by accuracy (most accurate first)
            sorted_results = sorted(self.results, key=lambda x: x.get('accuracy', 0), reverse=True)

            # Create a temporary file for the results
            with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as temp_file:
                # Add metadata to the results
                final_results = {
                    "metadata": {
                        "evaluation_date": datetime.now().isoformat(),
                        "session_id": self.session_uid,
                        "dataset_name": self.dataset_name
                    },
                    "results": sorted_results
                }
                json.dump(final_results, temp_file, indent=2)
                temp_file_path = temp_file.name

            # Push to Hub
            self.hf_api.upload_file(
                path_or_fileobj=temp_file_path,
                path_in_repo="lighteval_results.json",
                repo_id=self.dataset_name,
                repo_type="dataset",
                commit_message="Add lighteval evaluation results"
            )
            print(f"[{datetime.now().strftime('%H:%M:%S')}] Results saved to Hub at {self.dataset_name}/lighteval_results.json")

            # Delete the temporary file
            os.unlink(temp_file_path)
        except Exception as e:
            print(f"[{datetime.now().strftime('%H:%M:%S')}] Failed to save results to Hub: {str(e)}")
    async def _run_lighteval(self, model_name: str, provider: str) -> dict:
        start_time = time.time()
        print(f"[{datetime.now().strftime('%H:%M:%S')}] Starting evaluation with {provider} provider for {model_name}")

        # Create a temporary task file (mkstemp instead of the insecure,
        # deprecated tempfile.mktemp)
        fd, temp_file_path = tempfile.mkstemp(suffix=".py")
        with os.fdopen(fd, 'w') as temp_file:
            temp_file.write(f"""
from lighteval_task.lighteval_task import create_yourbench_task

# Create yourbench task
yourbench = create_yourbench_task("{self.dataset_name}", "single_shot_questions")

# Define TASKS_TABLE needed by lighteval
TASKS_TABLE = [yourbench]
""")

        # Create output directory in the session folder
        output_dir = f"uploaded_files/{self.session_uid}/lighteval_results"
        os.makedirs(output_dir, exist_ok=True)

        # LightEval command
        cmd_args = [
            "lighteval",
            "endpoint",
            "inference-providers",
            f"model={model_name},provider={provider}",
            "custom|yourbench|0|0",
            "--custom-tasks",
            temp_file_path,
            "--max-samples", "30",
            "--output-dir", output_dir,
            "--save-details",
            "--no-push-to-hub"
        ]
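
        # Note: "custom|yourbench|0|0" appears to follow lighteval's task spec
        # format (suite|task|num_fewshot|truncate_fewshots), i.e. the custom
        # yourbench task runs with zero few-shot examples.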
        try:
            # Run the command with inherited environment variables; the call is
            # bounded by self.timeout seconds
            process = await asyncio.create_subprocess_exec(
                *cmd_args,
                env=os.environ,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE
            )

            try:
                await asyncio.wait_for(process.communicate(), timeout=self.timeout)
            except asyncio.TimeoutError:
                process.kill()
                await process.wait()
                print(f"[{datetime.now().strftime('%H:%M:%S')}] Evaluation timed out for {model_name} after {time.time() - start_time:.2f}s")

                # Clean up temporary files
                os.unlink(temp_file_path)

                return {
                    "model": model_name,
                    "provider": provider,
                    "accuracy": 0.0,
                    "execution_time": self.timeout,
                    "status": "timeout"
                }
        except Exception as e:
            print(f"[{datetime.now().strftime('%H:%M:%S')}] Error running evaluation for {model_name}: {str(e)}")

            # Clean up temporary files
            os.unlink(temp_file_path)

            return {
                "model": model_name,
                "provider": provider,
                "accuracy": 0.0,
                "execution_time": time.time() - start_time,
                "status": "error"
            }

        # Calculate execution time
        execution_time = time.time() - start_time
        print(f"[{datetime.now().strftime('%H:%M:%S')}] Finished evaluation for {model_name} in {execution_time:.2f}s")

        try:
            # Get results from the output file (lighteval nests them under
            # results/<org>/<model>, so the slash in model_name maps to a
            # subdirectory)
            results_dir = Path(output_dir) / "results" / model_name
            results_file = next(results_dir.glob("results_*.json"))

            with open(results_file) as f:
                results = json.load(f)
            accuracy = results["results"]["all"]["accuracy"]

            result_data = {
                "model": model_name,
                "provider": provider,
                "accuracy": accuracy,
                "execution_time": execution_time,
                "status": "success"
            }
        except Exception as e:
            print(f"[{datetime.now().strftime('%H:%M:%S')}] Failed to parse results for {model_name} after {execution_time:.2f}s: {str(e)}")
            result_data = {
                "model": model_name,
                "provider": provider,
                "accuracy": 0.0,
                "execution_time": execution_time,
                "status": "parse_error"
            }

        # Clean up temporary files
        os.unlink(temp_file_path)

        return result_data
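
    # Sketch of the results file shape assumed by the parsing in
    # _run_lighteval above (inferred from the lookup
    # results["results"]["all"]["accuracy"], not from lighteval docs):
    #
    # {
    #   "results": {
    #     "all": {"accuracy": 0.73, ...},
    #     ...
    #   },
    #   ...
    # }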
    async def run(self, clean_first: bool = True) -> None:
        """
        Run the evaluation task asynchronously

        Args:
            clean_first: If True, clean old results before starting (default: True)
        """
        # Clean old results before starting, if requested
        if clean_first:
            self.clean_old_results()

        # Start global timer
        script_start_time = time.time()

        # Load environment variables
        load_dotenv()

        # Models to evaluate
        models = [
            "Qwen/QwQ-32B",
            "Qwen/Qwen2.5-72B-Instruct",
            "deepseek-ai/DeepSeek-V3-0324",
            "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
        ]

        # Get providers for each model
        model_providers = get_model_providers(models)

        print(f"[{datetime.now().strftime('%H:%M:%S')}] Starting parallel evaluations")

        # Run evaluations in parallel using asyncio
        tasks = []
        for model_name, providers in model_providers:
            if providers:  # Only run if providers are available
                tasks.append(self._run_lighteval(model_name, providers[0]))

        self.results = await asyncio.gather(*tasks)

        # Calculate total script execution time
        total_time = time.time() - script_start_time
        print(f"[{datetime.now().strftime('%H:%M:%S')}] All evaluations completed in {total_time:.2f}s")

        # Cleanup intermediate results if they exist (shutil is already
        # imported at module level)
        if os.path.exists("data/lighteval_results"):
            print(f"[{datetime.now().strftime('%H:%M:%S')}] Cleaning up intermediate results")
            try:
                # Recursively delete intermediate results
                shutil.rmtree("data/lighteval_results", ignore_errors=True)
            except Exception as e:
                print(f"[{datetime.now().strftime('%H:%M:%S')}] Warning: Failed to clean up intermediate results: {str(e)}")

        # Save final results to Hub (only once)
        self._save_results_to_hub()

        # Mark the task as completed
        self.is_completed = True
    def get_logs(self) -> List[str]:
        """
        Get logs for this task (empty list since we don't track logs anymore)

        Returns:
            Empty list of logs
        """
        return []

    def is_task_completed(self) -> bool:
        """
        Check if the task is completed

        Returns:
            True if completed, False otherwise
        """
        return self.is_completed
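
# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal way to drive the task end to end. The session id and dataset
# repo name below are hypothetical placeholders; any Hub dataset produced
# by the yourbench pipeline would take their place.
if __name__ == "__main__":
    task = EvaluationTask(
        session_uid="demo-session",  # hypothetical session id
        dataset_name="my-org/my-yourbench-dataset",  # hypothetical dataset repo
        clean_old_results=True,
        timeout=300.0,  # give each model up to 5 minutes
    )
    asyncio.run(task.run())
    print("Completed:", task.is_task_completed())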