Spaces:
Running
on
CPU Upgrade
Running
on
CPU Upgrade
remove unused get_model_providers
Browse files
backend/tasks/evaluation_task.py
CHANGED
@@ -12,7 +12,6 @@ from datetime import datetime
|
|
12 |
import json
|
13 |
import shutil
|
14 |
from typing import List, Dict
|
15 |
-
from tasks.get_model_providers import get_model_providers
|
16 |
from tasks.get_available_model_provider import get_available_model_provider
|
17 |
from huggingface_hub import HfApi
|
18 |
import asyncio
|
|
|
12 |
import json
|
13 |
import shutil
|
14 |
from typing import List, Dict
|
|
|
15 |
from tasks.get_available_model_provider import get_available_model_provider
|
16 |
from huggingface_hub import HfApi
|
17 |
import asyncio
|
backend/tasks/get_model_providers.py
DELETED
@@ -1,44 +0,0 @@
|
|
1 |
-
from huggingface_hub import model_info
|
2 |
-
# Inference providers we favor, in priority order.
PREFERRED_PROVIDERS = ["sambanova", "novita"]

def filter_providers(providers):
    """Return only the providers that appear in PREFERRED_PROVIDERS."""
    keep = []
    for name in providers:
        if name in PREFERRED_PROVIDERS:
            keep.append(name)
    return keep
|
7 |
-
|
8 |
-
def prioritize_providers(providers):
    """Reorder providers so preferred ones come first.

    Every entry of *providers* is kept; those found in
    PREFERRED_PROVIDERS move to the front, with relative order
    preserved within each group.
    """
    front, back = [], []
    for name in providers:
        (front if name in PREFERRED_PROVIDERS else back).append(name)
    return front + back
|
13 |
-
|
14 |
-
def get_model_providers(models, prioritize=True):
    """Resolve the inference providers available for each model.

    Args:
        models: Iterable of Hub model ids (e.g. "Qwen/QwQ-32B").
        prioritize: When True, keep every provider but move preferred
            ones to the front; when False, drop non-preferred providers.

    Returns:
        A list of ``(model_name, providers)`` tuples, one per input
        model, in input order. A model whose metadata cannot be fetched
        or that exposes no provider mapping maps to an empty list.
    """
    results = []
    for model_name in models:
        try:
            info = model_info(model_name, expand="inferenceProviderMapping")
            if hasattr(info, "inference_provider_mapping"):
                providers = info.inference_provider_mapping.keys()
                providers = (
                    prioritize_providers(providers)
                    if prioritize
                    else filter_providers(providers)
                )
            else:
                providers = []
            results.append((model_name, providers))
        except Exception:
            # Best-effort: a failed Hub lookup for one model must not
            # abort the whole batch — record an empty provider list
            # and keep going. (Was `as e` with the binding never used.)
            results.append((model_name, []))

    return results
|
34 |
-
|
35 |
-
if __name__ == "__main__":
    # Smoke-test the provider lookup against a handful of popular models.
    demo_models = [
        "Qwen/Qwen2.5-72B-Instruct",
        "meta-llama/Llama-3.3-70B-Instruct",
        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
        "Qwen/QwQ-32B",
        "mistralai/Mistral-Small-24B-Instruct-2501"
    ]
    print(get_model_providers(demo_models, prioritize=True))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|