update benchmark model
backend/config/models_config.py
CHANGED
@@ -21,6 +21,7 @@ DEFAULT_EVALUATION_MODELS = [
 
 # Alternative models to use if the default model is not available
 ALTERNATIVE_BENCHMARK_MODELS = [
+    "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
     "meta-llama/Llama-3.3-70B-Instruct",
     "meta-llama/Llama-3.1-8B-Instruct",
     "Qwen/Qwen2.5-72B-Instruct",
@@ -32,7 +33,7 @@ ALTERNATIVE_BENCHMARK_MODELS = [
 ]
 
 # Required model for create_bench_config_file.py (only one default model)
-DEFAULT_BENCHMARK_MODEL = "
+DEFAULT_BENCHMARK_MODEL = "Qwen/Qwen2.5-32B-Instruct"
 
 # Models by roles for benchmark configuration
 # All roles use the default model except chunking
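
For context, a minimal sketch (an assumption, not part of this commit) of how a script such as create_bench_config_file.py could fall back from DEFAULT_BENCHMARK_MODEL to the entries in ALTERNATIVE_BENCHMARK_MODELS when the default is not available. The function select_benchmark_model and the is_available callback are hypothetical names introduced only for illustration; the availability check itself is not defined in this diff.

from typing import Callable, Iterable

def select_benchmark_model(
    default: str,
    alternatives: Iterable[str],
    is_available: Callable[[str], bool],
) -> str:
    """Return the default model if available, otherwise the first available alternative.

    Hypothetical helper: the availability check is supplied by the caller
    (for example, a query to the inference provider) and is not part of this diff.
    """
    for candidate in (default, *alternatives):
        if is_available(candidate):
            return candidate
    # No candidate reported as available: keep the default so the caller can surface the error.
    return default

With the values from this commit, select_benchmark_model("Qwen/Qwen2.5-32B-Instruct", ALTERNATIVE_BENCHMARK_MODELS, is_available) would return the Qwen2.5-32B default when it is reachable and the first available alternative (now led by the DeepSeek-R1 distill) otherwise.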