# DataEngEval — config/models.yaml
# Model registry for evaluation runs (Groq provider models removed).
---
# Each entry defines one evaluated model:
#   name        — display name used in results/reports
#   provider    — inference provider key (must match a configured provider)
#   model_id    — provider-side model identifier
#   params      — generation settings shared across all entries
#   description — human-readable summary
models:
  # Llama-3.1-8B-Instruct with Nebius Provider
  - name: "Llama-3.1-8B-Instruct"
    provider: "nebius"
    model_id: "meta-llama/Llama-3.1-8B-Instruct"
    params:
      max_new_tokens: 256
      temperature: 0.1
      top_p: 0.9
    description: "Llama-3.1-8B-Instruct - Meta's instruction-following model via Nebius"

  # GPT-OSS-120B with Nebius Provider
  - name: "GPT-OSS-120B"
    provider: "nebius"
    model_id: "openai/gpt-oss-120b"
    params:
      max_new_tokens: 256
      temperature: 0.1
      top_p: 0.9
    description: "GPT-OSS-120B - OpenAI's large language model via Nebius"

  # GEMMA-2-2B-IT with Nebius Provider
  - name: "GEMMA-2-2B-IT"
    provider: "nebius"
    model_id: "google/gemma-2-2b-it"
    params:
      max_new_tokens: 256
      temperature: 0.1
      top_p: 0.9
    description: "Gemma is a family of lightweight, state-of-the-art open models from Google via Nebius"

  # DeepSeek-R1 with Together Provider
  - name: "DeepSeek-R1"
    provider: "together"
    model_id: "deepseek-ai/DeepSeek-R1"
    params:
      max_new_tokens: 256
      temperature: 0.1
      top_p: 0.9
    description: "DeepSeek-R1 - DeepSeek's reasoning model via Together AI"