Dataset Viewer

The dataset is auto-converted to Parquet. Each record describes one model together with the inference providers that serve it, using the following columns:

| Column | Type | Notes |
| --- | --- | --- |
| `id` | string | 9–49 characters |
| `name` | string | 8–58 characters |
| `hf_id` | string | 12–49 characters; may be null |
| `model_type` | string | 1 class (`language`) |
| `author` | string | 3–21 characters |
| `providers` | list | 1–15 entries per model |
| `median_input_cost` | float64 | 0–75 |
| `median_output_cost` | float64 | 0–150 |
| `low_input_cost` | float64 | 0–75 |
| `low_output_cost` | float64 | 0–150 |
| `high_input_cost` | float64 | 0–75 |
| `high_output_cost` | float64 | 0–150 |
| `is_open_weights` | bool | 2 classes |
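For downstream code, the record layout can be sketched as Python typed dictionaries. This is inferred from the columns above and the `providers` entries shown in the preview, not an official schema published with the dataset; the names `Provider` and `ModelRecord` are introduced here purely for illustration.

```python
from typing import List, Optional, TypedDict


class Provider(TypedDict):
    """One inference provider serving a model, as shown in the preview."""
    name: str                    # e.g. "DeepInfra"
    context: int                 # context window, in tokens
    max_output: int              # maximum output, in tokens
    input: float                 # input price
    output: float                # output price
    latency: Optional[float]     # null for some providers in the preview
    throughput: Optional[float]  # null for some providers in the preview
    images_per_dollar: float


class ModelRecord(TypedDict):
    """One row of the dataset."""
    id: str                      # e.g. "ai21/jamba-1.6-large"
    name: str                    # display name, e.g. "AI21: Jamba 1.6 Large"
    hf_id: Optional[str]         # Hugging Face repo id; may be null
    model_type: str              # only "language" appears in the preview
    author: str
    providers: List[Provider]
    median_input_cost: float
    median_output_cost: float
    low_input_cost: float
    low_output_cost: float
    high_input_cost: float
    high_output_cost: float
    is_open_weights: bool
```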
A representative sample of the 100-row preview (per-provider details omitted here for brevity):

| id | name | author | median cost (in/out) | low cost (in/out) | high cost (in/out) |
| --- | --- | --- | --- | --- | --- |
| ai21/jamba-1.6-large | AI21: Jamba 1.6 Large | ai21labs | 2 / 8 | 2 / 8 | 2 / 8 |
| ai21/jamba-1.6-mini | AI21: Jamba Mini 1.6 | ai21labs | 0.2 / 0.4 | 0.2 / 0.4 | 0.2 / 0.4 |
| cohere/command-a-03-2025 | Cohere: Command A | CohereForAI | 2.5 / 10 | 2.5 / 10 | 2.5 / 10 |
| mistralai/mistral-small-24b-instruct-2501 | Mistral: Mistral Small 3 | mistralai | 0.3 / 0.3 | 0.07 / 0.14 | 0.9 / 0.9 |
| qwen/qwq-32b | Qwen: QwQ 32B | Qwen | 0.5 / 0.5 | 0.12 / 0.18 | 1.5 / 3 |
| deepseek/deepseek-r1 | DeepSeek: R1 | deepseek-ai | 1.485 / 5.94 | 0.55 / 2.19 | 6.5 / 8 |
| meta-llama/llama-3.3-70b-instruct | Meta: Llama 3.3 70B Instruct | meta-llama | 0.59 / 0.79 | 0.12 / 0.3 | 0.9 / 0.9 |

The remaining rows cover further open-weight models (Llama 3.x, Qwen 2.5, DeepSeek R1 distills, Mistral, Gemma 2, Phi, and various community fine-tunes); every row in the preview has `is_open_weights = true`. Each row's `providers` field lists the providers serving that model with their name, context length, maximum output, input and output prices, latency, throughput, and images per dollar.
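The card does not say how the low/median/high cost columns are derived from `providers`, but in the preview the low and high columns match the cheapest and most expensive listed provider (see the QwQ 32B row above). Below is a minimal sketch of that kind of per-provider aggregation, using a truncated providers list from the QwQ 32B row as example data; it is illustrative only, and the dataset's actual aggregation rule (in particular for the median) may differ.

```python
# Illustrative only: summarizing per-provider prices into low/median/high
# columns. The dataset card does not document its exact aggregation rule.
from statistics import median

# Truncated providers list from the "qwen/qwq-32b" preview row.
providers = [
    {"name": "DeepInfra", "input": 0.12, "output": 0.18},
    {"name": "Nebius AI Studio", "input": 0.15, "output": 0.45},
    {"name": "Groq", "input": 0.29, "output": 0.39},
    {"name": "SambaNova", "input": 1.5, "output": 3.0},
]

input_prices = [p["input"] for p in providers]
output_prices = [p["output"] for p in providers]

summary = {
    # min/max reproduce this row's low/high columns (0.12/0.18 and 1.5/3);
    # the median here will not match the published 0.5/0.5, because the
    # list is truncated and the dataset's median rule is undocumented.
    "low_input_cost": min(input_prices),
    "high_input_cost": max(input_prices),
    "median_input_cost": median(input_prices),
    "low_output_cost": min(output_prices),
    "high_output_cost": max(output_prices),
    "median_output_cost": median(output_prices),
}
print(summary)
```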

Open Inference Pricing πŸ’°

This dataset is unofficial and for research purposes only.

This dataset aims to capture pricing and other serving metrics (context length, maximum output, latency, throughput) so that open and closed inference providers can be compared.

Updated semi-regularly.
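A minimal loading sketch, assuming the `datasets` library; the repository id and the `train` split name below are placeholders, since neither is stated on this card.

```python
# Minimal usage sketch. Replace the placeholder repo id with the actual
# dataset repository; the "train" split name is an assumption.
from datasets import load_dataset

ds = load_dataset("<user>/<this-dataset>", split="train")

# Cheapest listed provider (by input price) for each open-weight model.
for row in ds:
    if not row["is_open_weights"]:
        continue
    cheapest = min(row["providers"], key=lambda p: p["input"])
    print(f'{row["id"]}: {cheapest["name"]} '
          f'({cheapest["input"]} in / {cheapest["output"]} out)')
```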
