shubhrapandit committed on
Commit
fd6fec7
·
verified ·
1 Parent(s): 72343dc

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +24 -25
README.md CHANGED
@@ -204,7 +204,6 @@ The following performance benchmarks were conducted with [vLLM](https://docs.vll
204
 
205
  </details>
206
 
207
-
208
  ### Single-stream performance (measured with vLLM version 0.7.2)
209
 
210
  <table border="1" class="dataframe">
@@ -213,12 +212,14 @@ The following performance benchmarks were conducted with [vLLM](https://docs.vll
213
  <th></th>
214
  <th></th>
215
  <th></th>
 
216
  <th style="text-align: center;" colspan="2" >Document Visual Question Answering<br>1680W x 2240H<br>64/128</th>
217
  <th style="text-align: center;" colspan="2" >Visual Reasoning <br>640W x 480H<br>128/128</th>
218
  <th style="text-align: center;" colspan="2" >Image Captioning<br>480W x 360H<br>0/128</th>
219
  </tr>
220
  <tr>
221
  <th>Hardware</th>
 
222
  <th>Model</th>
223
  <th>Average Cost Reduction</th>
224
  <th>Latency (s)</th>
@@ -231,7 +232,8 @@ The following performance benchmarks were conducted with [vLLM](https://docs.vll
231
  </thead>
232
  <tbody>
233
  <tr>
234
- <td>A100x4</td>
 
235
  <td>Qwen/Qwen2.5-VL-72B-Instruct</td>
236
  <td></td>
237
  <td>6.4</td>
@@ -242,7 +244,7 @@ The following performance benchmarks were conducted with [vLLM](https://docs.vll
242
  <td>113</td>
243
  </tr>
244
  <tr>
245
- <td>A100x2</td>
246
  <td>neuralmagic/Qwen2.5-VL-72B-Instruct-quantized.w8a8</td>
247
  <td>1.85</td>
248
  <td>7.0</td>
@@ -253,7 +255,7 @@ The following performance benchmarks were conducted with [vLLM](https://docs.vll
253
  <td>211</td>
254
  </tr>
255
  <tr>
256
- <td>A100x1</td>
257
  <td>neuralmagic/Qwen2.5-VL-72B-Instruct-quantized.w4a16</td>
258
  <td>3.33</td>
259
  <td>9.4</td>
@@ -264,7 +266,8 @@ The following performance benchmarks were conducted with [vLLM](https://docs.vll
264
  <td>420</td>
265
  </tr>
266
  <tr>
267
- <td>H100x4</td>
 
268
  <td>Qwen/Qwen2.5-VL-72B-Instruct</td>
269
  <td></td>
270
  <td>4.3</td>
@@ -275,7 +278,7 @@ The following performance benchmarks were conducted with [vLLM](https://docs.vll
275
  <td>100</td>
276
  </tr>
277
  <tr>
278
- <td>H100x2</td>
279
  <td>neuralmagic/Qwen2.5-VL-72B-Instruct-FP8-Dynamic</td>
280
  <td>1.79</td>
281
  <td>4.6</td>
@@ -286,7 +289,7 @@ The following performance benchmarks were conducted with [vLLM](https://docs.vll
286
  <td>177</td>
287
  </tr>
288
  <tr>
289
- <td>H100x1</td>
290
  <td>neuralmagic/Qwen2.5-VL-72B-Instruct-quantized.w4a16</td>
291
  <td>5.66</td>
292
  <td>4.3</td>
@@ -329,7 +332,7 @@ The following performance benchmarks were conducted with [vLLM](https://docs.vll
329
  </thead>
330
  <tbody style="text-align: center">
331
  <tr>
332
- <td>A100x4</td>
333
  <td>Qwen/Qwen2.5-VL-72B-Instruct</td>
334
  <td></td>
335
  <td>0.4</td>
@@ -340,29 +343,27 @@ The following performance benchmarks were conducted with [vLLM](https://docs.vll
340
  <td>595</td>
341
  </tr>
342
  <tr>
343
- <td>A100x2</td>
344
  <td>neuralmagic/Qwen2.5-VL-72B-Instruct-quantized.w8a8</td>
345
  <td>1.80</td>
346
- <td>0.6</td>
347
  <td>289</td>
348
- <td>2.0</td>
349
  <td>1020</td>
350
- <td>2.3</td>
351
  <td>1133</td>
352
  </tr>
353
  <tr>
354
- <td>A100x1</td>
355
  <td>neuralmagic/Qwen2.5-VL-72B-Instruct-quantized.w4a16</td>
356
  <td>2.75</td>
357
- <td>0.7</td>
358
  <td>341</td>
359
- <td>3.2</td>
360
  <td>1588</td>
361
- <td>4.1</td>
362
  <td>2037</td>
363
  </tr>
364
  <tr>
365
- <td>H100x4</td>
366
  <td>Qwen/Qwen2.5-VL-72B-Instruct</td>
367
  <td></td>
368
  <td>0.5</td>
@@ -373,25 +374,23 @@ The following performance benchmarks were conducted with [vLLM](https://docs.vll
373
  <td>379</td>
374
  </tr>
375
  <tr>
376
- <td>H100x2</td>
377
  <td>neuralmagic/Qwen2.5-VL-72B-Instruct-FP8-Dynamic</td>
378
  <td>1.73</td>
379
- <td>0.9</td>
380
  <td>247</td>
381
- <td>2.2</td>
382
  <td>621</td>
383
- <td>2.4</td>
384
  <td>669</td>
385
  </tr>
386
  <tr>
387
- <td>H100x1</td>
388
  <td>neuralmagic/Qwen2.5-VL-72B-Instruct-quantized.w4a16</td>
389
  <td>8.27</td>
390
- <td>3.3</td>
391
  <td>913</td>
392
- <td>3.3</td>
393
  <td>913</td>
394
- <td>24.8</td>
395
  <td>6777</td>
396
  </tr>
397
  </tbody>
 
204
 
205
  </details>
206
 
 
207
  ### Single-stream performance (measured with vLLM version 0.7.2)
208
 
209
  <table border="1" class="dataframe">
 
212
  <th></th>
213
  <th></th>
214
  <th></th>
215
+ <th></th>
216
  <th style="text-align: center;" colspan="2" >Document Visual Question Answering<br>1680W x 2240H<br>64/128</th>
217
  <th style="text-align: center;" colspan="2" >Visual Reasoning <br>640W x 480H<br>128/128</th>
218
  <th style="text-align: center;" colspan="2" >Image Captioning<br>480W x 360H<br>0/128</th>
219
  </tr>
220
  <tr>
221
  <th>Hardware</th>
222
+ <th>Number of GPUs</th>
223
  <th>Model</th>
224
  <th>Average Cost Reduction</th>
225
  <th>Latency (s)</th>
 
232
  </thead>
233
  <tbody>
234
  <tr>
235
+ <th rowspan="3" valign="top">A100</th>
236
+ <td>4</td>
237
  <td>Qwen/Qwen2.5-VL-72B-Instruct</td>
238
  <td></td>
239
  <td>6.4</td>
 
244
  <td>113</td>
245
  </tr>
246
  <tr>
247
+ <td>2</td>
248
  <td>neuralmagic/Qwen2.5-VL-72B-Instruct-quantized.w8a8</td>
249
  <td>1.85</td>
250
  <td>7.0</td>
 
255
  <td>211</td>
256
  </tr>
257
  <tr>
258
+ <td>1</td>
259
  <td>neuralmagic/Qwen2.5-VL-72B-Instruct-quantized.w4a16</td>
260
  <td>3.33</td>
261
  <td>9.4</td>
 
266
  <td>420</td>
267
  </tr>
268
  <tr>
269
+ <th rowspan="3" valign="top">H100</th>
270
+ <td>4</td>
271
  <td>Qwen/Qwen2.5-VL-72B-Instruct</td>
272
  <td></td>
273
  <td>4.3</td>
 
278
  <td>100</td>
279
  </tr>
280
  <tr>
281
+ <td>2</td>
282
  <td>neuralmagic/Qwen2.5-VL-72B-Instruct-FP8-Dynamic</td>
283
  <td>1.79</td>
284
  <td>4.6</td>
 
289
  <td>177</td>
290
  </tr>
291
  <tr>
292
+ <td>1</td>
293
  <td>neuralmagic/Qwen2.5-VL-72B-Instruct-quantized.w4a16</td>
294
  <td>5.66</td>
295
  <td>4.3</td>
 
332
  </thead>
333
  <tbody style="text-align: center">
334
  <tr>
335
+ <th rowspan="3" valign="top">A100x4</th>
336
  <td>Qwen/Qwen2.5-VL-72B-Instruct</td>
337
  <td></td>
338
  <td>0.4</td>
 
343
  <td>595</td>
344
  </tr>
345
  <tr>
 
346
  <td>neuralmagic/Qwen2.5-VL-72B-Instruct-quantized.w8a8</td>
347
  <td>1.80</td>
348
+ <td>1.2</td>
349
  <td>289</td>
350
+ <td>4.0</td>
351
  <td>1020</td>
352
+ <td>4.6</td>
353
  <td>1133</td>
354
  </tr>
355
  <tr>
 
356
  <td>neuralmagic/Qwen2.5-VL-72B-Instruct-quantized.w4a16</td>
357
  <td>2.75</td>
358
+ <td>2.8</td>
359
  <td>341</td>
360
+ <td>12.8</td>
361
  <td>1588</td>
362
+ <td>16.4</td>
363
  <td>2037</td>
364
  </tr>
365
  <tr>
366
+ <th rowspan="3" valign="top">H100x4</th>
367
  <td>Qwen/Qwen2.5-VL-72B-Instruct</td>
368
  <td></td>
369
  <td>0.5</td>
 
374
  <td>379</td>
375
  </tr>
376
  <tr>
 
377
  <td>neuralmagic/Qwen2.5-VL-72B-Instruct-FP8-Dynamic</td>
378
  <td>1.73</td>
379
+ <td>1.8</td>
380
  <td>247</td>
381
+ <td>4.4</td>
382
  <td>621</td>
383
+ <td>4.8</td>
384
  <td>669</td>
385
  </tr>
386
  <tr>
 
387
  <td>neuralmagic/Qwen2.5-VL-72B-Instruct-quantized.w4a16</td>
388
  <td>8.27</td>
389
+ <td>13.2</td>
390
  <td>913</td>
391
+ <td>13.2</td>
392
  <td>913</td>
393
+ <td>99.2</td>
394
  <td>6777</td>
395
  </tr>
396
  </tbody>