Spaces:
Paused
Paused
Added instance size to counter key
Browse files
main_backend_lighteval.py
CHANGED
@@ -55,8 +55,8 @@ def run_auto_eval():
|
|
55 |
# For GPU
|
56 |
if not eval_request or eval_request.params < 0:
|
57 |
raise ValueError("Couldn't detect number of params, please make sure the metadata is available")
|
58 |
-
elif eval_request.params < 4:
|
59 |
-
|
60 |
elif eval_request.params < 9:
|
61 |
instance_size, instance_type, cap = "x1", "nvidia-a10g", 35
|
62 |
elif eval_request.params < 24:
|
@@ -72,7 +72,7 @@ def run_auto_eval():
|
|
72 |
pp.pprint(dict(message="Number of params too big, can't run this model", params=eval_request.params))
|
73 |
return
|
74 |
|
75 |
-
counter_key = f'count_{instance_type}'
|
76 |
if not counter_key in requests_seen:
|
77 |
requests_seen[counter_key] = 0
|
78 |
if requests_seen[counter_key] >= cap:
|
|
|
55 |
# For GPU
|
56 |
if not eval_request or eval_request.params < 0:
|
57 |
raise ValueError("Couldn't detect number of params, please make sure the metadata is available")
|
58 |
+
# elif eval_request.params < 4:
|
59 |
+
# instance_size, instance_type, cap = "x1", "nvidia-a10g", 20
|
60 |
elif eval_request.params < 9:
|
61 |
instance_size, instance_type, cap = "x1", "nvidia-a10g", 35
|
62 |
elif eval_request.params < 24:
|
|
|
72 |
pp.pprint(dict(message="Number of params too big, can't run this model", params=eval_request.params))
|
73 |
return
|
74 |
|
75 |
+
counter_key = f'count_{instance_size}_{instance_type}'
|
76 |
if not counter_key in requests_seen:
|
77 |
requests_seen[counter_key] = 0
|
78 |
if requests_seen[counter_key] >= cap:
|