meg-huggingface committed
Commit · 7526aba
1 Parent(s): 8cd9975
Changing max_new_tokens to None due to continuing warning on backend:
Running generate_until requests: 0%| | 13/99442 [10:42<1539:30:14, 55.74s/it]
Both `max_new_tokens` (=2048) and `max_length` (=302) seem to have been set. `max_new_tokens` will take precedence. Please refer to the documentation for more information. (https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)
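For context, the warning originates in transformers' generate(): when both max_new_tokens and max_length are set, max_new_tokens takes precedence and the warning is logged on every request, which floods the backend log at this scale. A minimal sketch of the behavior, assuming gpt2 as a stand-in model (not part of this repo):

# Minimal reproduction sketch of the warning this commit silences,
# using gpt2 as a stand-in model.
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tok("Hello", return_tensors="pt")

# Both limits set: transformers warns that max_new_tokens (=2048) and
# max_length (=302) were both given, and max_new_tokens wins.
model.generate(**inputs, max_new_tokens=2048, max_length=302)

# With max_new_tokens=None, max_length is the only stopping limit,
# so the warning is not emitted.
model.generate(**inputs, max_new_tokens=None, max_length=302)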
src/backend/run_eval_suite_harness.py
CHANGED
@@ -71,6 +71,7 @@ def run_evaluation(eval_request: EvalRequest, task_names: list, num_fewshot: int
     print(limit)
     # no_cache=no_cache,
     # output_base_path="logs"
+    #gen_kwargs="max_new_tokens=None"
     results = evaluator.simple_evaluate(
         model="hf", #= "hf-causal-experimental", # "hf-causal"
         model_args=model_args,
@@ -80,6 +81,7 @@ def run_evaluation(eval_request: EvalRequest, task_names: list, num_fewshot: int
         device=device,
         limit=limit,
         write_out=True,
+        gen_kwargs="max_new_tokens=None"
     )
 
     # TODO: For Toxicity, substract from 100.
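Note that gen_kwargs is passed to simple_evaluate as a string of key=value pairs (as the diff shows) rather than a dict, so "max_new_tokens=None" must be turned back into a Python None before it reaches generate(). The parser below is only an illustrative stand-in for that step (a hypothetical helper, not lm-eval's actual implementation):

# Hypothetical parser illustrating how a gen_kwargs-style string could be
# coerced into real Python values; lm-eval's own parsing may differ.
def parse_gen_kwargs(arg_string: str) -> dict:
    kwargs = {}
    for pair in arg_string.split(","):
        if not pair.strip():
            continue
        key, value = (s.strip() for s in pair.split("=", 1))
        if value == "None":
            kwargs[key] = None                # "max_new_tokens=None" -> None
        elif value in ("True", "False"):
            kwargs[key] = value == "True"     # booleans
        else:
            try:
                kwargs[key] = int(value)      # integers like 2048
            except ValueError:
                kwargs[key] = value           # leave everything else a string
    return kwargs

assert parse_gen_kwargs("max_new_tokens=None") == {"max_new_tokens": None}
assert parse_gen_kwargs("max_new_tokens=2048,do_sample=True") == {
    "max_new_tokens": 2048,
    "do_sample": True,
}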