meg-huggingface committed
Commit e6dead6 · 1 Parent(s): 499d1c4

evaluation with hf not hf-auto

src/backend/run_eval_suite_harness.py CHANGED
@@ -72,7 +72,7 @@ def run_evaluation(eval_request: EvalRequest, task_names: list, num_fewshot: int
     # no_cache=no_cache,
     # output_base_path="logs"
     results = evaluator.simple_evaluate(
-        model="hf-auto", #= "hf-causal-experimental", # "hf-causal"
+        model="hf", #= "hf-causal-experimental", # "hf-causal"
         model_args=model_args,
         tasks=task_names,
         num_fewshot=num_fewshot,
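
For context, a minimal sketch of the call being patched, assuming lm-evaluation-harness v0.4.x, where "hf" is the registered name for the Transformers AutoModel-backed HFLM wrapper (the "hf-causal"/"hf-causal-experimental" names mentioned in the comment belong to the older v0.3 API). The checkpoint EleutherAI/pythia-70m and the hellaswag task are placeholder choices, not values from this repo:

from lm_eval import evaluator

# "hf" selects the harness's Hugging Face AutoModel wrapper, not a checkpoint;
# model_args is a comma-separated string of keyword arguments for that wrapper.
results = evaluator.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-70m",  # placeholder checkpoint
    tasks=["hellaswag"],                            # placeholder task
    num_fewshot=0,
)
print(results["results"])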