import json
import logging
import os
from datetime import datetime

from evaluate_model import Evaluator

from src.backend.manage_requests import EvalRequest
from src.envs import API

# Keep the OpenAI client's verbose request logging out of the output.
logging.getLogger("openai").setLevel(logging.WARNING)

def run_evaluation(
    eval_request: EvalRequest,
    num_fewshot: int,
    batch_size: int,
    device: str,
    local_dir: str,
    results_repo: str,
    no_cache=True,
    limit=None,
):
    """Evaluate a single queued request and upload the results.

    Writes the results JSON under ``local_dir`` and mirrors the same file to
    the ``results_repo`` dataset on the Hugging Face Hub.
    """
    if limit:
        print(
            "WARNING: --limit SHOULD ONLY BE USED FOR TESTING. REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT."
        )

    evaluator = Evaluator(
        eval_request.model,
        eval_request.revision,
        eval_request.precision,
        num_fewshot,
        batch_size,
        device,
        no_cache,
        limit,
        write_out=True,
        output_base_path="logs",
    )
    results = evaluator.evaluate()

    # These fields are presumably populated by the Evaluator itself; if not,
    # they would need to be filled in here from the request:
    # results["config"]["model_dtype"] = eval_request.precision
    # results["config"]["model_name"] = eval_request.model
    # results["config"]["model_sha"] = eval_request.revision

    dumped = json.dumps(results, indent=2)
    print(dumped)

    # Build the timestamp once so the local file name and the path in the
    # results repo match exactly; calling datetime.now() twice would produce
    # two different names, and str(datetime.now()) embeds spaces and colons,
    # which make awkward (and on some systems invalid) file names.
    timestamp = datetime.now().strftime("%Y-%m-%dT%H-%M-%S.%f")
    output_path = os.path.join(local_dir, *eval_request.model.split("/"), f"results_{timestamp}.json")
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    with open(output_path, "w") as f:
        f.write(dumped)

    # Print a human-readable summary table before uploading.
    print(evaluator.make_table(results))

    API.upload_file(
        path_or_fileobj=output_path,
        path_in_repo=f"{eval_request.model}/results_{timestamp}.json",
        repo_id=results_repo,
        repo_type="dataset",
    )

    return results
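

# A minimal usage sketch (an assumption, not part of this module's original
# code). EvalRequest is defined in src.backend.manage_requests and its real
# constructor may require more fields than the three this module reads
# (model, revision, precision); the model id, device, and repo names below
# are placeholders.
if __name__ == "__main__":
    request = EvalRequest(
        model="org/model-name",  # hypothetical Hub model id
        revision="main",
        precision="float16",
    )
    run_evaluation(
        eval_request=request,
        num_fewshot=0,
        batch_size=1,
        device="cuda:0",
        local_dir="eval-results",
        results_repo="org/results",  # hypothetical results dataset repo
        limit=10,  # small limit for a smoke test; not for real metrics
    )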