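"""
Re-judge the statements stored in the input graph with the trainee model and
report the average edge loss before and after re-judging.

Note: the relative imports require running this file as a module of its
package rather than as a standalone script.
"""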
import os
import argparse
import asyncio
from dotenv import load_dotenv

from .models import NetworkXStorage, JsonKVStorage, OpenAIModel
from .operators import judge_statement

# Directory containing this file; used to resolve default cache paths
sys_path = os.path.abspath(os.path.dirname(__file__))

# Load TRAINEE_MODEL / TRAINEE_API_KEY / TRAINEE_BASE_URL from a local .env file
load_dotenv()

def calculate_average_loss(graph: NetworkXStorage):
    """
    Calculate the average edge loss of the graph.

    :param graph: NetworkXStorage
    :return: float
    """
    edges = asyncio.run(graph.get_all_edges())
    # Each edge is a (source, target, data) tuple; assumes the graph has at least one edge
    total_loss = sum(edge[2]['loss'] for edge in edges)
    return total_loss / len(edges)



if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, default=os.path.join(sys_path, "cache"), help='path to load the input graph from')
    parser.add_argument('--output', type=str, default='cache/output/new_graph.graphml', help='path to save the output graph')

    args = parser.parse_args()

    # Trainee model used to judge the statements on the graph
    llm_client = OpenAIModel(
        model_name=os.getenv("TRAINEE_MODEL"),
        api_key=os.getenv("TRAINEE_API_KEY"),
        base_url=os.getenv("TRAINEE_BASE_URL")
    )

    # Load the cached input graph
    graph_storage = NetworkXStorage(
        args.input,
        namespace="graph"
    )
    average_loss = calculate_average_loss(graph_storage)
    print(f"Average loss of the graph before re-judging: {average_loss}")

    # KV cache of rephrased statements consumed by judge_statement
    rephrase_storage = JsonKVStorage(
        os.path.join(sys_path, "cache"),
        namespace="rephrase"
    )

    # Re-judge every statement with the trainee model and update the edge losses
    new_graph = asyncio.run(judge_statement(llm_client, graph_storage, rephrase_storage, re_judge=True))

    # Persist the updated graph to the output GraphML file
    graph_file = asyncio.run(graph_storage.get_graph())
    new_graph.write_nx_graph(graph_file, args.output)

    average_loss = calculate_average_loss(new_graph)
    print(f"Average loss of the graph after re-judging: {average_loss}")