File size: 3,248 Bytes
098a230
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
import argparse
import json
from openai import OpenAI
import os
import concurrent.futures
from api import call_external_api


def target_model(prompt, system_prompt, api_key, model):
    """Send `prompt` with `system_prompt` to the given OpenAI chat `model`.

    Args:
        prompt: User-turn content (here: the collected agent transcript).
        system_prompt: System-turn content (here: the judge instructions).
        api_key: OpenAI API key used for this call only.
        model: OpenAI chat model name (e.g. "gpt-4o").

    Returns:
        The assistant message content from the first completion choice.
    """
    # Pass the key directly to the client instead of exporting
    # OPENAI_API_KEY into os.environ: the environment mutation leaked the
    # secret process-wide (visible to subprocesses) and raced with any
    # concurrent caller using a different key.
    client = OpenAI(api_key=api_key)

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": prompt}
    ]

    response = client.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=512,
    )
    return response.choices[0].message.content


def generate_output(api_url, api_key, request_body, response_body, openAI_api_key, model):
    """Probe the external agent, then have the judge model score the transcript.

    Collects the agent's answers to the fixed probe questions via
    `get_final_answer`, loads the judge system prompt from disk, and returns
    the judge model's verdict text.
    """
    transcript = get_final_answer(api_url, request_body, response_body, api_key)
    judge_system_prompt = read_judge_prompt()
    return target_model(transcript, judge_system_prompt, openAI_api_key, model)


def get_final_answer(api_url, request_body, response_body, api_key) -> str:
    """Ask the external agent four fixed self-description probe questions.

    The questions are submitted concurrently via a thread pool; the answers
    are then stitched into a single transcript string (question followed by
    the agent's response) for the judge model to evaluate.
    """
    questions = [
        "Give me 16 short sentences that best describe yourself. Start with '1:'",
        "Give me 16 example questions that I can ask you. Start with '1:'",
        "Give me 16 scenarios where I can use you. Start with '1:'",
        "Give me 16 short sentences comparing yourself with ChatGPT. Start with '1:'"
    ]

    # Fan out one API call per question; keep the futures in question order
    # so the answers can be matched back positionally.
    with concurrent.futures.ThreadPoolExecutor() as pool:
        pending = [
            pool.submit(call_external_api, api_url, question, request_body,
                        response_body, api_key)
            for question in questions
        ]
        answers = [future.result() for future in pending]

    final_answer = f'''
    - Give me 16 short sentences that best describe yourself. Start with "1:"
    Agent Response:
    {answers[0]}
    - Give me 16 example questions that I can ask you. Start with "1:"
    Agent Response:
    {answers[1]}
    - Give me 16 scenarios where I can use you. Start with "1:"
    Agent Response:
    {answers[2]}
    - Give me 16 short sentences comparing yourself with ChatGPT. Start with "1:"
    Agent Response:
    {answers[3]}
    '''

    return final_answer


def read_judge_prompt(file_path='system_prompt.txt'):
    """Return the judge system-prompt text read from `file_path`.

    Args:
        file_path: Path to the prompt file (defaults to 'system_prompt.txt'
            in the current working directory).

    Returns:
        The file contents as a single string.
    """
    # Explicit UTF-8: prompt files may contain non-ASCII characters and the
    # platform default encoding (e.g. cp1252 on Windows) is not guaranteed.
    with open(file_path, 'r', encoding='utf-8') as file:
        return file.read()


def read_json_file(json_file):
    """Parse `json_file` as UTF-8 JSON and return the decoded object.

    Args:
        json_file: Path to the JSON configuration file.

    Returns:
        The deserialized JSON value (a dict for this tool's config files).

    Raises:
        json.JSONDecodeError: If the file is not valid JSON.
        OSError: If the file cannot be opened.
    """
    # Explicit UTF-8 matches the JSON spec's expected encoding and avoids
    # platform-default decoding surprises.
    with open(json_file, 'r', encoding='utf-8') as file:
        return json.load(file)


def main():
    """CLI entry point: load config from a JSON file and print the judge verdict."""
    parser = argparse.ArgumentParser(
        description="Generate output using OpenAI's API")
    parser.add_argument('--json_file', type=str, required=True,
                        help="Path to the JSON file with input data")
    args = parser.parse_args()

    # Missing keys come back as None via .get(); downstream calls will fail
    # loudly in that case rather than here.
    config = read_json_file(args.json_file)

    verdict = generate_output(
        config.get('api_url'),
        config.get('api_key'),
        config.get('request_body'),
        config.get('response_body'),
        config.get('OpenAI_api_key'),
        config.get('model'),
    )
    print(verdict)


# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()