File size: 1,952 Bytes
8360ec7 ad69f02 8360ec7 195bb32 8360ec7 195bb32 8360ec7 195bb32 8360ec7 195bb32 8360ec7 195bb32 8360ec7 ad69f02 8360ec7 195bb32 8360ec7 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 |
import os
import time
from openai import OpenAI
import openai
# Module-level OpenAI client; stays None until init_client() succeeds
# (i.e. until an API key is available via openai.api_key or OPENAI_API_KEY).
client = None
def json_fix(output):
    """Strip Markdown ```json code-fence markers from *output* and return it."""
    without_opening = output.replace("```json\n", "")
    return without_opening.replace("\n```", "")
def init_client():
    """Lazily create the module-level OpenAI client.

    Does nothing if the client already exists. If no API key is available
    yet (neither ``openai.api_key`` nor the ``OPENAI_API_KEY`` environment
    variable), initialization is deferred and a notice is printed.
    """
    global client
    if client is not None:
        return
    no_key = openai.api_key is None and "OPENAI_API_KEY" not in os.environ
    if no_key:
        print("openai_key not presented, delay to initialize.")
        return
    client = OpenAI()
def request(
    user_inputs,
    model,
    system_role,
    temperature=1.0,
    return_all=False,
):
    """Send a chat-completion request to the OpenAI API.

    Args:
        user_inputs: A single user message (str), a list of alternating
            user/assistant message strings (even indices are user turns),
            or a list of pre-built message dicts.
        model: Model name passed to the API.
        system_role: Content of the system message prepended to the chat.
        temperature: Sampling temperature forwarded to the API.
        return_all: If True, return the raw API response object instead of
            the concatenated message text.

    Returns:
        The raw response when ``return_all`` is True; otherwise the
        concatenated choice contents with Markdown JSON fences stripped.

    Raises:
        ValueError: If ``user_inputs`` is neither a str nor a list of
            all-str or all-dict items.
        RuntimeError: If the client could not be initialized (no API key).
    """
    init_client()
    if client is None:
        # init_client() defers when no API key is available; fail loudly
        # here instead of with an opaque AttributeError on `client` below.
        raise RuntimeError("OpenAI client is not initialized (missing API key).")
    if isinstance(user_inputs, str):
        chat_histories = [{"role": "user", "content": user_inputs}]
    elif isinstance(user_inputs, list):
        if all(isinstance(x, str) for x in user_inputs):
            # Alternate roles: even indices are user turns, odd are assistant.
            chat_histories = [
                {"role": "user" if i % 2 == 0 else "assistant", "content": x}
                for i, x in enumerate(user_inputs)
            ]
        elif all(isinstance(x, dict) for x in user_inputs):
            chat_histories = user_inputs
        else:
            raise ValueError("Invalid input for OpenAI API calling")
    else:
        raise ValueError("Invalid input for OpenAI API calling")
    messages = [{"role": "system", "content": system_role}] + chat_histories
    response = client.chat.completions.create(model=model, messages=messages, temperature=temperature)
    if return_all:
        return response
    # Concatenate all returned choices, then strip Markdown JSON fences.
    response_str = "".join(choice.message.content for choice in response.choices)
    return json_fix(response_str)
def gpt(user_inputs, model, system_role, temperature=1.0, num_retries=3, waiting=1):
    """Call ``request`` with retries on OpenAI API errors.

    Each failed attempt prints the error and sleeps ``waiting`` seconds
    before retrying. Returns the response string on success, or None if
    all ``num_retries`` attempts failed.
    """
    for _attempt in range(num_retries):
        try:
            return request(user_inputs, model, system_role, temperature=temperature)
        except openai.OpenAIError as exception:
            print(f"{exception}. Retrying...")
            time.sleep(waiting)
    return None
|