import time

import openai
import requests
import streamlit as st

# Credentials are read from Streamlit secrets (set in .streamlit/secrets.toml).
OPENAI_API_KEY = st.secrets["OPENAI_API_KEY"]
OPENAI_ORGANIZATION_ID = st.secrets["OPENAI_ORGANIZATION_ID"]

headers = {
    "Content-Type": "application/json",
    "Authorization": f"Bearer {OPENAI_API_KEY}",
}

SEED = 42


def get_client():
    return openai.OpenAI(api_key=OPENAI_API_KEY, organization=OPENAI_ORGANIZATION_ID)


def call_openai(prompt, engine="gpt-3.5-turbo", temp=0, top_p=1.0, max_tokens=4048):
    if st.session_state.report_type == "assistant":
        try:
            thread = st.session_state.assistant_thread
            assistant_id = st.session_state.assistant_id

            # Add the user's prompt to the assistant thread.
            st.session_state.openai_client.beta.threads.messages.create(
                thread_id=thread.id,
                role="user",
                content=prompt,
            )

            # Start a run of the assistant on the thread.
            run = st.session_state.openai_client.beta.threads.runs.create(
                thread_id=thread.id,
                assistant_id=assistant_id,
                instructions="Please address the user as Dan",
            )

            messages = []
            while True:
                # Retrieve the run status.
                run_status = st.session_state.openai_client.beta.threads.runs.retrieve(
                    thread_id=thread.id,
                    run_id=run.id,
                )

                # Check and print the step details.
                run_steps = st.session_state.openai_client.beta.threads.runs.steps.list(
                    thread_id=thread.id,
                    run_id=run.id,
                )
                for step in run_steps.data:
                    if step.type == "tool_calls":
                        print(f"Tool {step.type} invoked.")
                        # If a step ran the code interpreter, print the code it executed.
                        for tool_call in step.step_details.tool_calls:
                            if tool_call.type == "code_interpreter":
                                print(f"Python Code Executed: {tool_call.code_interpreter.input}")

                if run_status.status == "completed":
                    # Retrieve all messages from the thread.
                    messages = st.session_state.openai_client.beta.threads.messages.list(
                        thread_id=thread.id
                    )
                    # Print all messages from the thread.
                    for msg in messages.data:
                        role = msg.role
                        content = msg.content[0].text.value
                        print(f"{role.capitalize()}: {content}")
                    break  # Exit the polling loop since the run is complete.
                elif run_status.status in ["queued", "in_progress"]:
                    print(f"{run_status.status.capitalize()}... Please wait.")
                    time.sleep(1.5)  # Wait before checking again.
                else:
                    # The run failed, expired, or was cancelled; stop polling.
                    print(f"Run status: {run_status.status}")
                    break

            print(f"====================\nOpen AI response\n {messages}\n====================\n")
            text = ""
            for message in messages:
                text = text + "\n" + message.content[0].text.value
            return text
        except Exception as e:
            print(f"An error occurred: {str(e)}")
            return "Failed to generate a response."
    else:
        try:
            response = st.session_state.openai_client.chat.completions.create(
                model=engine,
                messages=st.session_state.messages + [{"role": "user", "content": prompt}],
                temperature=temp,
                seed=SEED,
                max_tokens=max_tokens,
            )
            print(f"====================\nOpen AI response\n {response}\n====================\n")
            text = response.choices[0].message.content.strip()
            return text
        except Exception as e:
            print(f"An error occurred: {str(e)}")
            return "Failed to generate a response."
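
# --- Illustrative sketch, not part of the original module ---
# call_openai() assumes the Streamlit session already holds an OpenAI client,
# a report_type flag, and a chat history. One plausible way to bootstrap the
# chat-completions path is shown below; the function name, the "chat" flag
# value, and the system prompt text are assumptions, not taken from the app.
def init_chat_session():
    if "openai_client" not in st.session_state:
        st.session_state.openai_client = get_client()
    if "report_type" not in st.session_state:
        # Any value other than "assistant" routes call_openai() to chat completions.
        st.session_state.report_type = "chat"
    if "messages" not in st.session_state:
        st.session_state.messages = [
            {"role": "system", "content": "You are a helpful reporting assistant."}  # placeholder prompt
        ]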
def get_assistant(assistant_id):
    return st.session_state.openai_client.beta.assistants.retrieve(assistant_id)


def send_message(role, content):
    # Append a message to the active assistant thread.
    return st.session_state.openai_client.beta.threads.messages.create(
        thread_id=st.session_state.assistant_thread.id,
        role=role,
        content=content,
    )


def start_conversation():
    # Create a fresh thread for the assistant conversation.
    st.session_state.assistant_thread = st.session_state.openai_client.beta.threads.create()


def run_assistant():
    # Start a run on the current thread and poll until it leaves the queued/in-progress states.
    run = st.session_state.openai_client.beta.threads.runs.create(
        thread_id=st.session_state.assistant_thread.id,
        assistant_id=st.session_state.assistant.id,
    )
    while run.status in ("queued", "in_progress"):
        run = st.session_state.openai_client.beta.threads.runs.retrieve(
            thread_id=st.session_state.assistant_thread.id,
            run_id=run.id,
        )
        time.sleep(0.5)
    return run
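
# --- Illustrative sketch, not part of the original module ---
# One way the assistant helpers above might be wired together in the Streamlit
# app. The function names, the "ASSISTANT_ID" secret name, and the status
# handling are assumptions, not taken from the original code.
def init_assistant_session():
    if "openai_client" not in st.session_state:
        st.session_state.openai_client = get_client()
    if "assistant" not in st.session_state:
        st.session_state.assistant_id = st.secrets["ASSISTANT_ID"]  # hypothetical secret name
        st.session_state.assistant = get_assistant(st.session_state.assistant_id)
    if "assistant_thread" not in st.session_state:
        start_conversation()
    st.session_state.report_type = "assistant"


def ask_assistant(prompt):
    # Post the user prompt, wait for the run to finish, and return the latest reply.
    send_message("user", prompt)
    run = run_assistant()
    if run.status != "completed":
        return f"Run ended with status: {run.status}"
    messages = st.session_state.openai_client.beta.threads.messages.list(
        thread_id=st.session_state.assistant_thread.id
    )
    # Messages are returned newest-first, so the first entry is the assistant's reply.
    return messages.data[0].content[0].text.value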