import streamlit as st
from PIL import Image
import requests
from io import BytesIO
from clarifai_grpc.channel.clarifai_channel import ClarifaiChannel
from clarifai_grpc.grpc.api import resources_pb2, service_pb2, service_pb2_grpc
from clarifai_grpc.grpc.api.status import status_code_pb2

# Set up the Streamlit app
st.title("OpenAI Models Integration")

# ... (previous code)

# Display HTML content
html_content = """

[English](README.md) | [简体中文](README.md)
## Installation

You can simply use pip to install the latest version of pyresearch.
`pip install pyresearch`

"""
st.markdown(html_content, unsafe_allow_html=True)

# Continue with the rest of the Streamlit app code...
# ... (as provided in the previous response)

# Model selection
model_option = st.selectbox(
    "Select Model",
    ["GPT-4 Turbo", "GPT-4 Vision", "DALL-E API", "Text-to-Speech (TTS)"],
)

# Common Clarifai credentials
PAT = 'bfdeb4029ef54d23a2e608b0aa4c00e4'
USER_ID = 'openai'
APP_ID = 'chat-completion'

# Make a Clarifai API request and return the model output data.
# Errors are reported in the Streamlit UI and None is returned.
def make_api_request(model_id, model_version_id, raw_text):
    channel = ClarifaiChannel.get_grpc_channel()
    stub = service_pb2_grpc.V2Stub(channel)
    metadata = (('authorization', 'Key ' + PAT),)
    userDataObject = resources_pb2.UserAppIDSet(user_id=USER_ID, app_id=APP_ID)

    post_model_outputs_response = stub.PostModelOutputs(
        service_pb2.PostModelOutputsRequest(
            user_app_id=userDataObject,
            model_id=model_id,
            version_id=model_version_id,
            inputs=[
                resources_pb2.Input(
                    data=resources_pb2.Data(
                        text=resources_pb2.Text(raw=raw_text)
                    )
                )
            ],
        ),
        metadata=metadata,
    )

    if post_model_outputs_response.status.code != status_code_pb2.SUCCESS:
        st.error(f"Clarifai API request failed: {post_model_outputs_response.status.description}")
        return None

    # Return the full output data so each caller can read the field it needs
    # (text for GPT-4 Turbo, image bytes for GPT-4 Vision)
    return post_model_outputs_response.outputs[0].data

# GPT-4 Turbo
if model_option == "GPT-4 Turbo":
    model_id = 'gpt-4-turbo'
    model_version_id = '182136408b4b4002a920fd500839f2c8'
    raw_text = st.text_area("Enter text prompt:", 'I love your Pyresearch very much')

    if st.button("Generate Text"):
        output_data = make_api_request(model_id, model_version_id, raw_text)
        if output_data is not None:
            st.write("Generated Text:", output_data.text.raw)

# GPT-4 Vision
elif model_option == "GPT-4 Vision":
    model_id = 'gpt-4-vision-alternative'
    model_version_id = '12b67ac2b5894fb9af9c06ebf8dc02fb'
    raw_text = st.text_area("Enter text prompt for vision:", 'A penguin watching the sunset.')

    if st.button("Generate Image (GPT-4 Vision)"):
        output_data = make_api_request(model_id, model_version_id, raw_text)
        if output_data is not None:
            try:
                # Attempt to display the image (raw bytes in the image.base64 field)
                st.image(output_data.image.base64, caption='Generated Image', use_column_width=True)
            except Exception as e:
                st.error(f"Error displaying image: {e}")

# ...