import streamlit as st
from clarifai_grpc.channel.clarifai_channel import ClarifaiChannel
from clarifai_grpc.grpc.api import resources_pb2, service_pb2, service_pb2_grpc
from clarifai_grpc.grpc.api.status import status_code_pb2
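# Dependencies (assumption about the environment): pip install streamlit clarifai-grpc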
# Set up the Streamlit app
st.title("OpenAI Models Integration")
# Model selection
model_option = st.selectbox("Select Model", ["GPT-4 Turbo", "GPT-4 Vision", "DALL-E API", "Text-to-Speech (TTS)"])
# Common Clarifai credentials
PAT = 'bfdeb4029ef54d23a2e608b0aa4c00e4'
USER_ID = 'openai'
APP_ID = 'chat-completion'
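# Optional sketch (not in the original app): when deployed, the PAT could instead be read
# from Streamlit secrets rather than hard-coded, e.g.
#   PAT = st.secrets["CLARIFAI_PAT"]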
# Helper: send a text prompt to a Clarifai-hosted model and return the first output's data
def make_api_request(model_id, model_version_id, raw_text, user_id=USER_ID, app_id=APP_ID):
    """Call PostModelOutputs for the given model and return the output Data proto, or None on failure."""
    channel = ClarifaiChannel.get_grpc_channel()
    stub = service_pb2_grpc.V2Stub(channel)
    metadata = (('authorization', 'Key ' + PAT),)
    user_data_object = resources_pb2.UserAppIDSet(user_id=user_id, app_id=app_id)
    post_model_outputs_response = stub.PostModelOutputs(
        service_pb2.PostModelOutputsRequest(
            user_app_id=user_data_object,
            model_id=model_id,
            version_id=model_version_id,
            inputs=[
                resources_pb2.Input(
                    data=resources_pb2.Data(
                        text=resources_pb2.Text(raw=raw_text)
                    )
                )
            ]
        ),
        metadata=metadata
    )
    if post_model_outputs_response.status.code != status_code_pb2.SUCCESS:
        st.error(f"Clarifai API request failed: {post_model_outputs_response.status.description}")
        return None
    # Return the Data proto so each caller can read the field it needs (text, image, or audio)
    return post_model_outputs_response.outputs[0].data
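# Usage sketch (illustrative values only): the returned Data proto exposes whichever
# field the model populated, e.g.
#   data = make_api_request('gpt-4-turbo', '<model-version-id>', 'Hello')
#   if data is not None:
#       st.write(data.text.raw)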
# GPT-4 Turbo
if model_option == "GPT-4 Turbo":
    model_id = 'gpt-4-turbo'
    model_version_id = '182136408b4b4002a920fd500839f2c8'
    raw_text = st.text_area("Enter text prompt:", 'I love your product very much')
    if st.button("Generate Text"):
        output_data = make_api_request(model_id, model_version_id, raw_text)
        if output_data is not None:
            st.write("Generated Text:", output_data.text.raw)
# GPT-4 Vision
elif model_option == "GPT-4 Vision":
model_id = 'gpt-4-vision-alternative'
model_version_id = '12b67ac2b5894fb9af9c06ebf8dc02fb'
raw_text = st.text_area("Enter text prompt for vision:", 'A penguin watching the sunset.')
if st.button("Generate Image (GPT-4 Vision)"):
output_image = make_api_request(model_id, model_version_id, raw_text)
st.image(output_image, caption='Generated Image', use_column_width=True)
# DALL-E API
elif model_option == "DALL-E API":
    PAT_dalle = 'bfdeb4029ef54d23a2e608b0aa4c00e4'
    USER_ID_dalle = 'openai'
    APP_ID_dalle = 'dall-e'
    MODEL_ID_dalle = 'dall-e-3'
    MODEL_VERSION_ID_dalle = 'dc9dcb6ee67543cebc0b9a025861b868'
    raw_text_dalle = st.text_area("Enter text prompt for DALL-E:", 'A penguin watching the sunset.')
    if st.button("Generate Image (DALL-E API)"):
        # DALL-E is hosted in the 'dall-e' app, so pass its user/app IDs explicitly
        output_data = make_api_request(MODEL_ID_dalle, MODEL_VERSION_ID_dalle, raw_text_dalle,
                                       user_id=USER_ID_dalle, app_id=APP_ID_dalle)
        if output_data is not None:
            st.image(output_data.image.base64, caption='Generated Image (DALL-E API)', use_column_width=True)
# Text-to-Speech (TTS)
elif model_option == "Text-to-Speech (TTS)":
    PAT_tts = 'bfdeb4029ef54d23a2e608b0aa4c00e4'
    USER_ID_tts = 'openai'
    APP_ID_tts = 'tts'
    MODEL_ID_tts = 'openai-tts-1'
    MODEL_VERSION_ID_tts = 'fff6ce1fd487457da95b79241ac6f02d'
    raw_text_tts = st.text_area("Enter text for Text-to-Speech:", 'I love your product very much')
    if st.button("Generate Speech"):
        # The TTS model is hosted in the 'tts' app, so pass its user/app IDs explicitly
        output_data = make_api_request(MODEL_ID_tts, MODEL_VERSION_ID_tts, raw_text_tts,
                                       user_id=USER_ID_tts, app_id=APP_ID_tts)
        if output_data is not None:
            st.audio(output_data.audio.base64, format="audio/wav")
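# To run locally (assumption: this file is saved as app.py with streamlit and clarifai-grpc
# installed): streamlit run app.py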