Spaces · Runtime error
Commit ae67bc1: Upload 2 files
Parent(s): b773a19
Files changed:
- app.py +87 -0
- requirements.txt +8 -0
app.py
ADDED
@@ -0,0 +1,87 @@
import streamlit as st
from clarifai_grpc.channel.clarifai_channel import ClarifaiChannel
from clarifai_grpc.grpc.api import resources_pb2, service_pb2, service_pb2_grpc
from clarifai_grpc.grpc.api.status import status_code_pb2

# Set up the Streamlit app
st.title("OpenAI Models Integration")

# Model selection
model_option = st.selectbox("Select Model", ["GPT-4 Turbo", "GPT-4 Vision", "DALL-E API", "Text-to-Speech (TTS)"])

# Common Clarifai credentials (note: the PAT is hardcoded in this commit)
PAT = 'bfdeb4029ef54d23a2e608b0aa4c00e4'
USER_ID = 'openai'
APP_ID = 'chat-completion'

# Send a text prompt to a Clarifai-hosted model and return the output Data proto.
# user_id/app_id are parameters so the DALL-E and TTS branches can target their own apps.
def make_api_request(model_id, model_version_id, raw_text, user_id=USER_ID, app_id=APP_ID):
    channel = ClarifaiChannel.get_grpc_channel()
    stub = service_pb2_grpc.V2Stub(channel)
    metadata = (('authorization', 'Key ' + PAT),)
    user_data_object = resources_pb2.UserAppIDSet(user_id=user_id, app_id=app_id)

    post_model_outputs_response = stub.PostModelOutputs(
        service_pb2.PostModelOutputsRequest(
            user_app_id=user_data_object,
            model_id=model_id,
            version_id=model_version_id,
            inputs=[
                resources_pb2.Input(
                    data=resources_pb2.Data(
                        text=resources_pb2.Text(raw=raw_text)
                    )
                )
            ]
        ),
        metadata=metadata
    )

    if post_model_outputs_response.status.code != status_code_pb2.SUCCESS:
        st.error(f"Clarifai API request failed: {post_model_outputs_response.status.description}")
        return None
    # Return the full Data proto; callers pick the field that matches the model's
    # output modality (text.raw, image.base64, or audio.base64).
    return post_model_outputs_response.outputs[0].data

# GPT-4 Turbo
if model_option == "GPT-4 Turbo":
    model_id = 'gpt-4-turbo'
    model_version_id = '182136408b4b4002a920fd500839f2c8'
    raw_text = st.text_area("Enter text prompt:", 'I love your product very much')
    if st.button("Generate Text"):
        output_data = make_api_request(model_id, model_version_id, raw_text)
        if output_data is not None:
            st.write("Generated Text:", output_data.text.raw)

# GPT-4 Vision
elif model_option == "GPT-4 Vision":
    model_id = 'gpt-4-vision-alternative'
    model_version_id = '12b67ac2b5894fb9af9c06ebf8dc02fb'
    raw_text = st.text_area("Enter text prompt for vision:", 'A penguin watching the sunset.')
    if st.button("Generate Image (GPT-4 Vision)"):
        output_data = make_api_request(model_id, model_version_id, raw_text)
        if output_data is not None:
            st.image(output_data.image.base64, caption='Generated Image', use_column_width=True)

# DALL-E API
elif model_option == "DALL-E API":
    APP_ID_dalle = 'dall-e'
    MODEL_ID_dalle = 'dall-e-3'
    MODEL_VERSION_ID_dalle = 'dc9dcb6ee67543cebc0b9a025861b868'
    raw_text_dalle = st.text_area("Enter text prompt for DALL-E:", 'A penguin watching the sunset.')
    if st.button("Generate Image (DALL-E API)"):
        output_data = make_api_request(MODEL_ID_dalle, MODEL_VERSION_ID_dalle, raw_text_dalle, app_id=APP_ID_dalle)
        if output_data is not None:
            st.image(output_data.image.base64, caption='Generated Image (DALL-E API)', use_column_width=True)

# Text-to-Speech (TTS)
elif model_option == "Text-to-Speech (TTS)":
    APP_ID_tts = 'tts'
    MODEL_ID_tts = 'openai-tts-1'
    MODEL_VERSION_ID_tts = 'fff6ce1fd487457da95b79241ac6f02d'
    raw_text_tts = st.text_area("Enter text for Text-to-Speech:", 'I love your product very much')
    if st.button("Generate Speech"):
        output_data = make_api_request(MODEL_ID_tts, MODEL_VERSION_ID_tts, raw_text_tts, app_id=APP_ID_tts)
        if output_data is not None:
            st.audio(output_data.audio.base64, format="audio/wav")
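As a side note for debugging the "Runtime error" status, it can help to exercise the Clarifai call outside Streamlit first. The snippet below is a minimal sketch of such a smoke test, not part of this commit; it reuses the PAT, user/app IDs, and the GPT-4 Turbo model and version IDs exactly as they appear in app.py, with a made-up test prompt.

# Minimal smoke test (sketch): call the GPT-4 Turbo model directly, outside Streamlit,
# using the same credentials and IDs hardcoded in app.py.
from clarifai_grpc.channel.clarifai_channel import ClarifaiChannel
from clarifai_grpc.grpc.api import resources_pb2, service_pb2, service_pb2_grpc
from clarifai_grpc.grpc.api.status import status_code_pb2

PAT = 'bfdeb4029ef54d23a2e608b0aa4c00e4'  # same hardcoded PAT as app.py
USER_ID, APP_ID = 'openai', 'chat-completion'
MODEL_ID, MODEL_VERSION_ID = 'gpt-4-turbo', '182136408b4b4002a920fd500839f2c8'

stub = service_pb2_grpc.V2Stub(ClarifaiChannel.get_grpc_channel())
response = stub.PostModelOutputs(
    service_pb2.PostModelOutputsRequest(
        user_app_id=resources_pb2.UserAppIDSet(user_id=USER_ID, app_id=APP_ID),
        model_id=MODEL_ID,
        version_id=MODEL_VERSION_ID,
        inputs=[resources_pb2.Input(data=resources_pb2.Data(text=resources_pb2.Text(raw="Say hello")))],
    ),
    metadata=(('authorization', 'Key ' + PAT),),
)

if response.status.code != status_code_pb2.SUCCESS:
    print("Request failed:", response.status.description)
else:
    print("Model reply:", response.outputs[0].data.text.raw)

If this script succeeds but the Space still fails, the problem is more likely the environment (missing packages) than the credentials or model IDs.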
requirements.txt
ADDED
@@ -0,0 +1,8 @@
streamlit
clarifai-grpc
pytesseract
tesseract
requests
numpy
opencv-python
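A quick way to confirm the installed environment matches what app.py actually imports is a hypothetical local import check like the one below (the module list mirrors the requirements above; opencv-python is imported as cv2 and clarifai-grpc as clarifai_grpc).

# Hypothetical local sanity check: confirm each dependency resolves by import name.
import importlib

for module in ["streamlit", "clarifai_grpc", "pytesseract", "requests", "numpy", "cv2"]:
    try:
        importlib.import_module(module)
        print(f"OK: {module}")
    except ImportError as exc:
        print(f"MISSING: {module} ({exc})")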