Commit 426a38b · Parent(s): 9411465
Upload app.py

app.py CHANGED
--- app.py (before)
@@ -3,34 +3,77 @@ from clarifai_grpc.channel.clarifai_channel import ClarifaiChannel
 from clarifai_grpc.grpc.api import resources_pb2, service_pb2, service_pb2_grpc
 from clarifai_grpc.grpc.api.status import status_code_pb2
 
-# Set your Clarifai credentials and model details
-
-
 
 # Streamlit app
-st.title("AI Integration App")
 
 # Choose model type
-model_type = st.radio("Select Model Type", ["
 
 # Input text prompt from the user
-raw_text = st.text_input("Enter a text prompt:", '
 
 # Button to generate result
 if st.button("Generate Result"):
-    if model_type == "
 
         # Connect to Clarifai API for GPT-4 Turbo
-
-
-
-
-
         # Make a request to Clarifai API for GPT-4 Turbo
-
             service_pb2.PostModelOutputsRequest(
-                user_app_id=
-                model_id=
-                version_id=
                 inputs=[
                     resources_pb2.Input(
                         data=resources_pb2.Data(
@@ -41,30 +84,17 @@ if st.button("Generate Result"):
                     )
                 ]
             ),
-            metadata=
         )
-
         # Display the generated result if the request is successful
-        if
-            st.error(f"
         else:
-
-            st.image(
 
-    elif model_type == "GPT-4 Vision":
-        # Connect to Clarifai API for GPT-4 Vision
-        # Replace the following lines with actual GPT-4 Vision logic
-        st.warning("GPT-4 Vision integration code goes here.")
-
-    elif model_type == "DALL-E":
-        # Connect to Clarifai API for DALL-E
-        # Replace the following lines with actual DALL-E logic
-        st.warning("DALL-E integration code goes here.")
-
-    elif model_type == "Text-to-Speech":
-        # Connect to Clarifai API for Text-to-Speech
-        # Replace the following lines with actual Text-to-Speech logic
-        st.warning("Text-to-Speech integration code goes here.")
 
 # Add the beautiful social media icon section
 st.markdown("""
+++ app.py (after)
 from clarifai_grpc.grpc.api import resources_pb2, service_pb2, service_pb2_grpc
 from clarifai_grpc.grpc.api.status import status_code_pb2
 
+# Set your Clarifai credentials and model details for DALL-E
+PAT_DALLE = 'bfdeb4029ef54d23a2e608b0aa4c00e4'
+USER_ID_DALLE = 'openai'
+APP_ID_DALLE = 'dall-e'
+MODEL_ID_DALLE = 'dall-e-3'
+MODEL_VERSION_ID_DALLE = 'dc9dcb6ee67543cebc0b9a025861b868'
+
+# Set your Clarifai credentials and model details for GPT-4 Turbo
+PAT_GPT4 = 'bfdeb4029ef54d23a2e608b0aa4c00e4'
+USER_ID_GPT4 = 'openai'
+APP_ID_GPT4 = 'chat-completion'
+MODEL_ID_GPT4 = 'dall-e-3'
+MODEL_ID_GPT4 = 'gpt-4-turbo'
+MODEL_VERSION_ID_GPT4 = '182136408b4b4002a920fd500839f2c8'
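Note on the credentials block above: the commit hard-codes the same personal access token into both PAT constants, and anything committed to a Space is readable by anyone who can browse it. A minimal sketch of pulling the token from the environment instead; the CLARIFAI_PAT variable name is an illustrative assumption, not something this commit defines:

import os
import streamlit as st

# Assumed convention: the PAT is provided as a Space secret / environment variable
CLARIFAI_PAT = os.environ.get("CLARIFAI_PAT", "")
if not CLARIFAI_PAT:
    st.error("CLARIFAI_PAT is not set; add it as a secret before running the app.")
PAT_DALLE = CLARIFAI_PAT
PAT_GPT4 = CLARIFAI_PAT

The rest of the script could stay unchanged, since it only reads PAT_DALLE and PAT_GPT4.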
 
 # Streamlit app
+st.title("Pyresearch AI Integration App")
 
 # Choose model type
+model_type = st.radio("Select Model Type", ["DALL-E", "GPT-4 Turbo"])
 
 # Input text prompt from the user
+raw_text = st.text_input("Enter a text prompt:", 'ocr check mistake with image base with python opencv computer vision help out to know people')
 
 # Button to generate result
 if st.button("Generate Result"):
+    if model_type == "DALL-E":
+        # Connect to Clarifai API for DALL-E
+        channel_dalle = ClarifaiChannel.get_grpc_channel()
+        stub_dalle = service_pb2_grpc.V2Stub(channel_dalle)
+        metadata_dalle = (('authorization', 'Key ' + PAT_DALLE),)
+        userDataObject_dalle = resources_pb2.UserAppIDSet(user_id=USER_ID_DALLE, app_id=APP_ID_DALLE)
+
+        # Make a request to Clarifai API for DALL-E
+        post_model_outputs_response_dalle = stub_dalle.PostModelOutputs(
+            service_pb2.PostModelOutputsRequest(
+                user_app_id=userDataObject_dalle,
+                model_id=MODEL_ID_DALLE,
+                version_id=MODEL_VERSION_ID_DALLE,
+                inputs=[
+                    resources_pb2.Input(
+                        data=resources_pb2.Data(
+                            text=resources_pb2.Text(
+                                raw=raw_text
+                            )
+                        )
+                    )
+                ]
+            ),
+            metadata=metadata_dalle
+        )
+
+        # Display the generated image if the request is successful
+        if post_model_outputs_response_dalle.status.code != status_code_pb2.SUCCESS:
+            st.error(f"DALL-E API request failed: {post_model_outputs_response_dalle.status.description}")
+        else:
+            output_dalle = post_model_outputs_response_dalle.outputs[0].data.image.base64
+            st.image(output_dalle, caption='Generated Image (DALL-E)', use_column_width=True)
+
+    elif model_type == "GPT-4 Turbo":
         # Connect to Clarifai API for GPT-4 Turbo
+        channel_gpt4 = ClarifaiChannel.get_grpc_channel()
+        stub_gpt4 = service_pb2_grpc.V2Stub(channel_gpt4)
+        metadata_gpt4 = (('authorization', 'Key ' + PAT_GPT4),)
+        userDataObject_gpt4 = resources_pb2.UserAppIDSet(user_id=USER_ID_GPT4, app_id=APP_ID_GPT4)
+
         # Make a request to Clarifai API for GPT-4 Turbo
+        post_model_outputs_response_gpt4 = stub_gpt4.PostModelOutputs(
             service_pb2.PostModelOutputsRequest(
+                user_app_id=userDataObject_gpt4,
+                model_id=MODEL_ID_GPT4,
+                version_id=MODEL_VERSION_ID_GPT4,
                 inputs=[
                     resources_pb2.Input(
                         data=resources_pb2.Data(
@@ -41,30 +84,17 @@ if st.button("Generate Result"):
                     )
                 ]
             ),
+            metadata=metadata_gpt4
         )
+
         # Display the generated result if the request is successful
+        if post_model_outputs_response_gpt4.status.code != status_code_pb2.SUCCESS:
+            st.error(f"GPT-4 Turbo API request failed: {post_model_outputs_response_gpt4.status.description}")
         else:
+            output_gpt4 = post_model_outputs_response_gpt4.outputs[0].data.image.base64
+            st.image(output_gpt4, caption='Generated Image (GPT-4 Turbo)', use_column_width=True)
+
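One thing worth flagging in the new GPT-4 Turbo branch: gpt-4-turbo is a text-completion model, yet the success path reads outputs[0].data.image.base64 and renders it with st.image, so a successful call will not show the generated text. A minimal sketch of how that else branch might read the response instead, assuming the usual Clarifai convention that text models return their completion in data.text.raw:

        if post_model_outputs_response_gpt4.status.code != status_code_pb2.SUCCESS:
            st.error(f"GPT-4 Turbo API request failed: {post_model_outputs_response_gpt4.status.description}")
        else:
            # Assumption: the completion text is returned in data.text.raw
            output_gpt4 = post_model_outputs_response_gpt4.outputs[0].data.text.raw
            st.write(output_gpt4)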
|
 
 # Add the beautiful social media icon section
 st.markdown("""