pyresearch committed
Commit 3c30c07 · 1 Parent(s): 39b2719

Upload app.py

Files changed (1)
  1. app.py +57 -114
app.py CHANGED
@@ -1,124 +1,67 @@
import streamlit as st
- from PIL import Image
- import requests
- from io import BytesIO
from clarifai_grpc.channel.clarifai_channel import ClarifaiChannel
from clarifai_grpc.grpc.api import resources_pb2, service_pb2, service_pb2_grpc
from clarifai_grpc.grpc.api.status import status_code_pb2

- # Set up the Streamlit app
- st.title("OpenAI Models Integration")
-
- # ... (previous code)
-
- # Display HTML content
- html_content = """
- <div align="center">
-   <p>
-     <a href="https://www.youtube.com/channel/UCyB_7yHs7y8u9rONDSXgkCg?view_as=subscriber" target="_blank">
-       <img width="100%" src="https://github.com/pyresearch/pyresearch/blob/main/ai.png"></a>
-   </p>
-
-   [English](README.md) | [简体中文](README.md)
- </div>
-
- ## Installation
- You can simply use pip to install the latest version of pyresearch.
-
- <div align="center">`pip install pyresearch`</div>
-
- <div align="center">
-   <a href="https://github.com/pyresearch/pyresearch" style="text-decoration:none;">
-     <img src="https://user-images.githubusercontent.com/34125851/226594737-c21e2dda-9cc6-42ef-b4e7-a685fea4a21d.png" width="2%" alt="" /></a>
-   <img src="https://user-images.githubusercontent.com/34125851/226595799-160b0da3-c9e0-4562-8544-5f20460f7cc9.png" width="2%" alt="" />
-   <a href="https://www.linkedin.com/company/pyresearch/" style="text-decoration:none;">
-     <img src="https://user-images.githubusercontent.com/34125851/226596446-746ffdd0-a47e-4452-84e3-bf11ec2aa26a.png" width="2%" alt="" /></a>
-   <img src="https://user-images.githubusercontent.com/34125851/226595799-160b0da3-c9e0-4562-8544-5f20460f7cc9.png" width="2%" alt="" />
-   <a href="https://twitter.com/Noorkhokhar10" style="text-decoration:none;">
-     <img src="https://user-images.githubusercontent.com/34125851/226599162-9b11194e-4998-440a-ba94-c8a5e1cdc676.png" width="2%" alt="" /></a>
-   <img src="https://user-images.githubusercontent.com/34125851/226595799-160b0da3-c9e0-4562-8544-5f20460f7cc9.png" width="2%" alt="" />
-   <a href="https://www.youtube.com/@Pyresearch" style="text-decoration:none;">
-     <img src="https://user-images.githubusercontent.com/34125851/226599904-7d5cc5c0-89d2-4d1e-891e-19bee1951744.png" width="2%" alt="" /></a>
-   <img src="https://user-images.githubusercontent.com/34125851/226595799-160b0da3-c9e0-4562-8544-5f20460f7cc9.png" width="2%" alt="" />
-   <a href="https://www.facebook.com/Pyresearch" style="text-decoration:none;">
-     <img src="https://user-images.githubusercontent.com/34125851/226600380-a87a9142-e8e0-4ec9-bf2c-dd6e9da2f05a.png" width="2%" alt="" /></a>
-   <img src="https://user-images.githubusercontent.com/34125851/226595799-160b0da3-c9e0-4562-8544-5f20460f7cc9.png" width="2%" alt="" />
-   <a href="https://www.instagram.com/pyresearch/" style="text-decoration:none;">
-     <img src="https://user-images.githubusercontent.com/34125851/226601355-ffe0b597-9840-4e10-bbef-43d6c74b5a9e.png" width="2%" alt="" /></a>
- </div>
-
- <hr>
- """
-
- st.markdown(html_content, unsafe_allow_html=True)
-
- # Continue with the rest of the Streamlit app code...
- # ... (as provided in the previous response)
-
-
- # Model selection
- model_option = st.selectbox("Select Model", ["GPT-4 Turbo", "GPT-4 Vision", "DALL-E API", "Text-to-Speech (TTS)"])
-
- # Common Clarifai credentials
PAT = 'bfdeb4029ef54d23a2e608b0aa4c00e4'
USER_ID = 'openai'
- APP_ID = 'chat-completion'
-
- # Function to make API request and display output
- def make_api_request(model_id, model_version_id, raw_text):
-     channel = ClarifaiChannel.get_grpc_channel()
-     stub = service_pb2_grpc.V2Stub(channel)
-     metadata = (('authorization', 'Key ' + PAT),)
-     userDataObject = resources_pb2.UserAppIDSet(user_id=USER_ID, app_id=APP_ID)

-     post_model_outputs_response = stub.PostModelOutputs(
-         service_pb2.PostModelOutputsRequest(
-             user_app_id=userDataObject,
-             model_id=model_id,
-             version_id=model_version_id,
-             inputs=[
-                 resources_pb2.Input(
-                     data=resources_pb2.Data(
-                         text=resources_pb2.Text(
-                             raw=raw_text
                        )
                    )
-                 )
-             ]
-         ),
-         metadata=metadata
-     )
-
-     if post_model_outputs_response.status.code != status_code_pb2.SUCCESS:
-         st.error(f"Clarifai API request failed: {post_model_outputs_response.status.description}")
-     else:
-         return post_model_outputs_response.outputs[0].data.image.base64
-
- # GPT-4 Turbo
- if model_option == "GPT-4 Turbo":
-     model_id = 'gpt-4-turbo'
-     model_version_id = '182136408b4b4002a920fd500839f2c8'
-     raw_text = st.text_area("Enter text prompt:", 'I love your Pyresearch very much')
-     if st.button("Generate Text"):
-         output_text = make_api_request(model_id, model_version_id, raw_text)
-         st.write("Generated Text:", output_text)
-
- # GPT-4 Vision
- # ... (previous code)
-
- # GPT-4 Vision
- elif model_option == "GPT-4 Vision":
-     model_id = 'gpt-4-vision-alternative'
-     model_version_id = '12b67ac2b5894fb9af9c06ebf8dc02fb'
-     raw_text = st.text_area("Enter text prompt for vision:", 'A penguin watching the sunset.')
-     if st.button("Generate Image (GPT-4 Vision)"):
-         output_image = make_api_request(model_id, model_version_id, raw_text)
-
-         try:
-             # Attempt to display the image
-             st.image(output_image, caption='Generated Image', use_column_width=True)
-         except Exception as e:
-             st.error(f"Error displaying image: {e}")
-
- # ...
-
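One bug in the removed code above is worth noting: make_api_request returned outputs[0].data.image.base64 for every model, so the GPT-4 Turbo branch ended up passing image bytes (or None) to st.write. Below is a minimal sketch of a corrected helper, reusing the imports and the PAT, USER_ID, and APP_ID constants from the removed version; the return_image flag is a hypothetical parameter added here for illustration, and data.text.raw / data.image.base64 are the Clarifai gRPC output fields for text and image models respectively.

```python
def make_api_request(model_id, model_version_id, raw_text, return_image=False):
    """Call a Clarifai-hosted model and return its text or image output."""
    channel = ClarifaiChannel.get_grpc_channel()
    stub = service_pb2_grpc.V2Stub(channel)
    metadata = (('authorization', 'Key ' + PAT),)
    user_data_object = resources_pb2.UserAppIDSet(user_id=USER_ID, app_id=APP_ID)

    response = stub.PostModelOutputs(
        service_pb2.PostModelOutputsRequest(
            user_app_id=user_data_object,
            model_id=model_id,
            version_id=model_version_id,
            inputs=[
                resources_pb2.Input(
                    data=resources_pb2.Data(text=resources_pb2.Text(raw=raw_text))
                )
            ],
        ),
        metadata=metadata,
    )

    if response.status.code != status_code_pb2.SUCCESS:
        st.error(f"Clarifai API request failed: {response.status.description}")
        return None

    # Text models populate data.text.raw; image models populate data.image.base64.
    data = response.outputs[0].data
    return data.image.base64 if return_image else data.text.raw
```

The committed replacement below drops this helper entirely and inlines a single GPT-4 Turbo request instead.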
 
import streamlit as st
from clarifai_grpc.channel.clarifai_channel import ClarifaiChannel
from clarifai_grpc.grpc.api import resources_pb2, service_pb2, service_pb2_grpc
from clarifai_grpc.grpc.api.status import status_code_pb2

+ # Set your Clarifai credentials and model details
PAT = 'bfdeb4029ef54d23a2e608b0aa4c00e4'
USER_ID = 'openai'

+ # Streamlit app
+ st.title("AI Integration App")
+
+ # Choose model type
+ model_type = st.radio("Select Model Type", ["GPT-4 Turbo", "GPT-4 Vision", "DALL-E", "Text-to-Speech"])
+
+ # Input text prompt from the user
+ raw_text = st.text_input("Enter a text prompt:", 'I love your product very much')
+
+ # Button to generate result
+ if st.button("Generate Result"):
+     if model_type == "GPT-4 Turbo":
+         # Connect to Clarifai API for GPT-4 Turbo
+         channel = ClarifaiChannel.get_grpc_channel()
+         stub = service_pb2_grpc.V2Stub(channel)
+         metadata = (('authorization', 'Key ' + PAT),)
+         userDataObject = resources_pb2.UserAppIDSet(user_id=USER_ID, app_id='chat-completion')
+
+         # Make a request to Clarifai API for GPT-4 Turbo
+         post_model_outputs_response = stub.PostModelOutputs(
+             service_pb2.PostModelOutputsRequest(
+                 user_app_id=userDataObject,
+                 model_id='gpt-4-turbo',
+                 version_id='182136408b4b4002a920fd500839f2c8',
+                 inputs=[
+                     resources_pb2.Input(
+                         data=resources_pb2.Data(
+                             text=resources_pb2.Text(
+                                 raw=raw_text
+                             )
                        )
                    )
+                 ]
+             ),
+             metadata=metadata
+         )
+
+         # Display the generated result if the request is successful
+         if post_model_outputs_response.status.code != status_code_pb2.SUCCESS:
+             st.error(f"Clarifai API request failed: {post_model_outputs_response.status.description}")
+         else:
+             output = post_model_outputs_response.outputs[0].data.image.base64
+             st.image(output, caption='Generated Image', use_column_width=True)
+
+     elif model_type == "GPT-4 Vision":
+         # Connect to Clarifai API for GPT-4 Vision
+         # Replace the following lines with actual GPT-4 Vision logic
+         st.warning("GPT-4 Vision integration code goes here.")
+
+     elif model_type == "DALL-E":
+         # Connect to Clarifai API for DALL-E
+         # Replace the following lines with actual DALL-E logic
+         st.warning("DALL-E integration code goes here.")
+
+     elif model_type == "Text-to-Speech":
+         # Connect to Clarifai API for Text-to-Speech
+         # Replace the following lines with actual Text-to-Speech logic
+         st.warning("Text-to-Speech integration code goes here.")
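Two follow-ups on the new version: the Clarifai personal access token is hardcoded in the source (and is now public in this commit, so it should be rotated), and the GPT-4 Turbo branch reads outputs[0].data.image.base64 and renders it with st.image even though gpt-4-turbo returns text. Below is a minimal sketch of the same branch with the token read from the environment and the completion displayed as text; the CLARIFAI_PAT variable name is an assumption, not something this app currently defines.

```python
import os

import streamlit as st
from clarifai_grpc.channel.clarifai_channel import ClarifaiChannel
from clarifai_grpc.grpc.api import resources_pb2, service_pb2, service_pb2_grpc
from clarifai_grpc.grpc.api.status import status_code_pb2

# Read the Clarifai PAT from the environment instead of committing it to the repo.
PAT = os.environ.get("CLARIFAI_PAT", "")
USER_ID = 'openai'
APP_ID = 'chat-completion'

if not PAT:
    st.error("Set the CLARIFAI_PAT environment variable before running the app.")
    st.stop()

raw_text = st.text_input("Enter a text prompt:", 'I love your product very much')

if st.button("Generate Result"):
    channel = ClarifaiChannel.get_grpc_channel()
    stub = service_pb2_grpc.V2Stub(channel)
    metadata = (('authorization', 'Key ' + PAT),)
    user_data_object = resources_pb2.UserAppIDSet(user_id=USER_ID, app_id=APP_ID)

    response = stub.PostModelOutputs(
        service_pb2.PostModelOutputsRequest(
            user_app_id=user_data_object,
            model_id='gpt-4-turbo',
            version_id='182136408b4b4002a920fd500839f2c8',
            inputs=[
                resources_pb2.Input(
                    data=resources_pb2.Data(text=resources_pb2.Text(raw=raw_text))
                )
            ],
        ),
        metadata=metadata,
    )

    if response.status.code != status_code_pb2.SUCCESS:
        st.error(f"Clarifai API request failed: {response.status.description}")
    else:
        # gpt-4-turbo is a text model, so the completion lives in data.text.raw.
        st.write(response.outputs[0].data.text.raw)
```

On a hosted Streamlit or Hugging Face Space deployment the same value could come from st.secrets or the Space's secret settings rather than a plain environment variable.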