pyresearch committed on
Commit
7e297e1
·
verified ·
1 Parent(s): b7bc901

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +223 -0
  2. requirements.txt +9 -0
app.py ADDED
@@ -0,0 +1,223 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

import streamlit as st
from clarifai_grpc.channel.clarifai_channel import ClarifaiChannel
from clarifai_grpc.grpc.api import resources_pb2, service_pb2, service_pb2_grpc
from clarifai_grpc.grpc.api.status import status_code_pb2
5
# ---------------------------------------------------------------------------
# Clarifai credentials and model coordinates.
#
# SECURITY NOTE(review): the fallback values below are personal access tokens
# committed to source control — they should be rotated. Each token can now be
# supplied via an environment variable; behavior is unchanged when the
# variable is unset.
# ---------------------------------------------------------------------------

# GPT-4 Vision model (hosted under the "openai" community user on Clarifai).
PAT_GPT4 = os.environ.get('CLARIFAI_PAT_GPT4', '3ca5bd8b0f2244eb8d0e4b2838fc3cf1')
USER_ID_GPT4 = 'openai'
APP_ID_GPT4 = 'chat-completion'
MODEL_ID_GPT4 = 'openai-gpt-4-vision'
MODEL_VERSION_ID_GPT4 = '266df29bc09843e0aee9b7bf723c03c2'

# DALL-E image-generation model.
PAT_DALLE = os.environ.get('CLARIFAI_PAT_DALLE', 'bfdeb4029ef54d23a2e608b0aa4c00e4')
USER_ID_DALLE = 'openai'
APP_ID_DALLE = 'dall-e'
MODEL_ID_DALLE = 'dall-e-3'
MODEL_VERSION_ID_DALLE = 'dc9dcb6ee67543cebc0b9a025861b868'

# Text-to-Speech (TTS) model.
PAT_TTS = os.environ.get('CLARIFAI_PAT_TTS', 'bfdeb4029ef54d23a2e608b0aa4c00e4')
USER_ID_TTS = 'openai'
APP_ID_TTS = 'tts'
MODEL_ID_TTS = 'openai-tts-1'
MODEL_VERSION_ID_TTS = 'fff6ce1fd487457da95b79241ac6f02d'

# Module-level gRPC channel/stub for TTS, created once at import time; the
# auth metadata and user/app object are reused by the request handlers below.
channel_tts = ClarifaiChannel.get_grpc_channel()
stub_tts = service_pb2_grpc.V2Stub(channel_tts)
metadata_tts = (('authorization', 'Key ' + PAT_TTS),)
userDataObject_tts = resources_pb2.UserAppIDSet(user_id=USER_ID_TTS, app_id=APP_ID_TTS)
36
# ---------------------------------------------------------------------------
# Streamlit page header.
# ---------------------------------------------------------------------------
st.title("Fake news detection")

# App logo (hot-linked thumbnail image).
st.image("https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTdA-MJ_SUCRgLs1prqudpMdaX4x-x10Zqlwp7cpzXWCMM9xjBAJYWdJsDlLoHBqNpj8qs&usqp=CAU")
43
def get_tts_channel():
    """Return a fresh gRPC channel for TTS plus its auth metadata tuple.

    Returns:
        tuple: ``(channel, metadata)`` where ``metadata`` is the
        ``(('authorization', 'Key <PAT>'),)`` tuple expected by the
        Clarifai stubs' ``metadata=`` argument.

    Bug fix: the original returned ``channel.metadata``, an attribute that
    does not exist on a gRPC channel object and would raise AttributeError
    on the first call. The metadata is now built from ``PAT_TTS``, matching
    how every other request in this script authenticates.
    """
    channel = ClarifaiChannel.get_grpc_channel()
    metadata = (('authorization', 'Key ' + PAT_TTS),)
    return channel, metadata
47
# ---------------------------------------------------------------------------
# User inputs: model choice, claim text, and an optional image.
# ---------------------------------------------------------------------------
model_type = st.selectbox(
    "Select Model",
    ["GPT-4 Vision", "Text-to-Speech (TTS)"],
)
raw_text = st.text_area("This news is real or fake?")
image_upload = st.file_uploader("Upload Image", type=["jpg", "jpeg", "png"])
54
def _make_stub(pat, user_id, app_id):
    """Open a Clarifai gRPC channel; return (stub, auth metadata, user/app id)."""
    channel = ClarifaiChannel.get_grpc_channel()
    stub = service_pb2_grpc.V2Stub(channel)
    metadata = (('authorization', 'Key ' + pat),)
    user_app_id = resources_pb2.UserAppIDSet(user_id=user_id, app_id=app_id)
    return stub, metadata, user_app_id


def _post_outputs(stub, metadata, user_app_id, model_id, version_id, data):
    """Issue one PostModelOutputs request and return the raw response."""
    request = service_pb2.PostModelOutputsRequest(
        user_app_id=user_app_id,
        model_id=model_id,
        version_id=version_id,
        inputs=[resources_pb2.Input(data=data)],
    )
    return stub.PostModelOutputs(request, metadata=metadata)


# Dispatch on the selected model when the user clicks the button.
if st.button("Generate Result"):
    if model_type == "GPT-4 Vision":
        stub_gpt4, metadata_gpt4, userDataObject_gpt4 = _make_stub(
            PAT_GPT4, USER_ID_GPT4, APP_ID_GPT4)

        # Build the multimodal input: optional text plus optional image.
        input_data_gpt4 = resources_pb2.Data()
        if raw_text:
            input_data_gpt4.text.raw = raw_text
        if image_upload is not None:
            input_data_gpt4.image.base64 = image_upload.read()

        response_gpt4 = _post_outputs(
            stub_gpt4, metadata_gpt4, userDataObject_gpt4,
            MODEL_ID_GPT4, MODEL_VERSION_ID_GPT4, input_data_gpt4)

        if response_gpt4.status.code != status_code_pb2.SUCCESS:
            st.error(f"GPT-4 Vision API request failed: {response_gpt4.status.description}")
        else:
            output_gpt4 = response_gpt4.outputs[0].data

            if output_gpt4.HasField("image"):
                st.image(output_gpt4.image.base64,
                         caption='Generated Image (GPT-4 Vision)',
                         use_column_width=True)
            elif output_gpt4.HasField("text"):
                st.text(output_gpt4.text.raw)

                # Read the answer aloud via the TTS model.
                # BUG FIX: the original authorized this request with the
                # GPT-4 PAT (metadata_gpt4) while targeting the TTS app;
                # it now uses the module-level TTS stub and credentials.
                tts_input_data = resources_pb2.Data()
                tts_input_data.text.raw = output_gpt4.text.raw

                tts_response = _post_outputs(
                    stub_tts, metadata_tts, userDataObject_tts,
                    MODEL_ID_TTS, MODEL_VERSION_ID_TTS, tts_input_data)

                if tts_response.status.code == status_code_pb2.SUCCESS:
                    tts_output = tts_response.outputs[0].data
                    st.audio(tts_output.audio.base64, format='audio/wav')
                else:
                    st.error(f"Text-to-Speech (TTS) API request failed: {tts_response.status.description}")

    elif model_type == "DALL-E":
        # NOTE(review): unreachable with the current selectbox options
        # ("GPT-4 Vision" / "Text-to-Speech (TTS)"); kept intact for when
        # "DALL-E" is added back to the model picker.
        stub_dalle, metadata_dalle, userDataObject_dalle = _make_stub(
            PAT_DALLE, USER_ID_DALLE, APP_ID_DALLE)

        input_data_dalle = resources_pb2.Data()
        if raw_text:
            input_data_dalle.text.raw = raw_text

        response_dalle = _post_outputs(
            stub_dalle, metadata_dalle, userDataObject_dalle,
            MODEL_ID_DALLE, MODEL_VERSION_ID_DALLE, input_data_dalle)

        if response_dalle.status.code != status_code_pb2.SUCCESS:
            st.error(f"DALL-E API request failed: {response_dalle.status.description}")
        else:
            output_dalle = response_dalle.outputs[0].data

            if output_dalle.HasField("image"):
                st.image(output_dalle.image.base64,
                         caption='Generated Image (DALL-E)',
                         use_column_width=True)
            elif output_dalle.HasField("text"):
                st.text(output_dalle.text.raw)

    elif model_type == "Text-to-Speech (TTS)":
        stub_local, metadata_local, user_app_local = _make_stub(
            PAT_TTS, USER_ID_TTS, APP_ID_TTS)

        input_data_tts = resources_pb2.Data()
        if raw_text:
            input_data_tts.text.raw = raw_text

        response_tts = _post_outputs(
            stub_local, metadata_local, user_app_local,
            MODEL_ID_TTS, MODEL_VERSION_ID_TTS, input_data_tts)

        if response_tts.status.code != status_code_pb2.SUCCESS:
            st.error(f"Text-to-Speech (TTS) API request failed: {response_tts.status.description}")
        else:
            output_tts = response_tts.outputs[0].data

            # The TTS model may return both the echoed text and the audio.
            if output_tts.HasField("text"):
                st.text(output_tts.text.raw)
            if output_tts.HasField("audio"):
                st.audio(output_tts.audio.base64, format='audio/wav')
193
# Footer: social-media icon strip rendered as raw HTML
# (hence unsafe_allow_html=True).
_SOCIAL_LINKS_HTML = """
<div align="center">
  <a href="https://github.com/pyresearch/pyresearch" style="text-decoration:none;">
    <img src="https://user-images.githubusercontent.com/34125851/226594737-c21e2dda-9cc6-42ef-b4e7-a685fea4a21d.png" width="2%" alt="" /></a>
  <img src="https://user-images.githubusercontent.com/34125851/226595799-160b0da3-c9e0-4562-8544-5f20460f7cc9.png" width="2%" alt="" />
  <a href="https://www.linkedin.com/company/pyresearch/" style="text-decoration:none;">
    <img src="https://user-images.githubusercontent.com/34125851/226596446-746ffdd0-a47e-4452-84e3-bf11ec2aa26a.png" width="2%" alt="" /></a>
  <img src="https://user-images.githubusercontent.com/34125851/226595799-160b0da3-c9e0-4562-8544-5f20460f7cc9.png" width="2%" alt="" />
  <a href="https://twitter.com/Noorkhokhar10" style="text-decoration:none;">
    <img src="https://user-images.githubusercontent.com/34125851/226599162-9b11194e-4998-440a-ba94-c8a5e1cdc676.png" width="2%" alt="" /></a>
  <img src="https://user-images.githubusercontent.com/34125851/226595799-160b0da3-c9e0-4562-8544-5f20460f7cc9.png" width="2%" alt="" />
  <a href="https://www.youtube.com/@Pyresearch" style="text-decoration:none;">
    <img src="https://user-images.githubusercontent.com/34125851/226599904-7d5cc5c0-89d2-4d1e-891e-19bee1951744.png" width="2%" alt="" /></a>
  <img src="https://user-images.githubusercontent.com/34125851/226595799-160b0da3-c9e0-4562-8544-5f20460f7cc9.png" width="2%" alt="" />
  <a href="https://www.facebook.com/Pyresearch" style="text-decoration:none;">
    <img src="https://user-images.githubusercontent.com/34125851/226600380-a87a9142-e8e0-4ec9-bf2c-dd6e9da2f05a.png" width="2%" alt="" /></a>
  <img src="https://user-images.githubusercontent.com/34125851/226595799-160b0da3-c9e0-4562-8544-5f20460f7cc9.png" width="2%" alt="" />
  <a href="https://www.instagram.com/pyresearch/" style="text-decoration:none;">
    <img src="https://user-images.githubusercontent.com/34125851/226601355-ffe0b597-9840-4e10-bbef-43d6c74b5a9e.png" width="2%" alt="" /></a>
</div>
<hr>
"""

st.markdown(_SOCIAL_LINKS_HTML, unsafe_allow_html=True)
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
streamlit
pytesseract
tesseract
requests
numpy
opencv-python
clarifai_grpc