pyresearch committed on
Commit
d5a5133
·
verified ·
1 Parent(s): 8eec5d4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -222
app.py CHANGED
@@ -1,233 +1,33 @@
1
  import streamlit as st
2
- from clarifai_grpc.channel.clarifai_channel import ClarifaiChannel
3
- from clarifai_grpc.grpc.api import resources_pb2, service_pb2, service_pb2_grpc
4
- from clarifai_grpc.grpc.api.status import status_code_pb2
5
  import torch
6
- from transformers import AutoModelForCausalLM, AutoTokenizer
7
 
 
 
 
 
 
8
 
9
- # GPT-4 credentials
10
- PAT_GPT4 = "3ca5bd8b0f2244eb8d0e4b2838fc3cf1"
11
- USER_ID_GPT4 = "openai"
12
- APP_ID_GPT4 = "chat-completion"
13
- MODEL_ID_GPT4 = "openai-gpt-4-vision"
14
- MODEL_VERSION_ID_GPT4 = "266df29bc09843e0aee9b7bf723c03c2"
15
-
16
- # DALL-E credentials
17
- PAT_DALLE = "bfdeb4029ef54d23a2e608b0aa4c00e4"
18
- USER_ID_DALLE = "openai"
19
- APP_ID_DALLE = "dall-e"
20
- MODEL_ID_DALLE = "dall-e-3"
21
- MODEL_VERSION_ID_DALLE = "dc9dcb6ee67543cebc0b9a025861b868"
22
-
23
- # TTS credentials
24
- PAT_TTS = "bfdeb4029ef54d23a2e608b0aa4c00e4"
25
- USER_ID_TTS = "openai"
26
- APP_ID_TTS = "tts"
27
- MODEL_ID_TTS = "openai-tts-1"
28
- MODEL_VERSION_ID_TTS = "fff6ce1fd487457da95b79241ac6f02d"
29
-
30
- # NewsGuardian model credentials
31
- PAT_NEWSGUARDIAN = "your_news_guardian_pat"
32
- USER_ID_NEWSGUARDIAN = "your_user_id"
33
- APP_ID_NEWSGUARDIAN = "your_app_id"
34
- MODEL_ID_NEWSGUARDIAN = "your_model_id"
35
- MODEL_VERSION_ID_NEWSGUARDIAN = "your_model_version_id"
36
-
37
-
38
-
39
-
40
-
41
-
42
- # Set up gRPC channel for NewsGuardian model
43
- channel_tts = ClarifaiChannel.get_grpc_channel()
44
- stub_tts = service_pb2_grpc.V2Stub(channel_tts)
45
- metadata_tts = (('authorization', 'Key ' + PAT_TTS),)
46
- userDataObject_tts = resources_pb2.UserAppIDSet(user_id=USER_ID_TTS, app_id=APP_ID_TTS,)
47
 
48
  # Streamlit app
49
- st.title("NewsGuardian")
50
-
51
-
52
- # Inserting logo
53
- st.image("https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTdA-MJ_SUCRgLs1prqudpMdaX4x-x10Zqlwp7cpzXWCMM9xjBAJYWdJsDlLoHBqNpj8qs&usqp=CAU")
54
- # Function to get gRPC channel for NewsGuardian model
55
- def get_tts_channel():
56
- channel_tts = ClarifaiChannel.get_grpc_channel()
57
- return channel_tts, channel_tts.metadata
58
-
59
 
 
60
 
61
  # User input
62
- model_type = st.selectbox("Select Model", ["NewsGuardian model","NewsGuardian model"])
63
- raw_text = st.text_area("This news is real or fake?")
64
- image_upload = st.file_uploader("Upload Image", type=["jpg", "jpeg", "png"])
65
-
66
- # Button to generate result
67
- if st.button("NewsGuardian News Result"):
68
- if model_type == "NewsGuardian model":
69
- # Set up gRPC channel for NewsGuardian model
70
- channel_gpt4 = ClarifaiChannel.get_grpc_channel()
71
- stub_gpt4 = service_pb2_grpc.V2Stub(channel_gpt4)
72
- metadata_gpt4 = (('authorization', 'Key ' + PAT_GPT4),)
73
- userDataObject_gpt4 = resources_pb2.UserAppIDSet(user_id=USER_ID_GPT4, app_id=APP_ID_GPT4)
74
-
75
- # Prepare the request for NewsGuardian model
76
- input_data_gpt4 = resources_pb2.Data()
77
-
78
- if raw_text:
79
- input_data_gpt4.text.raw = raw_text
80
-
81
- if image_upload is not None:
82
- image_bytes_gpt4 = image_upload.read()
83
- input_data_gpt4.image.base64 = image_bytes_gpt4
84
-
85
- post_model_outputs_response_gpt4 = stub_gpt4.PostModelOutputs(
86
- service_pb2.PostModelOutputsRequest(
87
- user_app_id=userDataObject_gpt4,
88
- model_id=MODEL_ID_GPT4,
89
- version_id=MODEL_VERSION_ID_GPT4,
90
- inputs=[resources_pb2.Input(data=input_data_gpt4)]
91
- ),
92
- metadata=metadata_gpt4 # Use metadata directly in the gRPC request
93
- )
94
-
95
- # Check if the request was successful for NewsGuardian model
96
- if post_model_outputs_response_gpt4.status.code != status_code_pb2.SUCCESS:
97
- st.error(f"NewsGuardian model API request failed: {post_model_outputs_response_gpt4.status.description}")
98
- else:
99
- # Get the output for NewsGuardian model
100
- output_gpt4 = post_model_outputs_response_gpt4.outputs[0].data
101
-
102
- # Display the result for NewsGuardian model
103
- if output_gpt4.HasField("image"):
104
- st.image(output_gpt4.image.base64, caption='Generated Image (NewsGuardian model)', use_column_width=True)
105
- elif output_gpt4.HasField("text"):
106
- # Display the text result
107
- st.text(output_gpt4.text.raw)
108
-
109
- # Convert text to speech and play the audio
110
- stub_tts = service_pb2_grpc.V2Stub(channel_gpt4) # Use the same channel for TTS
111
-
112
- tts_input_data = resources_pb2.Data()
113
- tts_input_data.text.raw = output_gpt4.text.raw
114
-
115
- tts_response = stub_tts.PostModelOutputs(
116
- service_pb2.PostModelOutputsRequest(
117
- user_app_id=userDataObject_tts,
118
- model_id=MODEL_ID_TTS,
119
- version_id=MODEL_VERSION_ID_TTS,
120
- inputs=[resources_pb2.Input(data=tts_input_data)]
121
- ),
122
- metadata=metadata_gpt4 # Use the same metadata for TTS
123
- )
124
-
125
- # Check if the TTS request was successful
126
- if tts_response.status.code == status_code_pb2.SUCCESS:
127
- tts_output = tts_response.outputs[0].data
128
- st.audio(tts_output.audio.base64, format='audio/wav')
129
- else:
130
- st.error(f"NewsGuardian model API request failed: {tts_response.status.description}")
131
-
132
- elif model_type == "DALL-E":
133
- # Set up gRPC channel for DALL-E
134
- channel_dalle = ClarifaiChannel.get_grpc_channel()
135
- stub_dalle = service_pb2_grpc.V2Stub(channel_dalle)
136
- metadata_dalle = (('authorization', 'Key ' + PAT_DALLE),)
137
- userDataObject_dalle = resources_pb2.UserAppIDSet(user_id=USER_ID_DALLE, app_id=APP_ID_DALLE)
138
-
139
- # Prepare the request for DALL-E
140
- input_data_dalle = resources_pb2.Data()
141
-
142
- if raw_text:
143
- input_data_dalle.text.raw = raw_text
144
-
145
- post_model_outputs_response_dalle = stub_dalle.PostModelOutputs(
146
- service_pb2.PostModelOutputsRequest(
147
- user_app_id=userDataObject_dalle,
148
- model_id=MODEL_ID_DALLE,
149
- version_id=MODEL_VERSION_ID_DALLE,
150
- inputs=[resources_pb2.Input(data=input_data_dalle)]
151
- ),
152
- metadata=metadata_dalle
153
- )
154
-
155
- # Check if the request was successful for DALL-E
156
- if post_model_outputs_response_dalle.status.code != status_code_pb2.SUCCESS:
157
- st.error(f"DALL-E API request failed: {post_model_outputs_response_dalle.status.description}")
158
- else:
159
- # Get the output for DALL-E
160
- output_dalle = post_model_outputs_response_dalle.outputs[0].data
161
-
162
- # Display the result for DALL-E
163
- if output_dalle.HasField("image"):
164
- st.image(output_dalle.image.base64, caption='Generated Image (DALL-E)', use_column_width=True)
165
- elif output_dalle.HasField("text"):
166
- st.text(output_dalle.text.raw)
167
-
168
- elif model_type == "NewsGuardian model":
169
- # Set up gRPC channel for NewsGuardian model
170
- channel_tts = ClarifaiChannel.get_grpc_channel()
171
- stub_tts = service_pb2_grpc.V2Stub(channel_tts)
172
- metadata_tts = (('authorization', 'Key ' + PAT_TTS),)
173
- userDataObject_tts = resources_pb2.UserAppIDSet(user_id=USER_ID_TTS, app_id=APP_ID_TTS)
174
-
175
- # Prepare the request for NewsGuardian model
176
- input_data_tts = resources_pb2.Data()
177
-
178
- if raw_text:
179
- input_data_tts.text.raw = raw_text
180
-
181
- post_model_outputs_response_tts = stub_tts.PostModelOutputs(
182
- service_pb2.PostModelOutputsRequest(
183
- user_app_id=userDataObject_tts,
184
- model_id=MODEL_ID_TTS,
185
- version_id=MODEL_VERSION_ID_TTS,
186
- inputs=[resources_pb2.Input(data=input_data_tts)]
187
- ),
188
- metadata=metadata_tts
189
  )
190
 
191
- # Check if the request was successful for NewsGuardian model
192
- if post_model_outputs_response_tts.status.code != status_code_pb2.SUCCESS:
193
- st.error(f"NewsGuardian model API request failed: {post_model_outputs_response_tts.status.description}")
194
- else:
195
- # Get the output for NewsGuardian model
196
- output_tts = post_model_outputs_response_tts.outputs[0].data
197
-
198
- # Display the result for NewsGuardian model
199
- if output_tts.HasField("text"):
200
- st.text(output_tts.text.raw)
201
-
202
- if output_tts.HasField("audio"):
203
- st.audio(output_tts.audio.base64, format='audio/wav')
204
-
205
-
206
- # Add the beautiful social media icon section
207
- st.markdown("""
208
- <div align="center">
209
- <a href="https://github.com/pyresearch/pyresearch" style="text-decoration:none;">
210
- <img src="https://user-images.githubusercontent.com/34125851/226594737-c21e2dda-9cc6-42ef-b4e7-a685fea4a21d.png" width="2%" alt="" /></a>
211
- <img src="https://user-images.githubusercontent.com/34125851/226595799-160b0da3-c9e0-4562-8544-5f20460f7cc9.png" width="2%" alt="" />
212
- <a href="https://www.linkedin.com/company/pyresearch/" style="text-decoration:none;">
213
- <img src="https://user-images.githubusercontent.com/34125851/226596446-746ffdd0-a47e-4452-84e3-bf11ec2aa26a.png" width="2%" alt="" /></a>
214
- <img src="https://user-images.githubusercontent.com/34125851/226595799-160b0da3-c9e0-4562-8544-5f20460f7cc9.png" width="2%" alt="" />
215
- <a href="https://twitter.com/Noorkhokhar10" style="text-decoration:none;">
216
- <img src="https://user-images.githubusercontent.com/34125851/226599162-9b11194e-4998-440a-ba94-c8a5e1cdc676.png" width="2%" alt="" /></a>
217
- <img src="https://user-images.githubusercontent.com/34125851/226595799-160b0da3-c9e0-4562-8544-5f20460f7cc9.png" width="2%" alt="" />
218
- <a href="https://www.youtube.com/@Pyresearch" style="text-decoration:none;">
219
- <img src="https://user-images.githubusercontent.com/34125851/226599904-7d5cc5c0-89d2-4d1e-891e-19bee1951744.png" width="2%" alt="" /></a>
220
- <img src="https://user-images.githubusercontent.com/34125851/226595799-160b0da3-c9e0-4562-8544-5f20460f7cc9.png" width="2%" alt="" />
221
- <a href="https://www.facebook.com/Pyresearch" style="text-decoration:none;">
222
- <img src="https://user-images.githubusercontent.com/34125851/226600380-a87a9142-e8e0-4ec9-bf2c-dd6e9da2f05a.png" width="2%" alt="" /></a>
223
- <img src="https://user-images.githubusercontent.com/34125851/226595799-160b0da3-c9e0-4562-8544-5f20460f7cc9.png" width="2%" alt="" />
224
- <a href="https://www.instagram.com/pyresearch/" style="text-decoration:none;">
225
- <img src="https://user-images.githubusercontent.com/34125851/226601355-ffe0b597-9840-4e10-bbef-43d6c74b5a9e.png" width="2%" alt="" /></a>
226
- </div>
227
- <hr>
228
- """, unsafe_allow_html=True)
229
-
230
-
231
-
232
-
233
-
 
1
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch


@st.cache_resource
def load_model():
    """Load the phi-2 tokenizer and model once and cache them across reruns.

    Streamlit re-executes this whole script on every widget interaction;
    without caching, the multi-gigabyte model would be downloaded/reloaded
    on every button click.
    """
    # NOTE(review): trust_remote_code executes code shipped in the model
    # repository — acceptable for the pinned "microsoft/phi-2" id, but do
    # not make the model id user-configurable with this flag set.
    tokenizer = AutoTokenizer.from_pretrained(
        "microsoft/phi-2", trust_remote_code=True
    )
    model = AutoModelForCausalLM.from_pretrained(
        "microsoft/phi-2",
        torch_dtype="auto",
        device_map="auto",
        trust_remote_code=True,
    )
    return tokenizer, model


# Load tokenizer and model (cached across Streamlit reruns)
tokenizer, model = load_model()

# Streamlit app
st.title("Fake news Generation with Transformers Microsoft phi-2")

st.image("https://raw.githubusercontent.com/noorkhokhar99/NewsGuardian/main/logo.jpeg")

# User input
prompt = st.text_area("Enter your prompt:", "This news is real or fake you get results from here NewsGuardian")

# Generate output
if st.button("Generate"):
    # Inference only — no gradient bookkeeping needed.
    with torch.no_grad():
        token_ids = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
        output_ids = model.generate(
            token_ids.to(model.device),
            max_new_tokens=20,
            do_sample=True,
            temperature=0.1
        )

    # Decode only the newly generated tokens, skipping the echoed prompt
    # (generate() returns prompt + continuation in one tensor).
    output = tokenizer.decode(output_ids[0][token_ids.size(1):])
    st.text("Generated Output:")
    st.write(output)