import streamlit as st
from PIL import Image
import requests
from io import BytesIO
from clarifai_grpc.channel.clarifai_channel import ClarifaiChannel
from clarifai_grpc.grpc.api import resources_pb2, service_pb2, service_pb2_grpc
from clarifai_grpc.grpc.api.status import status_code_pb2
# Set up the Streamlit app
st.title("OpenAI Models Integration")
# Display HTML content
html_content = """
<div align="center">
<p>
<a href="https://www.youtube.com/channel/UCyB_7yHs7y8u9rONDSXgkCg?view_as=subscriber" target="_blank">
<img width="100%" src="https://github.com/pyresearch/pyresearch/blob/main/ai.png"></a>
</p>
[English](README.md) | [简体中文](README.md)
</div>
## Installation
You can simply use pip to install the latest version of pyresearch.
<div align="center">`pip install pyresearch`</div>
<div align="center">
<a href="https://github.com/pyresearch/pyresearch" style="text-decoration:none;">
<img src="https://user-images.githubusercontent.com/34125851/226594737-c21e2dda-9cc6-42ef-b4e7-a685fea4a21d.png" width="2%" alt="" /></a>
<img src="https://user-images.githubusercontent.com/34125851/226595799-160b0da3-c9e0-4562-8544-5f20460f7cc9.png" width="2%" alt="" />
<a href="https://www.linkedin.com/company/pyresearch/" style="text-decoration:none;">
<img src="https://user-images.githubusercontent.com/34125851/226596446-746ffdd0-a47e-4452-84e3-bf11ec2aa26a.png" width="2%" alt="" /></a>
<img src="https://user-images.githubusercontent.com/34125851/226595799-160b0da3-c9e0-4562-8544-5f20460f7cc9.png" width="2%" alt="" />
<a href="https://twitter.com/Noorkhokhar10" style="text-decoration:none;">
<img src="https://user-images.githubusercontent.com/34125851/226599162-9b11194e-4998-440a-ba94-c8a5e1cdc676.png" width="2%" alt="" /></a>
<img src="https://user-images.githubusercontent.com/34125851/226595799-160b0da3-c9e0-4562-8544-5f20460f7cc9.png" width="2%" alt="" />
<a href="https://www.youtube.com/@Pyresearch" style="text-decoration:none;">
<img src="https://user-images.githubusercontent.com/34125851/226599904-7d5cc5c0-89d2-4d1e-891e-19bee1951744.png" width="2%" alt="" /></a>
<img src="https://user-images.githubusercontent.com/34125851/226595799-160b0da3-c9e0-4562-8544-5f20460f7cc9.png" width="2%" alt="" />
<a href="https://www.facebook.com/Pyresearch" style="text-decoration:none;">
<img src="https://user-images.githubusercontent.com/34125851/226600380-a87a9142-e8e0-4ec9-bf2c-dd6e9da2f05a.png" width="2%" alt="" /></a>
<img src="https://user-images.githubusercontent.com/34125851/226595799-160b0da3-c9e0-4562-8544-5f20460f7cc9.png" width="2%" alt="" />
<a href="https://www.instagram.com/pyresearch/" style="text-decoration:none;">
<img src="https://user-images.githubusercontent.com/34125851/226601355-ffe0b597-9840-4e10-bbef-43d6c74b5a9e.png" width="2%" alt="" /></a>
</div>
<hr>
"""
st.markdown(html_content, unsafe_allow_html=True)
# Model selection
model_option = st.selectbox("Select Model", ["GPT-4 Turbo", "GPT-4 Vision", "DALL-E API", "Text-to-Speech (TTS)"])
# Common Clarifai credentials (PAT is a Clarifai Personal Access Token)
PAT = 'bfdeb4029ef54d23a2e608b0aa4c00e4'
USER_ID = 'openai'
APP_ID = 'chat-completion'
# Function to make a Clarifai API request and return the model output data (or None on failure)
def make_api_request(model_id, model_version_id, raw_text):
    channel = ClarifaiChannel.get_grpc_channel()
    stub = service_pb2_grpc.V2Stub(channel)
    metadata = (('authorization', 'Key ' + PAT),)
    userDataObject = resources_pb2.UserAppIDSet(user_id=USER_ID, app_id=APP_ID)

    post_model_outputs_response = stub.PostModelOutputs(
        service_pb2.PostModelOutputsRequest(
            user_app_id=userDataObject,
            model_id=model_id,
            version_id=model_version_id,
            inputs=[
                resources_pb2.Input(
                    data=resources_pb2.Data(
                        text=resources_pb2.Text(
                            raw=raw_text
                        )
                    )
                )
            ]
        ),
        metadata=metadata
    )

    if post_model_outputs_response.status.code != status_code_pb2.SUCCESS:
        st.error(f"Clarifai API request failed: {post_model_outputs_response.status.description}")
        return None

    # Return the full output data so callers can read the text or image field as appropriate
    return post_model_outputs_response.outputs[0].data
# GPT-4 Turbo
if model_option == "GPT-4 Turbo":
    model_id = 'gpt-4-turbo'
    model_version_id = '182136408b4b4002a920fd500839f2c8'
    raw_text = st.text_area("Enter text prompt:", 'I love your Pyresearch very much')

    if st.button("Generate Text"):
        output_data = make_api_request(model_id, model_version_id, raw_text)
        if output_data is not None:
            st.write("Generated Text:", output_data.text.raw)
# GPT-4 Vision
elif model_option == "GPT-4 Vision":
    model_id = 'gpt-4-vision-alternative'
    model_version_id = '12b67ac2b5894fb9af9c06ebf8dc02fb'
    raw_text = st.text_area("Enter text prompt for vision:", 'A penguin watching the sunset.')

    if st.button("Generate Image (GPT-4 Vision)"):
        output_data = make_api_request(model_id, model_version_id, raw_text)
        if output_data is not None:
            try:
                # Attempt to display the returned image bytes
                st.image(output_data.image.base64, caption='Generated Image', use_column_width=True)
            except Exception as e:
                st.error(f"Error displaying image: {e}")
# ...
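# The remaining selectbox options ("DALL-E API" and "Text-to-Speech (TTS)") are not shown above.
# Below is a minimal sketch of how they could follow the same request pattern; the model IDs
# and version IDs are assumptions/placeholders, not values from the original file.

# DALL-E API (sketch)
elif model_option == "DALL-E API":
    model_id = 'dall-e-3'                      # assumed model ID
    model_version_id = 'REPLACE_WITH_VERSION'  # placeholder version ID
    raw_text = st.text_area("Enter image prompt:", 'A watercolor painting of a lighthouse')

    if st.button("Generate Image (DALL-E)"):
        output_data = make_api_request(model_id, model_version_id, raw_text)
        if output_data is not None:
            st.image(output_data.image.base64, caption='Generated Image', use_column_width=True)

# Text-to-Speech (sketch)
elif model_option == "Text-to-Speech (TTS)":
    model_id = 'openai-tts-1'                  # assumed model ID
    model_version_id = 'REPLACE_WITH_VERSION'  # placeholder version ID
    raw_text = st.text_area("Enter text to speak:", 'Welcome to Pyresearch')

    if st.button("Generate Speech"):
        output_data = make_api_request(model_id, model_version_id, raw_text)
        if output_data is not None:
            st.audio(output_data.audio.base64, format='audio/wav')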