import gradio as gr
from PIL import Image
import requests
import os
from together import Together
import base64
from threading import Thread
import time
import io
# Initialize Together client
client = None
def initialize_client(api_key=None):
    global client
    if api_key:
        os.environ["TOGETHER_API_KEY"] = api_key
    if "TOGETHER_API_KEY" in os.environ:
        client = Together()
    else:
        raise ValueError("Please provide a Together API Key")
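
# Downscale uploads and re-encode them as JPEG so the base64 payload sent to the API stays small.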
def encode_image(image_path, max_size=(800, 800), quality=85):
    with Image.open(image_path) as img:
        img.thumbnail(max_size)
        # JPEG has no alpha channel, so flatten transparent images onto a white RGB background.
        if img.mode in ('RGBA', 'LA'):
            background = Image.new("RGB", img.size, (255, 255, 255))
            background.paste(img, mask=img.split()[-1])
            img = background
        elif img.mode != "RGB":
            img = img.convert("RGB")
        buffered = io.BytesIO()
        img.save(buffered, format="JPEG", quality=quality)
        return base64.b64encode(buffered.getvalue()).decode('utf-8')
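
# ChatInterface-style generator: assembles the system prompt, prior turns, and the
# current text/image, then streams the model's reply as it is generated.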
def bot_streaming(message, history, together_api_key, max_new_tokens=250, temperature=0.7):
    if client is None:
        try:
            initialize_client(together_api_key)
        except Exception as e:
            yield f"Error initializing client: {str(e)}"
            return

    prompt = "You are a helpful AI assistant. Analyze the image provided (if any) and respond to the user's query or comment."
    messages = [{"role": "system", "content": prompt}]

    # Add history to messages
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})

    # Prepare the current message
    current_message = {"role": "user", "content": []}

    # Add text content
    if message.get("text"):
        current_message["content"].append({"type": "text", "text": message["text"]})

    # Add image content if present
    if message.get("files") and len(message["files"]) > 0:
        image_path = message["files"][0]["path"] if isinstance(message["files"][0], dict) else message["files"][0]
        image_base64 = encode_image(image_path)
        current_message["content"].append({
            "type": "image_url",
            # encode_image re-encodes the upload as JPEG, so label the data URL accordingly.
            "image_url": {"url": f"data:image/jpeg;base64,{image_base64}"},
        })

    messages.append(current_message)
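
    # Stream the completion from Together, yielding the accumulated text so the chat window updates as chunks arrive.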
    try:
        stream = client.chat.completions.create(
            model="meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
            messages=messages,
            max_tokens=max_new_tokens,
            temperature=temperature,
            stream=True,
        )

        response = ""
        for chunk in stream:
            if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content is not None:
                response += chunk.choices[0].delta.content
                yield response

        if not response:
            yield "No response generated. Please try again."
    except Exception as e:
        if "Request Entity Too Large" in str(e):
            yield "The image is too large. Please try with a smaller image or compress the existing one."
        else:
            yield f"An error occurred: {str(e)}"
with gr.Blocks() as demo:
    gr.Markdown("# Meta Llama-3.2-11B-Vision-Instruct (FREE)")
    gr.Markdown("Try the new Llama 3.2 11B Vision API by Meta for free through Together AI. Upload an image, and start chatting about it. Just paste in your Together AI API key and get started!")

    with gr.Row():
        together_api_key = gr.Textbox(
            label="Together API Key",
            placeholder="Enter your Together API key here",
            type="password",
        )

    with gr.Row():
        max_new_tokens = gr.Slider(
            minimum=10,
            maximum=500,
            value=250,
            step=10,
            label="Maximum number of new tokens",
        )
        temperature = gr.Number(
            value=0.7,
            minimum=0,
            maximum=1,
            step=0.1,
            label="Temperature",
        )

    chatbot = gr.Chatbot()
    msg = gr.MultimodalTextbox(label="Enter text or upload an image")
    clear = gr.Button("Clear")

    def respond(message, history, together_api_key, max_new_tokens, temperature):
        # bot_streaming yields plain strings, but a Chatbot output expects the full
        # conversation, so append a new turn and fill in the assistant's side as
        # partial responses arrive.
        history = history + [[message.get("text", ""), ""]]
        for partial in bot_streaming(message, history[:-1], together_api_key, max_new_tokens, temperature):
            history[-1][1] = partial
            yield history

    msg.submit(respond, [msg, chatbot, together_api_key, max_new_tokens, temperature], chatbot)
    clear.click(lambda: None, None, chatbot, queue=False)
if __name__ == "__main__":
    demo.launch(debug=True)
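
# Assumed runtime dependencies for this Space (not shown in this file): gradio, pillow, together.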