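"""Gradio chat app backed by huggingface_hub.InferenceClient.

Supports text and image input, streaming responses, a choice of inference
providers, and a bring-your-own-key (BYOK) override for the default HF_TOKEN
environment token.
"""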
import gradio as gr
from huggingface_hub import InferenceClient
import os
import json  # Used for debug-printing request payloads
import base64
from PIL import Image
import io

ACCESS_TOKEN = os.getenv("HF_TOKEN")
print(f"Access token from HF_TOKEN env var loaded. Is it None? {ACCESS_TOKEN is None}. Length if not None: {len(ACCESS_TOKEN) if ACCESS_TOKEN else 'N/A'}")


def encode_image(image_path_or_pil):
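    """Encode a PIL Image or an image file path to a base64 JPEG string.

    Returns the raw base64 string (no data-URL prefix) or None on failure.
    """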
    if not image_path_or_pil:
        print("No image path or PIL Image provided to encode_image")
        return None
    try:
        if isinstance(image_path_or_pil, Image.Image):
            image = image_path_or_pil
        elif isinstance(image_path_or_pil, str):
            if not os.path.exists(image_path_or_pil):
                print(f"Error: Image path does not exist: {image_path_or_pil}")
                return None
            image = Image.open(image_path_or_pil)
        else:
            print(f"Error: Unsupported type for encode_image: {type(image_path_or_pil)}")
            return None
        if image.mode == 'RGBA':
            image = image.convert('RGB')  # JPEG has no alpha channel
        buffered = io.BytesIO()
        image.save(buffered, format="JPEG")
        return base64.b64encode(buffered.getvalue()).decode("utf-8")
    except Exception as e:
        print(f"Error encoding image: {e}")
        return None
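
# Illustrative usage (hypothetical filename): encode_image("cat.png") returns a
# plain base64 string, which the request builders below wrap as
# "data:image/jpeg;base64,<string>" so vision models can consume it as an
# OpenAI-style image_url content part.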


def respond(
    message,
    image_files,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    frequency_penalty,
    seed,
    provider,
    custom_api_key,  # Value from the byok_textbox component
    custom_model,
    model_search_term,
    selected_model
):
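    """Stream a chat completion for the current turn.

    `message` (text) and `image_files` (list of image paths) form the current
    user turn; `history` holds prior turns as (user, assistant) pairs, where
    the user part is either a plain string or a (text, [image_paths]) tuple.
    Yields the accumulated response text as tokens arrive.
    """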
print(f"--- New Respond Call ---") | |
print(f"Received message: '{message}'") | |
print(f"Received {len(image_files) if image_files else 0} image files.") | |
# print(f"History length: {len(history)}") # History can be verbose | |
print(f"System message: '{system_message}'") | |
print(f"Generation Params: MaxTokens={max_tokens}, Temp={temperature}, TopP={top_p}, FreqPenalty={frequency_penalty}, Seed={seed}") | |
print(f"Selected provider: '{provider}'") | |
# Explicitly show the raw custom_api_key received | |
raw_key_type = type(custom_api_key) | |
raw_key_len = len(custom_api_key) if isinstance(custom_api_key, str) else 'N/A (not a string)' | |
print(f"Raw custom_api_key from UI: type={raw_key_type}, length={raw_key_len}") | |
if isinstance(custom_api_key, str) and len(custom_api_key) > 0: | |
print(f"Raw custom_api_key (masked): '{custom_api_key[:4]}...{custom_api_key[-4:]}'" if len(custom_api_key) > 8 else custom_api_key) | |

    # Resolve which token to use: a non-empty BYOK value wins; otherwise fall
    # back to the HF_TOKEN environment variable.
    effective_custom_key = custom_api_key.strip() if isinstance(custom_api_key, str) else ""
    if effective_custom_key:
        token_to_use = effective_custom_key
        print(f"TOKEN SELECTION: USING CUSTOM API KEY (BYOK). Length: {len(token_to_use)}")
        if ACCESS_TOKEN and token_to_use == ACCESS_TOKEN:
            print("INFO: Custom key is identical to the environment HF_TOKEN.")
    else:
        token_to_use = ACCESS_TOKEN  # None if HF_TOKEN is unset or empty
        if token_to_use:
            print(f"TOKEN SELECTION: USING DEFAULT API KEY (HF_TOKEN from env). Length: {len(token_to_use)}")
        else:
            print("TOKEN SELECTION: HF_TOKEN is not set and no custom key was provided.")

    if not token_to_use:
        print("CRITICAL WARNING: No API token available (neither custom nor default). Inference will likely fail unless the model/provider allows unauthenticated access.")
        # InferenceClient handles token=None by trying its own env lookup or failing.
    elif len(token_to_use) > 8:
        print(f"FINAL TOKEN for InferenceClient: '{token_to_use[:4]}...{token_to_use[-4:]}' (masked)")
    else:
        print(f"FINAL TOKEN for InferenceClient: '{token_to_use}' (short token)")

    # Initialize the Inference Client with the selected provider and token.
    client = InferenceClient(token=token_to_use, provider=provider)
    print(f"Hugging Face Inference Client initialized with provider: '{provider}'.")
    if seed == -1:  # -1 means "random": omit the seed from the request
        seed = None

    # Build the content parts for the current user message (text and/or images).
    user_content_parts = []
    if message and message.strip():
        user_content_parts.append({"type": "text", "text": message})
    if image_files:
        for img_file_path in image_files:
            if img_file_path:  # A string path from the Gradio MultimodalTextbox
                encoded_image = encode_image(img_file_path)
                if encoded_image:
                    user_content_parts.append({
                        "type": "image_url",
                        "image_url": {"url": f"data:image/jpeg;base64,{encoded_image}"}
                    })
                else:
                    print(f"Warning: Failed to encode image for current message: {img_file_path}")

    # Collapse the parts into the structure the API expects.
    if not user_content_parts:  # No text and no images
        print("Warning: Current user message is empty (no text, no images).")
        final_user_content = ""  # Send an empty prompt and let the API handle it
    elif len(user_content_parts) == 1 and user_content_parts[0]["type"] == "text":
        final_user_content = user_content_parts[0]["text"]  # Text-only: plain string
    else:
        final_user_content = user_content_parts  # Multimodal: list of content parts
    # Assemble the OpenAI-style messages list for the API.
    messages = [{"role": "system", "content": system_message}]
    for hist_user_content, hist_assistant_content in history:
        # hist_user_content is a string (text) or a (text, [image_paths]) tuple.
        if hist_user_content:
            if isinstance(hist_user_content, tuple) and len(hist_user_content) == 2:
                hist_text, hist_image_paths = hist_user_content
                current_hist_user_parts = []
                if hist_text and hist_text.strip():
                    current_hist_user_parts.append({"type": "text", "text": hist_text})
                for hist_img_path in hist_image_paths or []:
                    encoded_hist_img = encode_image(hist_img_path)
                    if encoded_hist_img:
                        current_hist_user_parts.append({
                            "type": "image_url",
                            "image_url": {"url": f"data:image/jpeg;base64,{encoded_hist_img}"}
                        })
                    else:
                        print(f"Warning: Failed to encode history image: {hist_img_path}")
                if current_hist_user_parts:  # Only add turns that still have content
                    messages.append({"role": "user", "content": current_hist_user_parts})
            elif isinstance(hist_user_content, str):  # Text-only history entry
                messages.append({"role": "user", "content": hist_user_content})
            else:
                print(f"Warning: Unexpected type for history user content: {type(hist_user_content)}")
        if hist_assistant_content:
            messages.append({"role": "assistant", "content": hist_assistant_content})
    messages.append({"role": "user", "content": final_user_content})
    # print(f"Final messages object for API: {json.dumps(messages, indent=2)}")  # Very verbose; enable for deep debugging

    model_to_use = custom_model.strip() if custom_model.strip() else selected_model
    print(f"Model selected for inference: '{model_to_use}'")

    response_text = ""
    print(f"Sending request to provider '{provider}' for model '{model_to_use}'. Streaming enabled.")
    parameters = {
        "max_tokens": max_tokens,
        "temperature": temperature,
        "top_p": top_p,
        "frequency_penalty": frequency_penalty,
    }
    if seed is not None:
        parameters["seed"] = seed

    try:
        stream = client.chat_completion(
            model=model_to_use,
            messages=messages,
            stream=True,
            **parameters
        )
        for chunk in stream:
            if hasattr(chunk, 'choices') and len(chunk.choices) > 0:
                delta = chunk.choices[0].delta
                if delta and hasattr(delta, 'content') and delta.content:
                    response_text += delta.content
                    yield response_text  # Stream the accumulated text so far
    except Exception as e:
        error_message = f"{type(e).__name__}: {str(e)}"
        print(f"ERROR DURING INFERENCE: {error_message}")
        # For client errors (4xx) the request body is usually the culprit, so dump it.
        if hasattr(e, 'response') and e.response is not None:
            print(f"Error details: Status {e.response.status_code}. Response text: {e.response.text}")
            if 400 <= e.response.status_code < 500:
                try:
                    print(f"Offending request messages payload (first 1000 chars): {json.dumps(messages, indent=2)[:1000]}")
                except Exception as dump_err:
                    print(f"Could not dump messages payload: {dump_err}")
        response_text += f"\nAn error occurred: {error_message}"
        yield response_text
    print("Completed response generation for current call.")


def validate_provider(api_key, provider_choice):
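    """Hook for byok_textbox/provider_radio changes; currently a pass-through."""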
    # This hook originally forced 'hf-inference' whenever no custom key was
    # provided for another provider. InferenceClient already routes HF tokens
    # and provider-specific keys on its own, so the user's selection is kept:
    # if not api_key.strip() and provider_choice != "hf-inference":
    #     print(f"No BYOK, but provider '{provider_choice}' selected. Forcing 'hf-inference'.")
    #     return gr.update(value="hf-inference")
    return gr.update(value=provider_choice)


# GRADIO UI
with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
    chatbot = gr.Chatbot(
        height=600,
        show_copy_button=True,
        placeholder="Select a model, enter your message, and upload images if needed.",
        layout="panel",
        avatar_images=(None, "https://huggingface.co/chat/huggingchat/logo.svg")  # (user, bot) avatars
    )
    msg = gr.MultimodalTextbox(
        placeholder="Type a message or upload images...",
        show_label=False,
        container=False,
        scale=12,  # scale only takes effect inside a gr.Row() or similar layout
        file_types=["image"],
        file_count="multiple",  # Allow multiple image uploads
        sources=["upload"]  # "clipboard" could be added as a source too
    )

    with gr.Accordion("Settings", open=False):
        system_message_box = gr.Textbox(
            value="You are a helpful AI assistant that can understand images and text.",
            placeholder="You are a helpful assistant.",
            label="System Prompt"
        )
        with gr.Row():
            with gr.Column():
                max_tokens_slider = gr.Slider(1, 4096, value=512, step=1, label="Max new tokens")
                temperature_slider = gr.Slider(0.1, 2.0, value=0.7, step=0.05, label="Temperature")
                top_p_slider = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-P")
            with gr.Column():
                frequency_penalty_slider = gr.Slider(-2.0, 2.0, value=0.0, step=0.1, label="Frequency Penalty")
                seed_slider = gr.Slider(-1, 65535, value=-1, step=1, label="Seed (-1 for random)")

        providers_list = ["hf-inference", "cerebras", "together", "sambanova", "novita", "cohere", "fireworks-ai", "hyperbolic", "nebius"]
        provider_radio = gr.Radio(choices=providers_list, value="hf-inference", label="Inference Provider")
        byok_textbox = gr.Textbox(
            value="", label="BYOK (Bring Your Own Key)",
            info="Enter your API key. For 'hf-inference', use an HF token. For other providers, use their specific key or an HF token if supported.",
            placeholder="Enter your API token here", type="password"
        )
        custom_model_box = gr.Textbox(
            value="", label="Custom Model ID / Endpoint",
            info="(Optional) Provide a custom model ID (e.g., 'meta-llama/Llama-3-70b-chat-hf') or a full endpoint URL. Overrides the featured model selection.",
            placeholder="org/model-name or full URL"
        )
        model_search_box = gr.Textbox(label="Filter Featured Models", placeholder="Search...", lines=1)
        models_list = [
            "meta-llama/Llama-3.2-11B-Vision-Instruct", "meta-llama/Llama-3.3-70B-Instruct",
            "meta-llama/Llama-3.1-70B-Instruct", "meta-llama/Llama-3.0-70B-Instruct",
            "meta-llama/Llama-3.2-3B-Instruct", "meta-llama/Llama-3.2-1B-Instruct",
            "meta-llama/Llama-3.1-8B-Instruct", "NousResearch/Hermes-3-Llama-3.1-8B",
            "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", "mistralai/Mistral-Nemo-Instruct-2407",
            "mistralai/Mixtral-8x7B-Instruct-v0.1", "mistralai/Mistral-7B-Instruct-v0.3",
            "mistralai/Mistral-7B-Instruct-v0.2", "Qwen/Qwen3-235B-A22B", "Qwen/Qwen3-32B",
            "Qwen/Qwen2.5-72B-Instruct", "Qwen/Qwen2.5-3B-Instruct", "Qwen/Qwen2.5-0.5B-Instruct",
            "Qwen/QwQ-32B", "Qwen/Qwen2.5-Coder-32B-Instruct", "microsoft/Phi-3.5-mini-instruct",
            "microsoft/Phi-3-mini-128k-instruct", "microsoft/Phi-3-mini-4k-instruct",
        ]
        featured_model_radio = gr.Radio(
            label="Select a Featured Model", choices=models_list,
            value="meta-llama/Llama-3.2-11B-Vision-Instruct", interactive=True
        )
        gr.Markdown("[All Text-to-Text Models](https://huggingface.co/models?inference_provider=all&pipeline_tag=text-generation&sort=trending) | [All Multimodal Models](https://huggingface.co/models?inference_provider=all&pipeline_tag=image-text-to-text&sort=trending)")

    # Chat history lives in the `chatbot` component itself: `handle_user_input`
    # appends the user's turn to it, and `process_bot_response` reads it back
    # before calling the API, so no separate gr.State is needed.
    def handle_user_input(multimodal_input, chat_history_list):
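        """Append the user's text and/or images to the chatbot history.

        Text-only turns are stored as a plain string; multimodal turns as a
        list mixing the text string with one (image_path,) tuple per image,
        which Gradio's Chatbot renders as inline images.
        """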
        text_input = (multimodal_input.get("text") or "").strip()
        file_inputs = multimodal_input.get("files", [])  # Uploaded files, if any

        if not text_input and not file_inputs:
            return chat_history_list  # No change if input is empty

        if file_inputs:
            chatbot_user_message = []
            if text_input:
                chatbot_user_message.append(text_input)
            for file_obj in file_inputs:
                # Depending on the Gradio version, entries are plain path strings
                # or tempfile wrappers exposing the path via a .name attribute.
                path = file_obj if isinstance(file_obj, str) else getattr(file_obj, "name", None)
                if path:
                    chatbot_user_message.append((path,))  # Tuple marks an image path
            chat_history_list.append([chatbot_user_message, None])
        else:  # Text only
            chat_history_list.append([text_input, None])

        # `process_bot_response` reconstructs text and image paths from
        # chat_history_list[-1][0] before calling respond().
        return chat_history_list

    # Take the updated history, call `respond`, and stream the reply back.
    def process_bot_response(
        current_chat_history,  # Full history from the chatbot component
        system_msg, max_tkns, temp, tp_p, freq_pen, sd, prov, api_k, cust_model, srch_term, sel_model
    ):
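        """Unpack the latest user turn, call respond(), and stream the reply.

        Yields the full history after each streamed token so the Chatbot
        updates in place.
        """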
        if not current_chat_history or not current_chat_history[-1][0]:
            print("Bot: History is empty or last user message is empty.")
            yield current_chat_history  # This is a generator, so yield rather than return a value
            return

        # The last user turn is whatever handle_user_input stored: a plain
        # string, or a list of [text, (image_path,), ...] for multimodal turns.
        last_user_turn_content = current_chat_history[-1][0]
        current_message_text = ""
        current_image_paths = []
        if isinstance(last_user_turn_content, str):  # Text-only
            current_message_text = last_user_turn_content
        elif isinstance(last_user_turn_content, list):  # Multimodal
            for item in last_user_turn_content:
                if isinstance(item, str):
                    current_message_text = item  # Assumes a single text part
                elif isinstance(item, tuple) and len(item) > 0 and isinstance(item[0], str):
                    current_image_paths.append(item[0])  # item[0] is the image path

        # Convert every prior turn into the format respond() expects for its
        # history parameter: a plain string, or a (text, [image_paths]) tuple.
        history_for_api = []
        for user_content, assistant_content in current_chat_history[:-1]:
            api_hist_user_entry = None
            if isinstance(user_content, str):  # Simple text turn
                api_hist_user_entry = user_content
            elif isinstance(user_content, list):  # Multimodal turn from handle_user_input
                hist_text = ""
                hist_paths = []
                for item in user_content:
                    if isinstance(item, str):
                        hist_text = item
                    elif isinstance(item, tuple):
                        hist_paths.append(item[0])
                api_hist_user_entry = (hist_text, hist_paths)
            history_for_api.append((api_hist_user_entry, assistant_content))

        # Clear the placeholder for the bot's response in the last history item,
        # then stream partial responses into it.
        current_chat_history[-1][1] = ""
        stream = respond(
            current_message_text,
            current_image_paths,
            history_for_api,  # History *before* the current turn
            system_msg, max_tkns, temp, tp_p, freq_pen, sd, prov, api_k, cust_model, srch_term, sel_model
        )
        for partial_response in stream:
            current_chat_history[-1][1] = partial_response
            yield current_chat_history

    # Event chain:
    # 1. User submits message (text and/or files).
    # 2. handle_user_input appends the user's message to the chatbot history.
    # 3. process_bot_response calls the API and streams the reply into the chatbot.
    # 4. The MultimodalTextbox is cleared.
    submit_event = msg.submit(
        handle_user_input,
        inputs=[msg, chatbot],  # Current message plus full history
        outputs=[chatbot],  # Show the user's message immediately
        queue=False  # Process user input without queueing
    ).then(
        process_bot_response,
        inputs=[
            chatbot,  # Full history, including the latest user message
            system_message_box, max_tokens_slider, temperature_slider, top_p_slider,
            frequency_penalty_slider, seed_slider, provider_radio, byok_textbox,
            custom_model_box, model_search_box, featured_model_radio
        ],
        outputs=[chatbot]  # Stream the bot's response into the chatbot
    ).then(
        lambda: gr.update(value=None),  # Clear the MultimodalTextbox (text and files)
        None,  # No inputs
        [msg],  # Component to clear
        queue=False
    )

    def filter_models_choices(search_term):
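        """Filter the featured-model radio choices by a case-insensitive substring."""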
# print(f"Filtering models with: '{search_term}'") | |
if not search_term: return gr.update(choices=models_list) | |
filtered = [m for m in models_list if search_term.lower() in m.lower()] | |
# print(f"Filtered models: {filtered}") | |
return gr.update(choices=filtered if filtered else []) | |
model_search_box.change(fn=filter_models_choices, inputs=model_search_box, outputs=featured_model_radio) | |

    # The featured-model radio could optionally populate custom_model_box, but
    # for now custom_model_box is a pure override: when it is empty, respond()
    # falls back to the selected featured model.

    # Provider validation (currently a pass-through; InferenceClient handles token routing).
    byok_textbox.change(fn=validate_provider, inputs=[byok_textbox, provider_radio], outputs=provider_radio)
    provider_radio.change(fn=validate_provider, inputs=[byok_textbox, provider_radio], outputs=provider_radio)
print("Gradio UI defined. Initializing...") | |
if __name__ == "__main__": | |
print("Launching Gradio demo...") | |
demo.launch(show_api=True, debug=True) # Enable debug for more Gradio logs | |
print("Gradio demo launched.") |