import gradio as gr
from gradio_client import Client, handle_file
from huggingface_hub import InferenceClient

# Client for the Moondream2 Space, used to describe uploaded images
moondream_client = Client("vikhyatk/moondream2")
# Text-generation client (Qwen/QwQ-32B-Preview)
llama_client = InferenceClient("Qwen/QwQ-32B-Preview")

# Conversation history, stored as (user_message, assistant_message) pairs
history = []

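
# describe_image: ask Moondream2 to describe the uploaded image, store the turn
# in the shared history, then pass the conversation to the text model.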
def describe_image(image, user_message):
    global history
    result = moondream_client.predict(
        img=handle_file(image),
        prompt="Describe this image.",
        api_name="/answer_question"
    )

    description = result
    history.append((user_message, description))

    return respond(user_message, history, "System: Describe the image.", 512, 0.7, 0.95)

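
# respond: build the chat-completion request from the system prompt, the stored
# history and the new message, then stream the model's answer token by token.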
def respond(message, history, system_message, max_tokens, temperature, top_p):
    messages = [{"role": "system", "content": system_message}]

    # Replay the stored (user, assistant) pairs into the message list
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})

    messages.append({"role": "user", "content": message})

    response = ""
    for chunk in llama_client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        # Some streamed chunks carry an empty delta, so guard against None
        token = chunk.choices[0].delta.content or ""
        response += token
        yield response

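
# chat_or_image: route to describe_image when an image is supplied, otherwise
# answer the text question directly and remember the finished exchange.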
def chat_or_image(image, user_message):
    global history
    if image:
        yield from describe_image(image, user_message)
    else:
        response = ""
        for response in respond(user_message, history, "System: Answer the user's question.", 512, 0.7, 0.95):
            yield response
        history.append((user_message, response))

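
# Two-column layout: image upload on the left, chat box and response on the right.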
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=1):
            image_input = gr.Image(type="filepath", label="Upload Image (optional)", elem_id="left-column")
        with gr.Column(scale=2):
            text_input = gr.Textbox(label="Ask or Chat", placeholder="Ask a question...", lines=2, elem_id="right-column")
            output_box = gr.Textbox(label="Response", lines=8)
            send_button = gr.Button("Send")

    # Stream chat_or_image's output into the response box
    send_button.click(chat_or_image, inputs=[image_input, text_input], outputs=output_box)

demo.launch(show_error=True)