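"""Gradio demo that chains two models: the moondream2 Space captions an
uploaded image, and QwQ-32B-Preview answers the user's question, using
that caption as extra context when an image is provided."""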
import gradio as gr
from gradio_client import Client, handle_file
from huggingface_hub import InferenceClient
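# Gradio client for the public moondream2 Space (image captioning) and a
# Hugging Face Inference API client for the QwQ-32B-Preview chat model.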
moondream_client = Client("vikhyatk/moondream2")
qwq_client = InferenceClient("Qwen/QwQ-32B-Preview")
def describe_image(image, user_message):
    # Ask moondream2 for a caption of the uploaded image.
    description = moondream_client.predict(
        img=handle_file(image),
        prompt="Describe this image.",
        api_name="/answer_question"
    )
    # Prepend the caption so QwQ can reason about the image content.
    user_message = description + "\n" + user_message
    qwq_result = qwq_client.chat_completion(
        messages=[{"role": "user", "content": user_message}],
        max_tokens=512,
        temperature=0.7,
        top_p=0.95
    )
    return qwq_result.choices[0].message.content
def chat_or_image(image, user_message):
    # With an image, caption it first and hand the caption to QwQ;
    # otherwise send the question straight to the chat model.
    if image:
        return describe_image(image, user_message)
    qwq_result = qwq_client.chat_completion(
        messages=[{"role": "user", "content": user_message}],
        max_tokens=512,
        temperature=0.7,
        top_p=0.95
    )
    return qwq_result.choices[0].message.content
demo = gr.Interface(
    fn=chat_or_image,
    inputs=[
        gr.Image(type="filepath", label="Upload image (Optional)"),
        gr.Textbox(label="Ask anything", placeholder="Ask...", lines=2),
    ],
    outputs="text",
)
if __name__ == "__main__":
    demo.launch(show_error=True)