suayptalha committed
Commit 1e5a891 · verified · 1 parent: c617e3e

Update app.py

Files changed (1): app.py (+55, -19)
app.py CHANGED
@@ -1,35 +1,71 @@
 import gradio as gr
+from huggingface_hub import InferenceClient
 from gradio_client import Client, handle_file

-# Gradio Client
-client = Client("vikhyatk/moondream2")  # Replace with your Gradio app URL or identifier
-
-
-def process_image_upload(image):
-    """
-    This function processes the uploaded image and sends it to the Gradio app.
-    """
-    # Convert the uploaded image to a format compatible with the Gradio Client
-    image_file = handle_file(image)  # Handle the uploaded image as a file
+# Initialize the HuggingFace InferenceClient or another client if needed
+client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+
+# Function to process image and return a description using another API (e.g., Moondream2)
+def describe_image(image):
+    # Call the external API to get a description of the image
     result = client.predict(
-        img=image_file,
+        img=handle_file(image),
         prompt="Describe this image.",
         api_name="/answer_question"
     )
     return result

-# Gradio UI
-with gr.Blocks() as demo:
-    gr.Markdown("## Image Description with Gradio Client")
-
-    with gr.Row():
-        image_input = gr.Image(type="file", label="Upload an Image")
-        output_text = gr.Textbox(label="Image Description", interactive=False)
-
-    submit_button = gr.Button("Get Description")
-
-    submit_button.click(fn=process_image_upload, inputs=image_input, outputs=output_text)
+def respond(
+    message,
+    history: list[tuple[str, str]],
+    system_message,
+    max_tokens,
+    temperature,
+    top_p,
+    image
+):
+    messages = [{"role": "system", "content": system_message}]
+
+    for val in history:
+        if val[0]:
+            messages.append({"role": "user", "content": val[0]})
+        if val[1]:
+            messages.append({"role": "assistant", "content": val[1]})
+
+    # Process the image if provided
+    if image:
+        description = describe_image(image)
+        return description
+
+    # If no image, proceed with the usual chat flow
+    messages.append({"role": "user", "content": message})
+
+    response = ""
+
+    for message in client.chat_completion(
+        messages,
+        max_tokens=max_tokens,
+        stream=True,
+        temperature=temperature,
+        top_p=top_p,
+    ):
+        token = message.choices[0].delta.content
+
+        response += token
+        yield response
+
+# Set up the Gradio interface with image input and output for description
+demo = gr.Interface(
+    fn=respond,
+    inputs=[
+        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
+        gr.Image(type="file", label="Upload Image")  # Allow image upload
+    ],
+    outputs="text",  # Display the description as text output
+)

 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
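
A note on the image path in the new version: the module-level client is now a huggingface_hub InferenceClient, which has no predict() method, while describe_image() still uses the gradio_client call style from the previous version. Below is a minimal sketch that keeps a separate gradio_client.Client for the captioning Space; the client name is illustrative, and the /answer_question endpoint with its img and prompt parameters is taken from the earlier version of this file, so it may have changed on the Space.

# Minimal sketch: a dedicated gradio_client.Client for the captioning Space,
# kept separate from the InferenceClient used for chat.
# "moondream_client" is an illustrative name; the endpoint and parameters
# below come from the previous version of this file and may differ today.
from gradio_client import Client, handle_file

moondream_client = Client("vikhyatk/moondream2")

def describe_image(image_path):
    # handle_file() wraps a local path or URL so the Space receives it as a file
    return moondream_client.predict(
        img=handle_file(image_path),
        prompt="Describe this image.",
        api_name="/answer_question",
    )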
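
The respond() generator streams tokens from InferenceClient.chat_completion with stream=True. A standalone sketch of that loop follows, with a guard added because streamed chunks can arrive with empty delta content; the function and variable names are illustrative, and the default values mirror the sliders above.

# Sketch of the streaming loop in respond(), using huggingface_hub's
# InferenceClient.chat_completion with stream=True. Chunks without delta
# content (e.g. the final chunk) are skipped so the concatenation never sees None.
from huggingface_hub import InferenceClient

chat_client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")  # illustrative name

def stream_reply(messages, max_tokens=512, temperature=0.7, top_p=0.95):
    response = ""
    for chunk in chat_client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:  # some chunks carry no text
            response += token
            yield response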
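
On the UI wiring: respond() already has the (message, history, *extra inputs) signature that gr.ChatInterface expects, whereas the gr.Interface above passes its five components positionally and supplies no chat message or history; recent Gradio releases also reject type="file" for gr.Image (they expect "numpy", "pil", or "filepath"). A sketch of the ChatInterface wiring under those assumptions:

# Sketch, assuming Gradio 4.x: gr.ChatInterface passes (message, history) and then
# the additional_inputs in order, which matches respond()'s parameter list.
# type="filepath" makes gr.Image hand describe_image() a local file path.
import gradio as gr

demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
        gr.Image(type="filepath", label="Upload Image"),
    ],
)

if __name__ == "__main__":
    demo.launch()

With type="filepath", the value handed to describe_image() is a local path that handle_file() can wrap directly.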