Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -1,413 +1,182 @@
 import gradio as gr
-import spaces
 import torch
-[… 2 lines truncated in this capture …]
-from huggingface_hub import hf_hub_download
-
-from controlnet_union import ControlNetModel_Union
-from pipeline_fill_sd_xl import StableDiffusionXLFillPipeline
-
-from PIL import Image, ImageDraw
 import numpy as np
-[… 6 lines truncated in this capture …]
-config = ControlNetModel_Union.load_config(config_file)
-controlnet_model = ControlNetModel_Union.from_config(config)
-model_file = hf_hub_download(
-    "xinsir/controlnet-union-sdxl-1.0",
-    filename="diffusion_pytorch_model_promax.safetensors",
-)
-
-sstate_dict = load_state_dict(model_file)
-model, _, _, _, _ = ControlNetModel_Union._load_pretrained_model(
-    controlnet_model, sstate_dict, model_file, "xinsir/controlnet-union-sdxl-1.0"
 )
-[… 117 lines truncated in this capture (per later references, the pipelines setup and most of prepare_image_and_mask()) …]
-    # Draw the mask
-    mask_draw.rectangle([
-        (left_overlap, top_overlap),
-        (right_overlap, bottom_overlap)
-    ], fill=0)
-
-    return background, mask
-
-@spaces.GPU(duration=28)
-def infer(image, width, height, overlap_percentage, num_inference_steps, resize_option, custom_resize_percentage, prompt_input, alignment, overlap_left, overlap_right, overlap_top, overlap_bottom, selected_model):
-    background, mask = prepare_image_and_mask(image, width, height, overlap_percentage, resize_option, custom_resize_percentage, alignment, overlap_left, overlap_right, overlap_top, overlap_bottom)
-
-    cnet_image = background.copy()
-    cnet_image.paste(0, (0, 0), mask)
-
-    final_prompt = f"{prompt_input} , high quality, 4k"
-
-    # Access the selected pipeline from the dictionary
-    pipe = pipelines[selected_model]
-
-    (
-        prompt_embeds,
-        negative_prompt_embeds,
-        pooled_prompt_embeds,
-        negative_pooled_prompt_embeds,
-    ) = pipe.encode_prompt(final_prompt, "cuda", True)
-
-    # Generate the image
-    for image in pipe(
-        prompt_embeds=prompt_embeds,
-        negative_prompt_embeds=negative_prompt_embeds,
-        pooled_prompt_embeds=pooled_prompt_embeds,
-        negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
-        image=cnet_image,
-        num_inference_steps=num_inference_steps
-    ):
-        pass  # Wait for the generation to complete
-    generated_image = image  # Get the last image
-
-    generated_image = generated_image.convert("RGBA")
-    cnet_image.paste(generated_image, (0, 0), mask)
-
-    return cnet_image
-
-
-def clear_result():
-    """Clears the result Image."""
-    return gr.update(value=None)
-
-
-def preload_presets(target_ratio, ui_width, ui_height):
-    """Updates the width and height sliders based on the selected aspect ratio."""
-    if target_ratio == "9:16":
-        changed_width = 720
-        changed_height = 1280
-        return changed_width, changed_height, gr.update()
-    elif target_ratio == "16:9":
-        changed_width = 1280
-        changed_height = 720
-        return changed_width, changed_height, gr.update()
-    elif target_ratio == "1:1":
-        changed_width = 1024
-        changed_height = 1024
-        return changed_width, changed_height, gr.update()
-    elif target_ratio == "Custom":
-        return ui_width, ui_height, gr.update(open=True)
-
-
-def select_the_right_preset(user_width, user_height):
-    if user_width == 720 and user_height == 1280:
-        return "9:16"
-    elif user_width == 1280 and user_height == 720:
-        return "16:9"
-    elif user_width == 1024 and user_height == 1024:
-        return "1:1"
     else:
-[… 48 lines truncated in this capture (the remaining helpers and the start of the gr.Blocks UI, per later references) …]
-        with gr.Row():
-            target_ratio = gr.Radio(
-                label="Expected Ratio",
-                choices=["9:16", "16:9", "1:1", "Custom"],
-                value="9:16",
-                scale=2
-            )
-
-            alignment_dropdown = gr.Dropdown(
-                choices=["Middle", "Left", "Right", "Top", "Bottom"],
-                value="Middle",
-                label="Alignment"
-            )
-
-        with gr.Accordion(label="Advanced settings", open=False) as settings_panel:
-            with gr.Column():
-                with gr.Row():
-                    width_slider = gr.Slider(
-                        label="Width",
-                        minimum=720,
-                        maximum=1536,
-                        step=8,
-                        value=720,
-                    )
-                    height_slider = gr.Slider(
-                        label="Height",
-                        minimum=720,
-                        maximum=1536,
-                        step=8,
-                        value=1280,
-                    )
-
-                num_inference_steps = gr.Slider(label="Steps", minimum=4, maximum=12, step=1, value=8)
-                with gr.Group():
-                    overlap_percentage = gr.Slider(
-                        label="Mask overlap (%)",
-                        minimum=1,
-                        maximum=50,
-                        value=10,
-                        step=1
-                    )
-                    with gr.Row():
-                        overlap_top = gr.Checkbox(label="Overlap Top", value=True)
-                        overlap_right = gr.Checkbox(label="Overlap Right", value=True)
-                    with gr.Row():
-                        overlap_left = gr.Checkbox(label="Overlap Left", value=True)
-                        overlap_bottom = gr.Checkbox(label="Overlap Bottom", value=True)
-                with gr.Row():
-                    resize_option = gr.Radio(
-                        label="Resize input image",
-                        #choices=["Full", "50%", "33%", "25%", "Custom"],
-                        choices=["Full", "50%", "33%", "25%", "Custom"],
-                        value="Full"
-                    )
-                    custom_resize_percentage = gr.Slider(
-                        label="Custom resize (%)",
-                        minimum=1,
-                        maximum=100,
-                        step=1,
-                        value=50,
-                        visible=False
-                    )
-
-        gr.Examples(
-            examples=[
-                ["./examples/3.jpg", 1024, 1024, "Top"],
-                ["./examples/4.jpg", 1024, 1024, "Middle"],
-                ["./examples/2.png", 720, 1280, "Left"],
-                ["./examples/1.png", 1280, 720, "Bottom"],
-                ["./examples/5.jpg", 1024, 1024, "Bottom"],
-            ],
-            inputs=[input_image, width_slider, height_slider, alignment_dropdown],
-        )
-
-        with gr.Column():
-            result = gr.Image(
-                interactive=False,
-                label="Generated Image",
-                format="png",
-            )
-            history_gallery = gr.Gallery(label="History", columns=6, object_fit="contain", interactive=False)
-
-    target_ratio.change(
-        fn=preload_presets,
-        inputs=[target_ratio, width_slider, height_slider],
-        outputs=[width_slider, height_slider, settings_panel],
-        queue=False
-    )
-
-    width_slider.change(
-        fn=select_the_right_preset,
-        inputs=[width_slider, height_slider],
-        outputs=[target_ratio],
-        queue=False
-    )
-
-    height_slider.change(
-        fn=select_the_right_preset,
-        inputs=[width_slider, height_slider],
-        outputs=[target_ratio],
-        queue=False
-    )
-
-    resize_option.change(
-        fn=toggle_custom_resize_slider,
-        inputs=[resize_option],
-        outputs=[custom_resize_percentage],
-        queue=False
-    )
-
-    run_button.click(
-        fn=clear_result,
-        inputs=None,
-        outputs=result,
-    ).then(
-        fn=infer,
-        inputs=[input_image, width_slider, height_slider, overlap_percentage, num_inference_steps,
-                resize_option, custom_resize_percentage, prompt_input, alignment_dropdown,
-                overlap_left, overlap_right, overlap_top, overlap_bottom, model_selector],
-        outputs=result,
-    ).then(
-        fn=lambda x, history: update_history(x, history),
-        inputs=[result, history_gallery],
-        outputs=history_gallery,
-    )
-
-    prompt_input.submit(
-        fn=clear_result,
-        inputs=None,
-        outputs=result,
-    ).then(
-        fn=infer,
-        inputs=[input_image, width_slider, height_slider, overlap_percentage, num_inference_steps,
-                resize_option, custom_resize_percentage, prompt_input, alignment_dropdown,
-                overlap_left, overlap_right, overlap_top, overlap_bottom, model_selector],
-        outputs=result,
-    ).then(
-        fn=lambda x, history: update_history(x, history),
-        inputs=[result, history_gallery],
-        outputs=history_gallery,
-    )
 
-demo.[… truncated in this capture …]
 import gradio as gr
+from transformers.image_utils import load_image
+from threading import Thread
+import time
 import torch
+import spaces
+import cv2
 import numpy as np
+from PIL import Image
+from transformers import (
+    Qwen2VLForConditionalGeneration,
+    AutoProcessor,
+    TextIteratorStreamer,
 )
+from transformers import Qwen2_5_VLForConditionalGeneration
+from pdf2image import convert_from_path
+
+# Helper Functions
+def progress_bar_html(label: str, primary_color: str = "#4B0082", secondary_color: str = "#9370DB") -> str:
+    """
+    Returns an HTML snippet for a thin animated progress bar with a label.
+    Colors can be customized; default colors are used for Qwen2VL/Aya-Vision.
+    """
+    return f'''
+    <div style="display: flex; align-items: center;">
+        <span style="margin-right: 10px; font-size: 14px;">{label}</span>
+        <div style="width: 110px; height: 5px; background-color: {secondary_color}; border-radius: 2px; overflow: hidden;">
+            <div style="width: 100%; height: 100%; background-color: {primary_color}; animation: loading 1.5s linear infinite;"></div>
+        </div>
+    </div>
+    <style>
+    @keyframes loading {{
+        0% {{ transform: translateX(-100%); }}
+        100% {{ transform: translateX(100%); }}
+    }}
+    </style>
+    '''
+
+def downsample_video(video_path):
+    """
+    Downsamples a video file by extracting 10 evenly spaced frames.
+    Returns a list of tuples (PIL.Image, timestamp).
+    """
+    vidcap = cv2.VideoCapture(video_path)
+    total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
+    fps = vidcap.get(cv2.CAP_PROP_FPS)
+    frames = []
+    if total_frames <= 0 or fps <= 0:
+        vidcap.release()
+        return frames
+    frame_indices = np.linspace(0, total_frames - 1, 10, dtype=int)
+    for i in frame_indices:
+        vidcap.set(cv2.CAP_PROP_POS_FRAMES, i)
+        success, image = vidcap.read()
+        if success:
+            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+            pil_image = Image.fromarray(image)
+            timestamp = round(i / fps, 2)
+            frames.append((pil_image, timestamp))
+    vidcap.release()
+    return frames
+
+# Model and Processor Setup
+QV_MODEL_ID = "prithivMLmods/Qwen2-VL-OCR-2B-Instruct"
+qwen_processor = AutoProcessor.from_pretrained(QV_MODEL_ID, trust_remote_code=True)
+qwen_model = Qwen2VLForConditionalGeneration.from_pretrained(
+    QV_MODEL_ID,
+    trust_remote_code=True,
+    torch_dtype=torch.float16
+).to("cuda").eval()
+
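+# RolmOCR is a Qwen2.5-VL-based OCR model, hence the separate model class below.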
+ROLMOCR_MODEL_ID = "reducto/RolmOCR"
+rolmocr_processor = AutoProcessor.from_pretrained(ROLMOCR_MODEL_ID, trust_remote_code=True)
+rolmocr_model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
+    ROLMOCR_MODEL_ID,
+    trust_remote_code=True,
+    torch_dtype=torch.bfloat16
+).to("cuda").eval()
+
+# Main Inference Function
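+# The Space runs on ZeroGPU; @spaces.GPU requests a GPU allocation for each call.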
+@spaces.GPU
+def model_inference(message, history, use_rolmocr):
+    text = message["text"].strip()
+    files = message.get("files", [])
+
+    if not text and not files:
+        yield "Error: Please input a text query or provide files (images, videos, PDFs)."
+        return
+
+    # Process files: images, videos, PDFs
+    image_list = []
+    for idx, file in enumerate(files):
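+        # PDF conversion via pdf2image needs the poppler system package at runtime.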
+        if file.lower().endswith(".pdf"):
+            try:
+                pdf_images = convert_from_path(file)
+                for page_num, img in enumerate(pdf_images, start=1):
+                    label = f"PDF {idx+1} Page {page_num}:"
+                    image_list.append((label, img))
+            except Exception as e:
+                yield f"Error converting PDF: {str(e)}"
+                return
+        elif file.lower().endswith((".mp4", ".avi", ".mov")):
+            frames = downsample_video(file)
+            if not frames:
+                yield "Error: Could not extract frames from the video."
+                return
+            for frame, timestamp in frames:
+                label = f"Video {idx+1} Frame {timestamp}:"
+                image_list.append((label, frame))
+        else:
+            try:
+                img = load_image(file)
+                label = f"Image {idx+1}:"
+                image_list.append((label, img))
+            except Exception as e:
+                yield f"Error loading image: {str(e)}"
+                return
+
+    # Build content list
+    content = [{"type": "text", "text": text}]
+    for label, img in image_list:
+        content.append({"type": "text", "text": label})
+        content.append({"type": "image", "image": img})
+
+    messages = [{"role": "user", "content": content}]
+
+    # Select processor and model
+    if use_rolmocr:
+        processor = rolmocr_processor
+        model = rolmocr_model
+        model_name = "RolmOCR"
     else:
+        processor = qwen_processor
+        model = qwen_model
+        model_name = "Qwen2VL OCR"
+
+    prompt_full = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+    all_images = [item["image"] for item in content if item["type"] == "image"]
+    inputs = processor(
+        text=[prompt_full],
+        images=all_images if all_images else None,
+        return_tensors="pt",
+        padding=True,
+    ).to("cuda")
+
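+    # generate() runs on a background thread while TextIteratorStreamer yields
+    # decoded text incrementally, so partial output can stream to the chat UI.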
+    streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
+    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
+    thread = Thread(target=model.generate, kwargs=generation_kwargs)
+    thread.start()
+    buffer = ""
+    yield progress_bar_html(f"Processing with {model_name}")
+    for new_text in streamer:
+        buffer += new_text
+        buffer = buffer.replace("<|im_end|>", "")
+        time.sleep(0.01)
+        yield buffer
+
+# Gradio Interface
+examples = [
+    [{"text": "OCR the Text in the Image", "files": ["rolm/1.jpeg"]}],
+    [{"text": "Explain the Ad in Detail", "files": ["examples/videoplayback.mp4"]}],
+    [{"text": "OCR the Image", "files": ["rolm/3.jpeg"]}],
+    [{"text": "Extract as JSON table from the table", "files": ["examples/4.jpg"]}],
+]
+
+demo = gr.ChatInterface(
+    fn=model_inference,
+    description="# **Multimodal OCR with Model Selection**",
+    examples=examples,
+    textbox=gr.MultimodalTextbox(
+        label="Query Input",
+        file_types=["image", "video", "pdf"],
+        file_count="multiple",
+        placeholder="Input your query and optionally upload image(s), video(s), or PDF(s). Select the model using the checkbox."
+    ),
+    stop_btn="Stop Generation",
+    multimodal=True,
+    cache_examples=False,
+    additional_inputs=[gr.Checkbox(label="Use RolmOCR", value=True)],
+)
 
+demo.launch(debug=True)
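
For reference, a requirements.txt sketch implied by the new imports (an assumption for readers, not part of this commit; pdf2image also needs the poppler-utils system package, e.g. via packages.txt on Spaces):

# requirements.txt (sketch)
gradio
spaces
torch
transformers
opencv-python
numpy
Pillow
pdf2image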