Update app.py

app.py CHANGED
@@ -72,8 +72,8 @@ model_a = AutoModelForImageTextToText.from_pretrained(
     torch_dtype=torch.float16
 ).to(device).eval()
 
-# Load
-MODEL_ID_W = "
+# Load olmOCR-7B-0725
+MODEL_ID_W = "allenai/olmOCR-7B-0725"
 processor_w = AutoProcessor.from_pretrained(MODEL_ID_W, trust_remote_code=True)
 model_w = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     MODEL_ID_W,
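For context, a minimal sketch of how the swapped-in `processor_w`/`model_w` pair is typically driven for OCR, assuming the standard Qwen2.5-VL chat-template flow; the message layout, prompt handling, and generation settings below are illustrative assumptions, not code from this Space:

```python
# Hedged sketch: assumes the standard Qwen2.5-VL chat-template inference flow.
# The message structure and max_new_tokens here are illustrative, not from app.py.
from PIL import Image

def run_olmocr(image: Image.Image, prompt: str) -> str:
    messages = [{
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": prompt},
        ],
    }]
    # Render the chat template, then tokenize text and image together.
    chat = processor_w.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = processor_w(
        text=[chat], images=[image], return_tensors="pt"
    ).to(model_w.device)
    output_ids = model_w.generate(**inputs, max_new_tokens=1024)
    # Drop the echoed prompt tokens before decoding.
    trimmed = output_ids[:, inputs["input_ids"].shape[1]:]
    return processor_w.batch_decode(trimmed, skip_special_tokens=True)[0]
```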
@@ -134,7 +134,7 @@ def generate_image(model_name: str, text: str, image: Image.Image,
     elif model_name == "Aya-Vision-8B":
         processor = processor_a
         model = model_a
-    elif model_name == "
+    elif model_name == "olmOCR-7B-0725":
         processor = processor_w
         model = model_w
     else:
@@ -195,7 +195,7 @@ def generate_video(model_name: str, text: str, video_path: str,
     elif model_name == "Aya-Vision-8B":
         processor = processor_a
         model = model_a
-    elif model_name == "
+    elif model_name == "olmOCR-7B-0725":
         processor = processor_w
         model = model_w
     else:
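Both `generate_image` and `generate_video` gain the same `elif` arm, so each new checkpoint must be registered twice. As a hedged refactor sketch (not what this commit does), the dispatch could be collapsed into one registry shared by both functions; only the `processor_a`/`model_a` and `processor_w`/`model_w` pairs appear in the diff, so the other names below are hypothetical stand-ins for the module-level globals:

```python
# Sketch of a shared name -> (processor, model) registry.
MODELS = {
    "Aya-Vision-8B": (processor_a, model_a),
    "olmOCR-7B-0725": (processor_w, model_w),
    # "Nanonets-OCR-s": (processor_n, model_n),   # hypothetical names
    # "Qwen2-VL-OCR-2B": (processor_q, model_q),  # hypothetical names
    # "RolmOCR-7B": (processor_r, model_r),       # hypothetical names
}

def resolve_model(model_name: str):
    """Look up the (processor, model) pair once, for both image and video paths."""
    try:
        return MODELS[model_name]
    except KeyError:
        raise ValueError(f"Unknown model: {model_name!r}")
```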
@@ -310,14 +310,14 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
     with gr.Accordion("(Result.md)", open=False):
         markdown_output = gr.Markdown(label="Formatted Result (Result.Md)")
     model_choice = gr.Radio(
-        choices=["Nanonets-OCR-s", "Qwen2-VL-OCR-2B",
-                 "
+        choices=["olmOCR-7B-0725", "Nanonets-OCR-s", "Qwen2-VL-OCR-2B",
+                 "RolmOCR-7B", "Aya-Vision-8B"],
         label="Select Model",
         value="Nanonets-OCR-s"
     )
     gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Multimodal-OCR/discussions)")
     gr.Markdown("> [Nanonets-OCR-s](https://huggingface.co/nanonets/Nanonets-OCR-s): nanonets-ocr-s is a powerful, state-of-the-art image-to-markdown ocr model that goes far beyond traditional text extraction. it transforms documents into structured markdown with intelligent content recognition and semantic tagging.")
-
+    gr.Markdown("> [olmOCR-7B-0725](https://huggingface.co/allenai/olmOCR-7B-0725): Updating shortly...")
     gr.Markdown("> [Qwen2-VL-OCR-2B](https://huggingface.co/prithivMLmods/Qwen2-VL-OCR-2B-Instruct): qwen2-vl-ocr-2b-instruct model is a fine-tuned version of qwen2-vl-2b-instruct, tailored for tasks that involve [messy] optical character recognition (ocr), image-to-text conversion, and math problem solving with latex formatting.")
     gr.Markdown("> [RolmOCR](https://huggingface.co/reducto/RolmOCR): rolmocr, high-quality, openly available approach to parsing pdfs and other complex documents optical character recognition. it is designed to handle a wide range of document types, including scanned documents, handwritten text, and complex layouts.")
     gr.Markdown("> [Aya-Vision](https://huggingface.co/CohereLabs/aya-vision-8b): cohere labs aya vision 8b is an open weights research release of an 8-billion parameter model with advanced capabilities optimized for a variety of vision-language use cases, including ocr, captioning, visual reasoning, summarization, question answering, code, and more.")