|
import gradio as gr |
|
from transformers import AutoProcessor, AutoModelForCausalLM |
|
import re |
|
from PIL import Image |
|
import os |
|
import numpy as np |
|
import spaces |
|
import subprocess |
|
import torch |
|
|
|
|
|
# Install flash-attn at runtime (common HF Spaces pattern); the env var skips
# the slow CUDA kernel build so only the wheel's Python side is set up.
# NOTE(review): shell=True with a fixed command string is acceptable here (no
# untrusted input), but a list argv with shell=False would be safer.
subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)


# Load the Florence-2 captioning model and its processor once at import time.
# trust_remote_code is required because Florence-2 ships custom modeling code
# alongside the weights.
model = AutoModelForCausalLM.from_pretrained(

    'PJMixers-Images/Florence-2-base-Castollux-v0.5',

    trust_remote_code=True

).eval()  # inference-only: disables dropout etc.

processor = AutoProcessor.from_pretrained(

    'PJMixers-Images/Florence-2-base-Castollux-v0.5',

    trust_remote_code=True

)


# Markdown heading shown at the top of the demo, linking to the model card.
TITLE = "# [PJMixers-Images/Florence-2-base-Castollux-v0.5](https://huggingface.co/PJMixers-Images/Florence-2-base-Castollux-v0.5)"
|
|
|
|
|
@spaces.GPU
def process_image(image):
    """Caption a single image with the Florence-2 ``<CAPTION>`` task.

    Args:
        image: A ``PIL.Image.Image``, a numpy array (as delivered by
            ``gr.Image``), or a filesystem path string.

    Returns:
        str: The generated caption text.
    """
    # Normalize the three accepted input forms to an RGB PIL image.
    if isinstance(image, np.ndarray):
        image = Image.fromarray(image)
    elif isinstance(image, str):
        image = Image.open(image)
    if image.mode != "RGB":
        image = image.convert("RGB")

    inputs = processor(text="<CAPTION>", images=image, return_tensors="pt")

    # Inference only: no gradients needed. Tensors are sent to the model's
    # device so this also works when the model has been moved to GPU.
    with torch.no_grad():
        generated_ids = model.generate(
            input_ids=inputs["input_ids"].to(model.device),
            pixel_values=inputs["pixel_values"].to(model.device),
            max_new_tokens=1024,
            num_beams=5,
            do_sample=True
        )
    # Keep special tokens: post_process_generation needs the task markers.
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]

    # Fix: post_process_generation returns {"<CAPTION>": caption}. Callers
    # (the Gradio textbox and process_folder's f.write) expect a plain
    # string, so unwrap the task's entry here.
    parsed = processor.post_process_generation(
        generated_text, task="<CAPTION>", image_size=(image.width, image.height)
    )
    return parsed["<CAPTION>"]
|
|
|
|
|
def extract_frames(image_path, output_folder):
    """Split an animated image into individual PNG frame files.

    Each frame is written to *output_folder* as ``<basename>_frame_NNN.png``.
    Returns the list of written frame paths, in frame order.
    """
    stem = os.path.splitext(os.path.basename(image_path))[0]
    saved = []

    with Image.open(image_path) as animation:
        frame_index = 0
        try:
            while frame_index < animation.n_frames:
                animation.seek(frame_index)
                target = os.path.join(output_folder, f"{stem}_frame_{frame_index:03d}.png")
                animation.save(target)
                saved.append(target)
                frame_index += 1
        except EOFError:
            # Some formats under-report/over-report frames; stop at real end.
            pass

    return saved
|
|
|
|
|
def _write_caption(image_path, txt_path):
    """Caption one image file and write the text to *txt_path* (UTF-8)."""
    caption = process_image(image_path)
    # Defensive: process_image may return the raw Florence-2 result dict
    # ({"<CAPTION>": text}); f.write(dict) would raise TypeError, so unwrap
    # to a plain string either way.
    if isinstance(caption, dict):
        caption = caption.get("<CAPTION>", str(caption))
    with open(txt_path, 'w', encoding='utf-8') as f:
        f.write(caption)


def process_folder(folder_path):
    """Caption every image in *folder_path*, writing one .txt per image.

    Animated images (GIF/WebP/...) are first split into frames and each frame
    gets its own caption file. Any image whose .txt already exists is skipped.

    Args:
        folder_path: Directory to scan (non-recursive).

    Returns:
        str: A per-file report, or an explanatory message when nothing was
        processed or the path is invalid.
    """
    if not os.path.isdir(folder_path):
        return "Invalid folder path."

    processed_files = []
    skipped_files = []
    for filename in os.listdir(folder_path):
        if not filename.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', '.bmp', '.webp', '.heic')):
            continue
        image_path = os.path.join(folder_path, filename)
        txt_filename = os.path.splitext(filename)[0] + '.txt'
        txt_path = os.path.join(folder_path, txt_filename)

        if os.path.exists(txt_path):
            # Fix: report the actual filename (was a literal "(unknown)").
            skipped_files.append(f"Skipped {filename} (text file already exists)")
            continue

        # Probe for animation, then close the handle before re-opening for
        # frame extraction / captioning.
        with Image.open(image_path) as img:
            animated = getattr(img, "is_animated", False) and img.n_frames > 1

        if animated:
            for frame_path in extract_frames(image_path, folder_path):
                frame_txt_filename = os.path.splitext(os.path.basename(frame_path))[0] + '.txt'
                frame_txt_path = os.path.join(folder_path, frame_txt_filename)

                if os.path.exists(frame_txt_path):
                    skipped_files.append(f"Skipped {os.path.basename(frame_path)} (text file already exists)")
                    continue

                _write_caption(frame_path, frame_txt_path)
                processed_files.append(f"Processed {os.path.basename(frame_path)} -> {frame_txt_filename}")
        else:
            _write_caption(image_path, txt_path)
            # Fix: report the actual filename (was a literal "(unknown)").
            processed_files.append(f"Processed {filename} -> {txt_filename}")

    result = "\n".join(processed_files + skipped_files)

    return result if result else "No image files found or all files were skipped in the specified folder."
|
|
|
# Make the (optional) #output element a fixed-height scroll area so long
# captions don't stretch the page.
css = """

#output { height: 500px; overflow: auto; border: 1px solid #ccc; }

"""


# Two-tab Gradio UI: single-image captioning and batch folder processing.
with gr.Blocks(css=css) as demo:

    gr.Markdown(TITLE)


    with gr.Tab(label="Single Image Processing"):

        with gr.Row():

            with gr.Column():

                input_img = gr.Image(label="Input Picture")

                submit_btn = gr.Button(value="Submit")

            with gr.Column():

                output_text = gr.Textbox(label="Output Text")


        # Clickable example images bundled alongside this script; clicking
        # one runs process_image and fills the output textbox.
        gr.Examples(

            [

                ["eval_img_1.jpg"],

                ["eval_img_2.jpg"],

                ["eval_img_3.jpg"],

                ["eval_img_4.jpg"],

                ["eval_img_5.jpg"],

                ["eval_img_6.jpg"],

                ["eval_img_7.png"],

                ["eval_img_8.jpg"]

            ],

            inputs=[input_img],

            outputs=[output_text],

            fn=process_image,

            label='Try captioning on below examples'

        )


        submit_btn.click(process_image, [input_img], [output_text])


    with gr.Tab(label="Batch Processing"):

        with gr.Row():

            folder_input = gr.Textbox(label="Input Folder Path")

            batch_submit_btn = gr.Button(value="Process Folder")

        batch_output = gr.Textbox(label="Batch Processing Results", lines=10)


        batch_submit_btn.click(process_folder, [folder_input], [batch_output])


# debug=True surfaces server-side errors in the UI/console while developing.
demo.launch(debug=True)