import gradio as gr
from transformers import PaliGemmaForConditionalGeneration, PaliGemmaProcessor
import torch
import re
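
# Gradio demo: caption an input image with a PaliGemma model fine-tuned on
# SD3 generation data, so the caption can be reused as a Stable Diffusion 3 prompt.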

# Load the model and processor
model = PaliGemmaForConditionalGeneration.from_pretrained("gokaygokay/sd3-long-captioner").to("cpu").eval()
processor = PaliGemmaProcessor.from_pretrained("gokaygokay/sd3-long-captioner")

def modify_caption(caption: str) -> str:
    """
    Removes specific prefixes from captions.
    Args:
        caption (str): A string containing a caption.
    Returns:
        str: The caption with the prefix removed if it was present.
    """
    prefix_substrings = [
        ('captured from ', ''),
        ('captured at ', '')
    ]
    
    pattern = '|'.join([re.escape(opening) for opening, _ in prefix_substrings])
    replacers = {opening: replacer for opening, replacer in prefix_substrings}
    
    def replace_fn(match):
        # Lowercase the match so case-insensitive hits still map to the dict keys.
        return replacers[match.group(0).lower()]

    return re.sub(pattern, replace_fn, caption, count=1, flags=re.IGNORECASE)

def create_captions_rich(image):
    """
    Generates a caption for an input image.

    Args:
        image (PIL.Image.Image): Image to generate a caption for.

    Returns:
        str: The generated caption, or an error message if captioning fails.
    """
    try:
        prompt = "caption en"
        model_inputs = processor(text=prompt, images=image, return_tensors="pt").to("cpu")
        input_len = model_inputs["input_ids"].shape[-1]

        with torch.inference_mode():
            generation = model.generate(**model_inputs, max_new_tokens=256, do_sample=False)

        # Keep only the newly generated tokens (drop the prompt) before decoding.
        generation = generation[0][input_len:]
        decoded = processor.decode(generation, skip_special_tokens=True)

        return modify_caption(decoded)
    except Exception as e:
        return f"Error processing image: {e}"

css = """
  #mkd {
    height: 500px; 
    overflow: auto; 
    border: 8px solid #ccc; 
  }
"""

with gr.Blocks(css=css) as demo:
    gr.HTML("<h1><center>Image captioning using PaliGemma fine-tuned on SD3 generation data.</center></h1>")
    with gr.Tab(label="Img2Prompt for SD3"):
        with gr.Row():
            with gr.Column():
                input_img = gr.Image(label="Input Image", type="pil", interactive=True)
                submit_btn = gr.Button(value="Start")
            output = gr.Textbox(label="Prompt", lines=10, interactive=True)

        submit_btn.click(create_captions_rich, [input_img], [output])

demo.launch(debug=True)