import re

import gradio as gr
import torch
from PIL import Image
from transformers import PaliGemmaForConditionalGeneration, PaliGemmaProcessor

model = PaliGemmaForConditionalGeneration.from_pretrained("gokaygokay/sd3-long-captioner").to("cpu").eval()
processor = PaliGemmaProcessor.from_pretrained("gokaygokay/sd3-long-captioner")
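# The checkpoint is a PaliGemma fine-tune for SD3-style prompt generation.
# Inference runs on CPU here; as a sketch, on a CUDA machine you could move
# the model (and, below, model_inputs) with .to("cuda") instead.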

def modify_caption(caption: str) -> str:
    """
    Removes specific prefixes from captions.
    Args:
        caption (str): A string containing a caption.
    Returns:
        str: The caption with the prefix removed if it was present.
    """
    prefix_substrings = [
        ('captured from ', ''),
        ('captured at ', '')
    ]

    pattern = '|'.join(re.escape(opening) for opening, _ in prefix_substrings)
    replacers = {opening.lower(): replacer for opening, replacer in prefix_substrings}

    def replace_fn(match):
        # Normalize case so the lookup agrees with the re.IGNORECASE flag
        # used below (e.g. "Captured from " also resolves correctly).
        return replacers[match.group(0).lower()]

    return re.sub(pattern, replace_fn, caption, count=1, flags=re.IGNORECASE)
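
# A quick sanity check (hypothetical caption text):
#   modify_caption("Captured from a rooftop, the city skyline at dusk")
#   -> "a rooftop, the city skyline at dusk"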

def create_captions_rich(images):
    captions = []
    for item in images:
        try:
            # gr.Gallery may hand each item over as an (image, caption)
            # tuple; unwrap it before processing.
            if isinstance(item, tuple):
                item = item[0]
            # With type="pil" the item is already a PIL image; otherwise
            # fall back to treating it as a file path.
            if isinstance(item, Image.Image):
                image = item.convert("RGB")
            else:
                image = Image.open(item).convert("RGB")

            prompt = "caption en"
            model_inputs = processor(text=prompt, images=image, return_tensors="pt").to("cpu")
            input_len = model_inputs["input_ids"].shape[-1]

            with torch.inference_mode():
                generation = model.generate(**model_inputs, max_new_tokens=256, do_sample=False)
                generation = generation[0][input_len:]
                decoded = processor.decode(generation, skip_special_tokens=True)

            captions.append(modify_caption(decoded))
        except Exception as e:
            captions.append(f"Error processing image: {e}")
    # The output component is a single Textbox, so return one
    # newline-separated string rather than a list.
    return "\n".join(captions)
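
# Standalone usage sketch, bypassing the Gradio UI (file names are
# hypothetical; any local images would do):
#   print(create_captions_rich(["example1.jpg", "example2.jpg"]))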


css = """
  #mkd {
    height: 500px; 
    overflow: auto; 
    border: 8px solid #ccc; 
  }
"""

with gr.Blocks(css=css) as demo:
  gr.HTML("<h1><center>Finetuned PaliGemma for SD3 prompt generation.</center></h1>")
  with gr.Tab(label="Image to Prompt for SD3"):
    with gr.Row():
      with gr.Column():
        input_img = gr.Gallery(label="Input Images", type="pil", interactive=True)
        submit_btn = gr.Button(value="Start")
      output = gr.Textbox(label="Prompt", lines=10, interactive=True)

    submit_btn.click(create_captions_rich, [input_img], [output])

demo.launch(debug=True)