import gradio as gr
import requests
import io
from PIL import Image
import json
from image_processing import downscale_image, limit_colors, convert_to_grayscale, convert_to_black_and_white
import logging

# logging.debug output is hidden unless the root logger is configured.
logging.basicConfig(level=logging.DEBUG)

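# Minimal container for generated images; run_lora stores its output here so
# downstream steps can read it via the `.images` list.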
class SomeClass:
    def __init__(self):
        self.images = []

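# loras.json is expected to contain a list of entries, each providing the
# fields used below: "image", "title", "repo", and "trigger_word".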
with open('loras.json', 'r') as f:
    loras = json.load(f)

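# Called when a gallery item is clicked; Gradio passes a gr.SelectData event
# whose .index identifies the chosen LoRA. The event is also stored in
# gr.State so run_lora can look up the same entry later.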
def update_selection(selected_state: gr.SelectData):
    logging.debug(f"Inside update_selection, selected_state: {selected_state}")
    selected_lora_index = selected_state.index
    selected_lora = loras[selected_lora_index]
    new_placeholder = f"Type a prompt for {selected_lora['title']}"
    lora_repo = selected_lora["repo"]
    updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✨"
    return (
        gr.update(placeholder=new_placeholder),
        updated_text,
        selected_state
    )

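# Generate an image through the hosted Hugging Face Inference API. Depending on
# the model, the endpoint may require an Authorization header with an HF token;
# none is sent here.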
def run_lora(prompt, selected_state, progress=gr.Progress(track_tqdm=True)):
    selected_lora_index = selected_state.index
    selected_lora = loras[selected_lora_index]
    api_url = f"https://api-inference.huggingface.co/models/{selected_lora['repo']}"
    payload = {
        "inputs": f"{prompt} {selected_lora['trigger_word']}",
        "parameters": {"negative_prompt": "bad art, ugly, watermark, deformed"},
    }
    response = requests.post(api_url, json=payload, timeout=120)
    if response.status_code != 200:
        raise gr.Error(f"Inference API request failed with status {response.status_code}")
    original_image = Image.open(io.BytesIO(response.content))
    processed = SomeClass()
    processed.images = [original_image]
    # No refinement step is applied yet, so the "refined" image is the raw output.
    refined_image = processed.images[-1]
    return original_image, refined_image

def apply_post_processing(image, downscale, enable_color_limit, grayscale, black_and_white):
    # Parameter names must not shadow the imported helpers (limit_colors, ...),
    # otherwise the boolean flag would be called instead of the function.
    processed_image = image.copy()
    if downscale > 1:
        processed_image = downscale_image(processed_image, downscale)
    if enable_color_limit:
        processed_image = limit_colors(processed_image)
    if grayscale:
        processed_image = convert_to_grayscale(processed_image)
    if black_and_white:
        processed_image = convert_to_black_and_white(processed_image)
    return processed_image

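# UI layout: LoRA gallery on the left; prompt, generated images, and
# post-processing controls on the right.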
with gr.Blocks() as app:
    title = gr.Markdown("# artificialguybr LoRA portfolio")
    description = gr.Markdown("### This is a Pixel Art Generator using SD Loras.")
    selected_state = gr.State()
    with gr.Row():
        gallery = gr.Gallery([(item["image"], item["title"]) for item in loras], label="LoRA Gallery", allow_preview=False, columns=3)
        with gr.Column():
            prompt_title = gr.Markdown("### Click on a LoRA in the gallery to create with it")
            selected_info = gr.Markdown("")
            with gr.Row():
                prompt = gr.Textbox(label="Prompt", show_label=False, lines=1, max_lines=1, placeholder="Type a prompt after selecting a LoRA")
                button = gr.Button("Run")
            result = gr.Image(interactive=False, label="Generated Image")
            refined_result = gr.Image(interactive=False, label="Refined Generated Image")
            post_processed_result = gr.Image(interactive=False, label="Post-Processed Image")
            
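            # Post-processing controls, one tab per operation. Only the Enable
            # checkboxes and the downscale factor are wired into
            # apply_post_processing below; the palette/threshold sliders are
            # defined but not yet consumed.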
            with gr.Tabs():
                with gr.TabItem("Downscale"):
                    # Downscale factor consumed by apply_post_processing; 1 disables downscaling.
                    downscale = gr.Slider(label="Downscale Factor", minimum=1, maximum=16, step=1, value=1)
                with gr.TabItem("Color"):
                    enable_color_limit = gr.Checkbox(label="Enable", value=False)
                    number_of_colors = gr.Slider(label="Palette Size", minimum=1, maximum=256, step=1, value=16)
                with gr.TabItem("Grayscale"):
                    is_grayscale = gr.Checkbox(label="Enable", value=False)
                    number_of_shades = gr.Slider(label="Palette Size", minimum=1, maximum=256, step=1, value=16)
                with gr.TabItem("Black and white"):
                    is_black_and_white = gr.Checkbox(label="Enable", value=False)
                    black_and_white_threshold = gr.Slider(label="Threshold", minimum=1, maximum=256, step=1, value=128)
                with gr.TabItem("Custom color palette"):
                    use_color_palette = gr.Checkbox(label="Enable", value=False)
                    palette_image = gr.Image(label="Color palette image", type="pil")
                    palette_colors = gr.Slider(label="Palette Size (only for complex images)", minimum=1, maximum=256, step=1, value=16)
                    
            post_process_button = gr.Button("Apply Post-Processing")

    gallery.select(update_selection, outputs=[prompt, selected_info, selected_state])
    prompt.submit(fn=run_lora, inputs=[prompt, selected_state], outputs=[result, refined_result])
    post_process_button.click(
        fn=apply_post_processing,
        inputs=[refined_result, downscale, enable_color_limit, is_grayscale, is_black_and_white],
        outputs=[post_processed_result],
    )

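# Queue incoming requests: at most 5 are processed concurrently and up to 20 can wait.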
app.queue(max_size=20, concurrency_count=5)
app.launch()