import io
import json
import logging

import gradio as gr
import requests
from PIL import Image

from image_processing import downscale_image, limit_colors, convert_to_grayscale, convert_to_black_and_white
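
# The logging.debug() call below emits nothing unless logging is configured;
# a minimal setup (the DEBUG level here is an assumption, adjust as needed):
logging.basicConfig(level=logging.DEBUG)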

# Minimal stand-in for a pipeline-style result object that exposes a list of
# generated images.
class SomeClass:
    def __init__(self):
        self.images = []

# Load the LoRA catalog that drives the gallery.
with open('loras.json', 'r') as f:
    loras = json.load(f)
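# Each catalog entry is expected to provide the keys used below: "image"
# (gallery preview), "title", "repo" (Hugging Face model id), and "trigger_word".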

def update_selection(selected_state: gr.SelectData):
    # Called when a gallery item is selected; updates the prompt placeholder
    # and the info text to match the chosen LoRA.
    logging.debug(f"Inside update_selection, selected_state: {selected_state}")
    selected_lora_index = selected_state.index
    selected_lora = loras[selected_lora_index]
    new_placeholder = f"Type a prompt for {selected_lora['title']}"
    lora_repo = selected_lora["repo"]
    updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✨"
    return (
        gr.update(placeholder=new_placeholder),
        updated_text,
        selected_state,
    )

def run_lora(prompt, selected_state, progress=gr.Progress(track_tqdm=True)):
    # Generate an image via the Hugging Face Inference API, appending the
    # selected LoRA's trigger word to the user's prompt.
    selected_lora_index = selected_state.index
    selected_lora = loras[selected_lora_index]
    api_url = f"https://api-inference.huggingface.co/models/{selected_lora['repo']}"
    payload = {
        "inputs": f"{prompt} {selected_lora['trigger_word']}",
        "parameters": {"negative_prompt": "bad art, ugly, watermark, deformed"},
    }
    response = requests.post(api_url, json=payload)
    if response.status_code == 200:
        original_image = Image.open(io.BytesIO(response.content))
        processed = SomeClass()
        processed.images = [original_image]
        refined_image = processed.images[-1]
        return original_image, refined_image
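    # Assumption: surface API failures in the UI instead of silently returning
    # None for both outputs; gr.Error displays the message in the Gradio app.
    raise gr.Error(f"Inference API request failed with status {response.status_code}")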

def apply_post_processing(image, downscale, use_color_limit, grayscale, black_and_white):
    # Apply the enabled pixel-art transforms in sequence. The color-limit flag
    # is named use_color_limit so it does not shadow the imported limit_colors().
    processed_image = image.copy()
    if downscale > 1:
        processed_image = downscale_image(processed_image, downscale)
    if use_color_limit:
        processed_image = limit_colors(processed_image)
    if grayscale:
        processed_image = convert_to_grayscale(processed_image)
    if black_and_white:
        processed_image = convert_to_black_and_white(processed_image)
    return processed_image
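
# Note: several of the controls declared in the UI below (palette sizes,
# quantization/dither methods, threshold, custom palette) are not yet wired
# into apply_post_processing.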

with gr.Blocks() as app:
    title = gr.Markdown("# artificialguybr LoRA portfolio")
    description = gr.Markdown("### This is a Pixel Art Generator using SD LoRAs.")
    selected_state = gr.State()
    with gr.Row():
        gallery = gr.Gallery(
            [(item["image"], item["title"]) for item in loras],
            label="LoRA Gallery",
            allow_preview=False,
            columns=1,
        )
        with gr.Column():
            prompt_title = gr.Markdown("### Click on a LoRA in the gallery to create with it")
            selected_info = gr.Markdown("")
            with gr.Row():
                prompt = gr.Textbox(label="Prompt", show_label=False, lines=1, max_lines=1, placeholder="Type a prompt after selecting a LoRA")
                button = gr.Button("Run")
            result = gr.Image(interactive=False, label="Generated Image")
            post_processed_result = gr.Image(interactive=False, label="Post-Processed Image")

    # Pixel-art post-processing controls (same gr.Blocks context as the UI above).
    with gr.Accordion(label="Pixel art", open=True):
        with gr.Row():
            enabled = gr.Checkbox(label="Enable", value=False)
            downscale = gr.Slider(label="Downscale", minimum=1, maximum=32, step=2, value=8)
            need_rescale = gr.Checkbox(label="Rescale to original size", value=True)
        with gr.Tabs():
            with gr.TabItem("Color"):
                enable_color_limit = gr.Checkbox(label="Enable", value=False)
                palette_size_color = gr.Slider(label="Palette Size", minimum=1, maximum=256, step=1, value=16)
                quantization_methods_color = gr.Radio(choices=["Median Cut", "Maximum Coverage", "Fast Octree"], label="Color Quantization Method")
                dither_methods_color = gr.Radio(choices=["None", "Floyd-Steinberg"], label="Color Dither Method")
                k_means_color = gr.Checkbox(label="Enable k-means for color quantization", value=True)
            with gr.TabItem("Grayscale"):
                enable_grayscale = gr.Checkbox(label="Enable", value=False)
                palette_size_gray = gr.Slider(label="Palette Size", minimum=1, maximum=256, step=1, value=16)
                quantization_methods_gray = gr.Radio(choices=["Median Cut", "Maximum Coverage", "Fast Octree"], label="Color Quantization Method")
                dither_methods_gray = gr.Radio(choices=["None", "Floyd-Steinberg"], label="Color Dither Method")
                k_means_gray = gr.Checkbox(label="Enable k-means for color quantization", value=True)
            with gr.TabItem("Black and white"):
                enable_black_and_white = gr.Checkbox(label="Enable", value=False)
                inverse_black_and_white = gr.Checkbox(label="Inverse", value=False)
                threshold_black_and_white = gr.Slider(label="Threshold", minimum=1, maximum=256, step=1, value=128)
            with gr.TabItem("Custom color palette"):
                enable_custom_palette = gr.Checkbox(label="Enable", value=False)
                palette_image = gr.Image(label="Color palette image", type="pil")
                palette_size_custom = gr.Slider(label="Palette Size", minimum=1, maximum=256, step=1, value=16)
                dither_methods_custom = gr.Radio(choices=["None", "Floyd-Steinberg"], label="Color Dither Method")
        post_process_button = gr.Button("Apply Post-Processing")

    # Wire the UI events to their handlers.
    gallery.select(update_selection, outputs=[prompt, selected_info, selected_state])
    prompt.submit(fn=run_lora, inputs=[prompt, selected_state], outputs=[result, post_processed_result])
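    # Assumption: the Run button triggers the same generation as submitting the prompt.
    button.click(fn=run_lora, inputs=[prompt, selected_state], outputs=[result, post_processed_result])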
    post_process_button.click(
        fn=apply_post_processing,
        inputs=[post_processed_result, downscale, enable_color_limit, enable_grayscale, enable_black_and_white],
        outputs=[post_processed_result],
    )

app.queue(max_size=20, concurrency_count=5)
app.launch()