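# Gradio app: text-to-image with FLUX.1 (optionally through a LoRA repo), plus an
# optional upscaling pass through the FineGrain or Clarity upscaler Spaces.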
import os
import random
import gradio as gr
import numpy as np
from gradio_client import Client, handle_file
from gradio_imageslider import ImageSlider
from huggingface_hub import AsyncInferenceClient
from translatepy import Translator
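# API tokens read from the environment: HF_TOKEN for text-to-image inference,
# HF_TOKEN_UPSCALER for the FineGrain enhancer Space.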
translator = Translator()
HF_TOKEN = os.environ.get("HF_TOKEN")
HF_TOKEN_UPSCALER = os.environ.get("HF_TOKEN_UPSCALER")
MAX_SEED = np.iinfo(np.int32).max
CSS = "footer { visibility: hidden; }"
JS = "function () { gradioURL = window.location.href; if (!gradioURL.endsWith('?__theme=dark')) { window.location.replace(gradioURL + '?__theme=dark'); } }"
def enable_lora(lora_add, basemodel):
    return basemodel if not lora_add else lora_add
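# Translate the prompt to English, append the LoRA trigger word, and run
# text-to-image through the Hugging Face Inference API.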
async def generate_image(prompt, model, lora_word, width, height, scales, steps, seed):
    if seed == -1:
        seed = random.randint(0, MAX_SEED)
    seed = int(seed)
    text = str(translator.translate(prompt, 'English')) + "," + lora_word
    # Authenticate with HF_TOKEN so gated models (e.g. FLUX.1-dev) can be used.
    client = AsyncInferenceClient(token=HF_TOKEN)
    image = await client.text_to_image(prompt=text, height=height, width=width, guidance_scale=scales, num_inference_steps=steps, model=model)
    return image, seed
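# Upscale through the jbilcke-hf/clarity-upscaler Space; the positional arguments
# below are forwarded as-is to that Space's /predict endpoint.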
def get_clarity_upscale(prompt, img_path, upscale_factor):
    client = Client("jbilcke-hf/clarity-upscaler")
    result = client.predict(
        img_path,
        prompt,
        "",
        upscale_factor,
        1,
        3,
        3,
        "16",
        "16",
        "epicrealism_naturalSinRC1VAE.safetensors [84d76a0328]",
        "DPM++ 2M Karras",
        1,
        3,
        True,
        3,
        "Hello!!",
        "Hello!!",
        api_name="/predict"
    )
    print(result)
    return result
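# Full pipeline: generate with FLUX (optionally via a LoRA), save the result to disk,
# then optionally upscale it. Returns [original, upscaled] for the ImageSlider.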
async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora, upscaler_choice):
    # Use the LoRA repo only when "Process LORA" is checked, otherwise the base model.
    model = enable_lora(lora_model, basemodel) if process_lora else basemodel
    image, seed = await generate_image(prompt, model, "", width, height, scales, steps, seed)
    image_path = "temp_image.png"
    image.save(image_path)
    # Default to the un-upscaled image so the return value is always defined.
    upscale_image = image_path
    if process_upscale:
        if upscaler_choice == "FineGrain":
            upscale_image = get_upscale_finegrain(prompt, image_path, upscale_factor)
        elif upscaler_choice == "Upscaler Clarity":
            upscale_image = get_clarity_upscale(prompt, image_path, upscale_factor)
    return [image_path, upscale_image]
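# Upscale through the finegrain/finegrain-image-enhancer Space (authenticated with
# HF_TOKEN_UPSCALER); the second element of the result is the enhanced image.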
def get_upscale_finegrain(prompt, img_path, upscale_factor):
    client = Client("finegrain/finegrain-image-enhancer", hf_token=HF_TOKEN_UPSCALER)
    result = client.predict(input_image=handle_file(img_path), prompt=prompt, negative_prompt="", seed=42, upscale_factor=upscale_factor, controlnet_scale=0.6, controlnet_decay=1, condition_scale=6, tile_width=112, tile_height=144, denoise_strength=0.35, num_inference_steps=18, solver="DDIM", api_name="/process")
    return result[1]
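# Extra CSS for the centered column layout; appended to the base CSS when building the UI.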
css = """
#col-container{
margin: 0 auto;
max-width: 1024px;
}
"""
with gr.Blocks(css=CSS + css, js=JS, theme="Nymbo/Nymbo_Theme") as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("Flux Upscaled +LORA")
        with gr.Row():
            with gr.Column(scale=1.5):
                output_res = ImageSlider(label="Flux / Upscaled")
            with gr.Column(scale=0.8):
                prompt = gr.Textbox(label="Prompt")
                basemodel_choice = gr.Dropdown(label="Base Model", choices=["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-dev"], value="black-forest-labs/FLUX.1-schnell")
                lora_model_choice = gr.Dropdown(label="LORA Model", choices=["Shakker-Labs/FLUX.1-dev-LoRA-add-details", "XLabs-AI/flux-RealismLora"])
                process_lora = gr.Checkbox(label="Process LORA", value=True)
                upscale_factor = gr.Radio(label="UpScale Factor", choices=[2, 4, 8], value=2, scale=2)
                process_upscale = gr.Checkbox(label="Process Upscale", value=False)
                upscaler_choice = gr.Radio(label="Upscaler", choices=["FineGrain", "Upscaler Clarity"], value="FineGrain")
                with gr.Accordion(label="Advanced Options", open=False):
                    width = gr.Slider(label="Width", minimum=512, maximum=1280, step=8, value=512)
                    height = gr.Slider(label="Height", minimum=512, maximum=1280, step=8, value=512)
                    scales = gr.Slider(label="Guidance", minimum=3.5, maximum=7, step=0.1, value=3.5)
                    steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=24)
                    seed = gr.Slider(label="Seeds", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
                submit_btn = gr.Button("Submit", scale=1)
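    # Clear the slider first, then run the async generate/upscale pipeline.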
    submit_btn.click(
        fn=lambda: None,
        inputs=None,
        outputs=[output_res],
        queue=False
    ).then(
        fn=gen,
        inputs=[prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora, upscaler_choice],
        outputs=[output_res]
    )
demo.launch()