Update app.py
app.py CHANGED
@@ -10,7 +10,7 @@ from diffusers import FluxPipeline
 from translatepy import Translator
 
 # -----------------------------------------------------------------------------
-# CONFIGURATION
+# CONFIGURATION
 # -----------------------------------------------------------------------------
 config = {
     "model_id": "black-forest-labs/FLUX.1-dev",
@@ -60,15 +60,18 @@ def enable_lora(lora_add: str):
 # Function to generate an image from a prompt
 # -----------------------------------------------------------------------------
 @spaces.GPU()
-def generate_image(
-
-
-
+def generate_image(
+    prompt: str, lora_word: str, lora_scale: float = config["default_loRa_scale"],
+    width: int = config["default_width"], height: int = config["default_height"],
+    guidance_scale: float = config["default_guidance_scale"], steps: int = config["default_steps"],
+    seed: int = -1, nums: int = 1
+):
     pipe.to(device)
     seed = random.randint(0, config["max_seed"]) if seed == -1 else int(seed)
     prompt_english = str(translator.translate(prompt, "English"))
     full_prompt = f"{prompt_english} {lora_word}"
     generator = torch.Generator().manual_seed(seed)
+
     result = pipe(
         prompt=full_prompt, height=height, width=width, guidance_scale=guidance_scale,
         output_type="pil", num_inference_steps=steps, num_images_per_prompt=nums,
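The new keyword defaults all read from the module-level `config` dict introduced in the first hunk. Below is a minimal sketch of the keys that the new signature, the seed logic, and `gr.Blocks(css=config["css"])` assume; apart from `model_id`, every value is an illustrative placeholder rather than the Space's actual setting.

```python
# Sketch only: keys referenced by generate_image()'s new defaults and the seed logic.
# All values except model_id are assumed placeholders.
config = {
    "model_id": "black-forest-labs/FLUX.1-dev",   # shown in the first hunk
    "max_seed": 2**32 - 1,                        # assumed upper bound for random seeds
    "default_loRa_scale": 1.0,                    # assumed
    "default_width": 1024,                        # assumed
    "default_height": 1024,                       # assumed
    "default_guidance_scale": 3.5,                # assumed
    "default_steps": 28,                          # assumed
    "css": "",                                    # referenced by gr.Blocks(css=config["css"])
}
```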
@@ -88,6 +91,9 @@ example_prompts = [
 
 with gr.Blocks(css=config["css"]) as demo:
     gr.HTML("<h1><center>BR METAVERSO - Avatar Generator</center></h1>")
+
+    processing_status = gr.Markdown("**🟢 Ready**", visible=True)  # Status indicator
+
     with gr.Row():
         with gr.Column(scale=4):
             gallery = gr.Gallery(label="Flux Generated Image", columns=1, preview=True, height=600)
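For context on the components above: `gr.Gallery` accepts a plain list of PIL images, which is how a pipeline run with `output_type="pil"` and `num_images_per_prompt=nums` can be displayed directly. A minimal sketch, with a hypothetical `fake_generate` standing in for the FLUX call:

```python
# Sketch only: feeding a list of PIL images to gr.Gallery.
# fake_generate is a stand-in for the FLUX pipeline; a diffusers pipeline returns
# an object whose .images attribute is a list of PIL.Image when output_type="pil".
import gradio as gr
from PIL import Image

def fake_generate(n: float):
    return [Image.new("RGB", (256, 256), (40 * i % 255, 80, 160)) for i in range(int(n))]

with gr.Blocks() as demo:
    nums = gr.Slider(1, 4, value=2, step=1, label="Images")
    gallery = gr.Gallery(label="Preview", columns=2)
    gr.Button("Generate").click(fn=fake_generate, inputs=nums, outputs=gallery)

demo.queue().launch()
```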
@@ -106,7 +112,21 @@ with gr.Blocks(css=config["css"]) as demo:
             load_lora_btn = gr.Button(value="Load LoRA", variant="secondary")
 
     gr.Examples(examples=example_prompts, inputs=[prompt_input, lora_word_text, lora_scale_slider], cache_examples=False, examples_per_page=4)
+
+    # Ensuring processing status updates correctly
+    def update_status():
+        return "**⏳ Processing...**"
+
+    generate_btn.click(fn=update_status, inputs=[], outputs=[processing_status]).then(
+        fn=generate_image,
+        inputs=[prompt_input, lora_word_text, lora_scale_slider, width_slider, height_slider, guidance_slider, steps_slider, seed_slider, nums_slider],
+        outputs=[gallery, seed_slider]
+    ).then(
+        fn=lambda: "**✅ Done!**",
+        inputs=[],
+        outputs=[processing_status]
+    )
+
     load_lora_btn.click(fn=enable_lora, inputs=[lora_add_text], outputs=lora_add_text)
-    generate_btn.click(fn=generate_image, inputs=[prompt_input, lora_word_text, lora_scale_slider, width_slider, height_slider, guidance_slider, steps_slider, seed_slider, nums_slider], outputs=[gallery, seed_slider], api_name="run")
 
-    demo.queue().launch()
+    demo.queue().launch()
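The rewired button relies on Gradio event chaining: `.click()` returns an event handle whose `.then()` runs the next callback only after the previous one finishes, which is what flips the status banner to "Processing" before the heavy call and to "Done" afterwards. A minimal, self-contained sketch of the same pattern, with a hypothetical `slow_job` in place of `generate_image`:

```python
# Sketch only: the .click(...).then(...) chaining pattern used in the hunk above.
# slow_job is a hypothetical stand-in for the FLUX generation call.
import time
import gradio as gr

def slow_job(text: str) -> str:
    time.sleep(2)  # pretend this is the expensive pipeline call
    return text.upper()

with gr.Blocks() as demo:
    status = gr.Markdown("**🟢 Ready**")
    box = gr.Textbox(label="Input")
    out = gr.Textbox(label="Output")
    run = gr.Button("Run")

    # Each .then() step starts only after the previous step completes,
    # so the banner reads Processing -> (result appears) -> Done in order.
    run.click(fn=lambda: "**⏳ Processing...**", inputs=None, outputs=status).then(
        fn=slow_job, inputs=box, outputs=out
    ).then(
        fn=lambda: "**✅ Done!**", inputs=None, outputs=status
    )

demo.queue().launch()
```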