import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
import gradio as gr
from PIL import Image
import random

# Load the InstructPix2Pix model
model_id = "timbrooks/instruct-pix2pix"
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
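# Assumption: a CUDA GPU is available (as on a GPU-backed Space). A minimal
# CPU fallback sketch would also drop float16, which is poorly supported on CPU:
#   pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id)
#   pipe = pipe.to("cpu")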

# Initialize a random seed
seed = random.randint(0, 10000)

# Function to reset the seed
def change_style():
    global seed
    seed = random.randint(0, 10000)
    return f"Seed changed to: {seed}"

# Function to change the walls' color
def change_color(image, color):
    # Construct the wall-painting prompt
    prompt = f"paint the walls with {color} color"
    # Text CFG (guidance_scale) controls how strongly the model follows the prompt
    text_cfg = 7.5
    # Image CFG (image_guidance_scale) is a native InstructPix2Pix parameter that
    # controls how closely the output preserves the input image; higher values
    # keep more of the original
    image_cfg = 1.5
    # Apply the edit using InstructPix2Pix with both guidance scales
    edited_image = pipe(
        prompt=prompt,
        image=image,
        num_inference_steps=70,            # Number of diffusion steps
        guidance_scale=text_cfg,           # Text CFG for following the prompt
        image_guidance_scale=image_cfg,    # Image CFG to preserve the input image
        generator=torch.manual_seed(seed)  # Seeded generator for reproducible results
    ).images[0]
    return edited_image
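# Example usage outside the UI (hypothetical file path):
#   room = Image.open("room.jpg").convert("RGB")
#   painted = change_color(room, "Sage Green")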

# General image editing function
def edit_image(image, instruction):
    # Text CFG (guidance_scale) controls how strongly the model follows the prompt
    text_cfg = 12.0
    # Image CFG (image_guidance_scale) preserves the original image content
    image_cfg = 1.5
    # Apply the edit using InstructPix2Pix with both guidance scales
    edited_image = pipe(
        prompt=instruction,
        image=image,
        num_inference_steps=70,            # Number of diffusion steps
        guidance_scale=text_cfg,           # Text CFG for following the prompt
        image_guidance_scale=image_cfg,    # Image CFG to preserve the input image
        generator=torch.manual_seed(seed)  # Seeded generator for reproducible results
    ).images[0]
    return edited_image
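# Hypothetical usage, re-using `room` from the example above: reseed via
# change_style(), then re-run the same instruction for a new variation:
#   change_style()
#   variant = edit_image(room, "make it snowy")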

# Gradio interface for wall painting
def image_interface():
    with gr.Blocks() as demo_color:
        gr.Markdown("## Wall Painting App")
        # Image upload
        image_input = gr.Image(type="pil", label="Upload Room Image")

        # List of common painting colors
        common_colors = [
            "Alabaster",        # Off-white
            "Agreeable Gray",   # Warm gray
            "Sea Salt",         # Soft greenish-blue
            "Pure White",       # Bright white
            "Accessible Beige", # Warm beige
            "Mindful Gray",     # Cool gray
            "Peppercorn",       # Dark charcoal gray
            "Hale Navy",        # Dark navy blue
            "Tricorn Black",    # Pure black
            "Pale Oak",         # Soft taupe
            "Silver Strand",    # Soft blue-gray
            "Rainwashed",       # Light aqua
            "Orange Burst",     # Bright orange
            "Sunny Yellow",     # Bright yellow
            "Sage Green",       # Muted green
            "Firebrick Red",    # Deep red
            "Lavender",         # Soft purple
            "Sky Blue",         # Light blue
            "Coral",            # Vibrant coral
        ]
        # Dropdown for wall color
        color_input = gr.Dropdown(common_colors, label="Choose Wall Color")
        # Display output image
        result_image = gr.Image(label="Edited Image")
        # Button to apply the wall color transformation
        submit_button = gr.Button("Paint the walls")

        # Define action on button click
        submit_button.click(fn=change_color, inputs=[image_input, color_input], outputs=result_image)
    return demo_color

# Gradio interface for general image editing
def general_editing_interface():
    with gr.Blocks() as demo_general:
        gr.Markdown("## General Image Editing App")
        # Image upload
        image_input = gr.Image(type="pil", label="Upload an Image")
        # Textbox for instruction
        instruction_input = gr.Textbox(label="Enter the Instruction", placeholder="Describe the changes (e.g., 'Make it snowy')")
        # Display output image
        result_image = gr.Image(label="Edited Image")
        # Button to apply the transformation
        submit_button = gr.Button("Apply Edit")
        # Button to change the seed (style)
        change_style_button = gr.Button("Change the Style")
        # Output for seed change message
        seed_output = gr.Textbox(label="Seed Info", interactive=False)

        # Define actions on button clicks
        submit_button.click(fn=edit_image, inputs=[image_input, instruction_input], outputs=result_image)
        change_style_button.click(fn=change_style, outputs=seed_output)
    return demo_general

# Build both Gradio apps
color_app = image_interface()
general_editing_app = general_editing_interface()

# Combine the two apps in a tabbed layout
with gr.Blocks() as combined_demo:
    gr.Markdown("## Select the Application")
    with gr.Tab("General Image Editing App"):
        general_editing_app.render()
    with gr.Tab("Wall Painting App"):
        color_app.render()

# Launch the combined Gradio app
combined_demo.launch()
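# Note: launch() also accepts share=True to expose a temporary public link
# when running locally (optional; not needed when hosted on Spaces):
#   combined_demo.launch(share=True)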