# interior-model1 / app.py
import gradio as gr
import torch
from diffusers import StableDiffusionPipeline, StableDiffusionControlNetPipeline, ControlNetModel, EulerDiscreteScheduler
from jinja2 import Template
import numpy as np
import cv2
from PIL import Image
import time
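# Gradio app for interior-design image generation:
#   1. Build a prompt from room/style choices using the Jinja2 templates below.
#   2. Generate one or more candidate images with Stable Diffusion, storing each seed.
#   3. Optionally modify a selected candidate with a ControlNet pipeline conditioned
#      on an edge map of the chosen image, reusing its stored seed.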
# Load models from Hugging Face
sd_model_id = "bhoomikagp/sd2-interior-model-version2"  ## test
# sd_model_id = "bhoomikagp/sd3-interior-model"  ## SD3 model has loading issues
controlnet_model_id = "lllyasviel/sd-controlnet-mlsd"  # MLSD (straight-line) ControlNet

# Check if CUDA is available and pick a matching dtype
device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if device == "cuda" else torch.float32
print(f"Using device: {device}")

# Shared scheduler for both pipelines
scheduler = EulerDiscreteScheduler.from_pretrained(sd_model_id, subfolder="scheduler")
# Alternative base model / scheduler that were tried:
# sd_model_id = "stabilityai/stable-diffusion-2-1"
# scheduler = DPMSolverMultistepScheduler.from_pretrained(sd_model_id, subfolder="scheduler")

# Stable Diffusion pipeline for the initial image generation
sd_pipeline = StableDiffusionPipeline.from_pretrained(
    sd_model_id,
    torch_dtype=torch_dtype,
    scheduler=scheduler
).to(device)

# ControlNet + Stable Diffusion pipeline for the modification step
# (the ControlNet checkpoint must match the base model's UNet architecture)
controlnet = ControlNetModel.from_pretrained(controlnet_model_id, torch_dtype=torch_dtype).to(device)
controlnet_pipeline = StableDiffusionControlNetPipeline.from_pretrained(
    sd_model_id,
    controlnet=controlnet,
    scheduler=scheduler,
    torch_dtype=torch_dtype
).to(device)
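# Optional: on smaller GPUs, attention slicing can reduce VRAM usage at some speed cost.
# Left disabled here; uncomment if the Space runs out of memory.
# sd_pipeline.enable_attention_slicing()
# controlnet_pipeline.enable_attention_slicing()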
# choices lists
option_choices = ["living_room", "bedroom", "kitchen"]
adj_1_choices = [
"spacious", "cozy", "minimalist", "elegant", "modern", "rustic",
"luxurious", "inviting", "airy", "sophisticated", "bright", "warm",
"serene", "chic", "contemporary"
]
architecture_style_choices = [
"modern", "contemporary", "traditional", "industrial", "scandinavian",
"mid-century", "colonial", "art deco", "neo-classical", "mediterranean",
"gothic", "baroque", "japanese", "brutalist", "tropical"
]
aesthetic_choices = [
"bohemian", "vintage", "minimalist", "luxurious", "eclectic",
"mid-century", "art deco", "modern farmhouse", "industrial chic",
"shabby chic", "rustic elegance", "coastal", "urban", "transitional",
"contemporary classic"
]
primary_color_choices = [
"blue", "green", "beige", "grey", "white", "black",
"cream", "brown", "taupe", "burgundy", "mustard",
"terracotta", "olive", "peach", "navy"
]
wood_finish_choices = [
"dark oak", "walnut", "mahogany", "teak", "maple",
"cherry", "pine", "birch", "ash", "rosewood",
"ebony", "cedar", "hickory", "elm", "red oak"
]
wall_color_choices = [
"cream", "off-white", "charcoal", "sage green", "navy blue",
"taupe", "light grey", "soft pink", "mustard yellow", "deep teal",
"warm beige", "pearl white", "slate blue", "coral", "mint green"
]
tiles_choices = [
"marble", "ceramic", "porcelain", "slate", "wooden-look",
"mosaic", "granite", "terracotta", "cement", "quartz",
"limestone", "onyx", "travertine", "glass", "encaustic"
]
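# These lists both populate the Gradio radio controls below and serve as the
# accepted values checked in generate_prompt().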
# Templates for each room type
templates = {
"living_room": """
High quality, High resolution, Interior, Architecture, Revit, Autocad, Realistic 3D Render, vray, lumion, raytracing,
of a {{ adj_1 }} living room, in {{ architecture_style }} architecture, with {{ aesthetic }} style,
painted in {{ wall_color }} with {{ primary_color }} accents, {{ wood_finish }} wood finishes, and {{ tiles }} flooring.
""",
"bedroom": """
High quality, High resolution, Interior, Architecture, Revit, Autocad, Realistic 3D Render, vray, lumion, raytracing,
of a {{ adj_1 }} bedroom, in {{ architecture_style }} architecture, with {{ aesthetic }} style,
painted in {{ wall_color }} with {{ primary_color }} accents, {{ wood_finish }} wood finishes, and {{ tiles }} flooring.
""",
"kitchen": """
High quality, High resolution, Interior, Architecture, Revit, Autocad, Realistic 3D Render, vray, lumion, raytracing,
of a {{ adj_1 }} kitchen, in {{ architecture_style }} architecture, with {{ aesthetic }} style,
painted in {{ wall_color }} with {{ primary_color }} accents, {{ wood_finish }} cabinetry, and {{ tiles }} flooring.
"""
}
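# Example: option="living_room", adj_1="cozy", architecture_style="scandinavian",
# aesthetic="minimalist", primary_color="beige", wood_finish="birch",
# wall_color="off-white", tiles="wooden-look" renders roughly to:
#   "High quality, High resolution, ... of a cozy living room, in scandinavian architecture,
#    with minimalist style, painted in off-white with beige accents, birch wood finishes,
#    and wooden-look flooring."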
def generate_prompt(option, adj_1, architecture_style, aesthetic, primary_color, wood_finish, wall_color, tiles):
# Validate inputs
if adj_1 not in adj_1_choices:
raise ValueError(f"Invalid adjective: '{adj_1}'. Accepted options are {adj_1_choices}")
if architecture_style not in architecture_style_choices:
raise ValueError(f"Invalid architecture style: '{architecture_style}'. Accepted options are {architecture_style_choices}")
if aesthetic not in aesthetic_choices:
raise ValueError(f"Invalid aesthetic style: '{aesthetic}'. Accepted options are {aesthetic_choices}")
if primary_color not in primary_color_choices:
raise ValueError(f"Invalid primary color: '{primary_color}'. Accepted options are {primary_color_choices}")
if wood_finish not in wood_finish_choices:
raise ValueError(f"Invalid wood finish: '{wood_finish}'. Accepted options are {wood_finish_choices}")
if wall_color not in wall_color_choices:
raise ValueError(f"Invalid wall color: '{wall_color}'. Accepted options are {wall_color_choices}")
if tiles not in tiles_choices:
raise ValueError(f"Invalid tiles: '{tiles}'. Accepted options are {tiles_choices}")
# Select the template based on the room type option
template_str = templates.get(option.lower())
if not template_str:
raise ValueError(f"Invalid option: '{option}'. Available options are {option_choices}")
# Render the template
template = Template(template_str)
return template.render(
adj_1=adj_1,
architecture_style=architecture_style,
aesthetic=aesthetic,
primary_color=primary_color,
wood_finish=wood_finish,
wall_color=wall_color,
tiles=tiles
)
# Function to generate the initial candidate images
def generate_initial_image(n_images=1, option='living_room', adj_1="spacious", architecture_style="modern", aesthetic="minimalist",
                           primary_color="beige", wood_finish="dark oak", wall_color="off-white", tiles="marble"):
# Generate the prompt from choices
gen_prompt = generate_prompt(option, adj_1, architecture_style, aesthetic, primary_color, wood_finish, wall_color, tiles)
images = [] # List to store generated images along with seeds
for i in range(n_images):
        # Draw a random seed and keep it so this image can be reproduced later
        seed = torch.randint(0, 2**32, (1,)).item()
        generator = torch.manual_seed(seed)
        # Generation settings for the Stable Diffusion pipeline
        cfg = 8        # classifier-free guidance scale
        steps = 30     # number of inference steps
        width, height = 640, 512
# pipeline without controlnet
img = sd_pipeline(prompt=gen_prompt, guidance_scale=cfg, num_inference_steps=steps, width=width, height=height, generator=generator).images[0]
# Append the seed and image to the list for tracking
images.append((seed, img))
return images
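# Each returned item pairs the random seed with the full-resolution PIL image, so the
# modification step below can rebuild the same generator state from the chosen seed.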
# Function to modify a selected image using the ControlNet pipeline
def modify_image_with_controlnet(selected_image, additional_prompt, option='living_room', adj_1="spacious", architecture_style="modern", aesthetic="minimalist",
                                 primary_color="brown", wood_finish="walnut", wall_color="warm beige", tiles="ceramic"):
    gen_prompt = generate_prompt(option, adj_1, architecture_style, aesthetic, primary_color, wood_finish, wall_color, tiles)
    # Emphasise the optional extra prompt with prompt-weighting parentheses; skip it when empty
    if additional_prompt:
        gen_prompt += ' ((' + additional_prompt + '))'
seed = selected_image[0]
generator = torch.manual_seed(seed)
    cfg = 8        # classifier-free guidance scale
    steps = 30     # number of inference steps
    width, height = 640, 512
    # Preprocessing for ControlNet: convert the selected image to a Canny edge map
    # (note: the MLSD ControlNet is normally fed a straight-line map, but a Canny edge map is used here)
    img = np.array(selected_image[1])
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    edges = cv2.Canny(gray, 100, 200)
    edges_image = Image.fromarray(edges)
    controlnet_strength = 0.85  # conditioning strength passed to the ControlNet pipeline
image = controlnet_pipeline(prompt=gen_prompt, image=edges_image, guidance_scale=cfg, width=width, height=height,
controlnet_conditioning_scale=controlnet_strength, num_inference_steps=steps, generator=generator).images[0]
return image
# Gradio app logic
def generate_image(option, adj_1, architecture_style, aesthetic, primary_color, wood_finish, wall_color, tiles, n_images, progress=gr.Progress()):
generated_images = generate_initial_image(
n_images=n_images,
option=option,
adj_1=adj_1,
architecture_style=architecture_style,
aesthetic=aesthetic,
primary_color=primary_color,
wood_finish=wood_finish,
wall_color=wall_color,
tiles=tiles
)
display_images = [img.resize((640, 512)) for _, img in generated_images] # Resize only for display
image_identifiers = [f"Image-{i+1}" for i in range(n_images)]
return display_images, generated_images, image_identifiers
def modify_image(selected_image_id, additional_prompt, option, adj_1, architecture_style, aesthetic, primary_color, wood_finish, wall_color, tiles, images):
# Parse selected image identifier to get the index
image_index = int(selected_image_id.split("-")[1]) - 1
selected_image = images[image_index] # Retrieve the (seed, original_img) tuple without resizing
# Modify image using ControlNet
modified_image = modify_image_with_controlnet(
selected_image=selected_image,
additional_prompt=additional_prompt,
option=option,
adj_1=adj_1,
architecture_style=architecture_style,
aesthetic=aesthetic,
primary_color=primary_color,
wood_finish=wood_finish,
wall_color=wall_color,
tiles=tiles
)
return modified_image
# Interface
with gr.Blocks() as app:
gr.Markdown("# Interior Design Image Generation and Modification")
# Image generation options
with gr.Row():
option = gr.Radio(label="Room Type", choices=option_choices, value="living_room")
adj_1 = gr.Radio(label="Primary Adjective", choices=adj_1_choices, value="spacious")
architecture_style = gr.Radio(label="Architecture Style", choices=architecture_style_choices, value="modern")
with gr.Row():
aesthetic = gr.Radio(label="Aesthetic", choices=aesthetic_choices, value="minimalist")
        primary_color = gr.Radio(label="Primary Color", choices=primary_color_choices, value="beige")
        wood_finish = gr.Radio(label="Wood Finish", choices=wood_finish_choices, value="dark oak")
with gr.Row():
wall_color = gr.Radio(label="Wall Color", choices=wall_color_choices, value="off-white")
tiles = gr.Radio(label="Tile Type", choices=tiles_choices, value="marble")
n_images = gr.Slider(label="Number of Images to Generate", minimum=1, maximum=5, step=1, value=1)
# Button to generate images
generate_btn = gr.Button("Generate Images")
output_images = gr.Gallery(label="Generated Images", columns=5) # Display resized images in gallery layout
output_generated_images = gr.State() # Hidden state to store original (unmodified) images data
output_identifiers = gr.State() # Hidden state for image identifiers
# Image selection and modification
selected_image_id = gr.Radio(label="Select an Image for Modification", choices=[])
additional_prompt = gr.Textbox(label="Additional Prompt for Modification", placeholder="OPTIONAL")
modify_btn = gr.Button("Modify Image")
modified_image = gr.Image(label="Modified Image")
# Button actions
generate_btn.click(
fn=generate_image,
inputs=[option, adj_1, architecture_style, aesthetic, primary_color, wood_finish, wall_color, tiles, n_images],
outputs=[output_images, output_generated_images, output_identifiers]
)
# Update radio button options after images are generated
output_images.change(
fn=lambda identifiers: gr.update(choices=identifiers, visible=True), # Populate with image identifiers
inputs=[output_identifiers],
outputs=[selected_image_id]
)
# Trigger modification on button click
modify_btn.click(
fn=modify_image,
inputs=[selected_image_id, additional_prompt, option, adj_1, architecture_style, aesthetic, primary_color, wood_finish, wall_color, tiles, output_generated_images],
outputs=modified_image
)
app.launch(debug=True, share=True)