|
import gradio as gr |
|
from diffusers import StableDiffusionPipeline, DiffusionPipeline |
|
import torch |
|
|
|
|
|
def load_model(base_model_id, adapter_model_id):
    """Load a Stable Diffusion pipeline, optionally applying a LoRA adapter.

    Args:
        base_model_id: Hugging Face Hub ID of the base pipeline
            (e.g. "CompVis/stable-diffusion-v1-4").
        adapter_model_id: Optional Hub ID of a LoRA adapter; empty/blank
            means no adapter is applied.

    Returns:
        (pipe, info): the loaded pipeline (or None on failure) and a
        human-readable device/status or error string.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    info = f"Running on {'GPU (CUDA) 🔥' if device == 'cuda' else 'CPU 🥶'}"

    try:
        pipe = StableDiffusionPipeline.from_pretrained(
            base_model_id,
            # fp16 halves memory on GPU; CPU inference needs fp32.
            torch_dtype=torch.float16 if device == "cuda" else torch.float32
        ).to(device)

        # BUG FIX: the original code built a second pipeline from the
        # adapter ID, loaded LoRA weights from the *base* model ID onto it,
        # and then discarded that pipeline — so the adapter was never
        # applied. The LoRA weights must be loaded from the adapter repo
        # onto the base pipeline itself.
        if adapter_model_id and adapter_model_id.strip():
            pipe.load_lora_weights(adapter_model_id.strip())

        return pipe, info

    except Exception as e:
        # Boundary handler: surface the failure to the UI rather than raise.
        return None, f"Error loading model: {str(e)}"
|
|
|
|
|
def generate_image(base_model_id, adapter_model_id, prompt):
    """Generate a single image from *prompt* with the requested model stack.

    Returns a pair (PIL image or None, status/error message) matching the
    Gradio output components.
    """
    pipe, status = load_model(base_model_id, adapter_model_id)

    # Loading failed — forward the error message to the UI, no image.
    if pipe is None:
        return None, status

    try:
        result = pipe(prompt)
        return result.images[0], status
    except Exception as err:
        return None, f"Error generating image: {str(err)}"
|
|
|
|
|
# --- Gradio UI -------------------------------------------------------------
# Two-column layout: model IDs and prompt inputs on the left; the generated
# image plus a device/status line on the right.
with gr.Blocks() as demo:
    gr.Markdown("## Custom Text-to-Image Generator with Adapter Support")

    with gr.Row():
        with gr.Column():
            # Base diffusion model to pull from the Hugging Face Hub.
            base_model_id = gr.Textbox(
                label="Enter Base Model ID (e.g., CompVis/stable-diffusion-v1-4)",
                placeholder="Base Model ID"
            )
            # Optional LoRA adapter ID; empty string means "no adapter".
            adapter_model_id = gr.Textbox(
                label="Enter Adapter Model ID (optional, e.g., nevreal/vMurderDrones-Lora)",
                placeholder="Adapter Model ID (optional)",
                value=""
            )
            prompt = gr.Textbox(
                label="Enter your prompt",
                placeholder="Describe the image you want to generate"
            )
            generate_btn = gr.Button("Generate Image")

        with gr.Column():
            output_image = gr.Image(label="Generated Image")
            # Shows the "Running on GPU/CPU" info or an error message
            # returned by generate_image.
            device_info = gr.Markdown()

    # Wire the button: generate_image returns (image, message), matching
    # the two output components in order.
    generate_btn.click(
        fn=generate_image,
        inputs=[base_model_id, adapter_model_id, prompt],
        outputs=[output_image, device_info]
    )

# NOTE: models are re-downloaded/re-loaded on every click (no caching) —
# acceptable for a demo, but slow for repeated generations.
demo.launch()
|
|