---
title: CompVis Stable Diffusion V1 4
emoji: π
colorFrom: pink
colorTo: purple
sdk: gradio
pinned: false
license: bigscience-openrail-m
sdk_version: 5.12.0
---
Install the dependencies; the CUDA 11.8 index URL pulls GPU-enabled PyTorch wheels:

```bash
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118  # GPU support
pip install diffusers transformers flask pillow accelerate
```
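Before loading the model, it can help to confirm that the CUDA build of PyTorch is actually active. A minimal check, not specific to this Space:

```python
import torch

# Prints True when the CUDA-enabled wheels are installed and a GPU is visible
print(torch.cuda.is_available())
```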
Authenticate with Hugging Face, load Stable Diffusion v1-4 on the GPU, and generate an image:

```python
import torch
from diffusers import StableDiffusionPipeline
from huggingface_hub import login

# Authenticate with Hugging Face
login(token="your_hugging_face_token")

# Load Stable Diffusion v1-4 in half precision
model_id = "CompVis/stable-diffusion-v1-4"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")  # Use GPU for faster performance

prompt = "A luxurious futuristic bathroom with marble walls and golden accents, panoramic views of a tropical jungle, ultra-realistic, 32k resolution"
num_steps = 50        # Number of diffusion steps
guidance_scale = 7.5  # Higher = more faithful to the prompt

# Generate an image
image = pipe(prompt, num_inference_steps=num_steps, guidance_scale=guidance_scale).images[0]

# Save the image
image.save("generated_image.png")
```
The same pipeline can be exposed as a small Flask API that returns the generated image as a PNG:

```python
import torch
from flask import Flask, request, send_file
from diffusers import StableDiffusionPipeline

app = Flask(__name__)

# Load Stable Diffusion v1-4 once at startup
model_id = "CompVis/stable-diffusion-v1-4"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")

@app.route("/generate", methods=["POST"])
def generate_image():
    data = request.json
    prompt = data.get("prompt", "A beautiful fantasy landscape")
    # Cast explicitly: clients (like the HTML form below) may send these as strings
    num_steps = int(data.get("steps", 50))
    guidance_scale = float(data.get("guidance_scale", 7.5))

    # Generate image
    image = pipe(prompt, num_inference_steps=num_steps, guidance_scale=guidance_scale).images[0]
    output_path = "output.png"
    image.save(output_path)

    return send_file(output_path, mimetype="image/png")

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)
```
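With the server running, the endpoint can also be exercised from Python. This sketch assumes the `requests` package is installed (it is not in the install list above), and the prompt is only an example:

```python
import requests

# Hypothetical client call against the local Flask server above
payload = {"prompt": "A watercolor lighthouse at dawn", "steps": 30, "guidance_scale": 7.5}
response = requests.post("http://localhost:5000/generate", json=payload, timeout=600)
response.raise_for_status()

with open("api_image.png", "wb") as f:
    f.write(response.content)
```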
A simple HTML page can call the Flask endpoint and display the result. Note that if the page is served from a different origin than the Flask server, the browser will block the request unless CORS is enabled on the server.

```html
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>Stable Diffusion Generator</title>
</head>
<body>
  <h1>Stable Diffusion v1-4 Image Generator</h1>
  <form id="image-form">
    <label for="prompt">Prompt:</label><br>
    <input type="text" id="prompt" name="prompt" required><br><br>
    <label for="steps">Inference Steps:</label><br>
    <input type="number" id="steps" name="steps" value="50"><br><br>
    <label for="guidance_scale">Guidance Scale:</label><br>
    <input type="number" id="guidance_scale" name="guidance_scale" value="7.5" step="0.1"><br><br>
    <button type="submit">Generate Image</button>
  </form>

  <h2>Generated Image:</h2>
  <img id="generated-image" alt="Generated Image" style="max-width: 100%;">

  <script>
    document.getElementById("image-form").addEventListener("submit", async (event) => {
      event.preventDefault();

      const prompt = document.getElementById("prompt").value;
      const steps = document.getElementById("steps").value;
      const guidanceScale = document.getElementById("guidance_scale").value;

      const response = await fetch("http://localhost:5000/generate", {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
        },
        body: JSON.stringify({ prompt, steps, guidance_scale: guidanceScale }),
      });

      if (response.ok) {
        const blob = await response.blob();
        const url = URL.createObjectURL(blob);
        document.getElementById("generated-image").src = url;
      } else {
        console.error("Error generating image");
      }
    });
  </script>
</body>
</html>
```
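The Space metadata above declares `sdk: gradio`, so inside the Space itself the model would typically be served through a Gradio app rather than Flask. A minimal sketch, assuming the same pipeline setup as above; the interface layout and parameter ranges are illustrative choices, not part of the original:

```python
import torch
import gradio as gr
from diffusers import StableDiffusionPipeline

model_id = "CompVis/stable-diffusion-v1-4"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")

def generate(prompt, steps, guidance_scale):
    # Run the diffusion pipeline and return a PIL image for Gradio to display
    result = pipe(prompt, num_inference_steps=int(steps), guidance_scale=float(guidance_scale))
    return result.images[0]

demo = gr.Interface(
    fn=generate,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Slider(10, 100, value=50, step=1, label="Inference Steps"),
        gr.Slider(1.0, 15.0, value=7.5, step=0.5, label="Guidance Scale"),
    ],
    outputs=gr.Image(type="pil", label="Generated Image"),
    title="Stable Diffusion v1-4 Image Generator",
)

if __name__ == "__main__":
    demo.launch()
```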