from flask import Flask, request, jsonify, send_file
from diffusers import StableDiffusionPipeline
import torch
from io import BytesIO
from PIL import Image

app = Flask(__name__)

# Load the model
model_id = "sairajg/Text_To_Image"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")  # Ensure you're using a GPU for inference


@app.route("/generate", methods=["POST"])
def generate_image():
    # Accept a JSON body; fall back to an empty dict if the body is missing or not JSON
    data = request.get_json(silent=True) or {}
    prompt = data.get("prompt")
    if not prompt:
        return jsonify({"error": "Prompt is required"}), 400

    # Generate image
    with torch.autocast("cuda"):
        image = pipe(prompt).images[0]

    # Save image to a BytesIO object
    img_io = BytesIO()
    image.save(img_io, format="PNG")
    img_io.seek(0)

    return send_file(img_io, mimetype="image/png")


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)
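For reference, here is a minimal client sketch for exercising the endpoint above. It assumes the server is running locally on port 5000 and that the `requests` library is installed; the script name and the output filename (`output.png`) are arbitrary choices for illustration, and it should be saved as a separate file rather than appended to the server module.

import requests


def generate(prompt: str, out_path: str = "output.png") -> None:
    # POST the prompt as JSON to the /generate endpoint and write the returned PNG bytes to disk.
    resp = requests.post(
        "http://localhost:5000/generate",
        json={"prompt": prompt},
        timeout=300,  # generation can take a while on slower GPUs
    )
    resp.raise_for_status()
    with open(out_path, "wb") as f:
        f.write(resp.content)


if __name__ == "__main__":
    generate("a watercolor painting of a lighthouse at sunset")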