shaaravpawar committed (verified)
Commit a3f2ded · 1 Parent(s): 9968452

Update app.py

Files changed (1)
  1. app.py  +23 -45
app.py CHANGED
@@ -1,48 +1,26 @@
-from flask import Flask, request, jsonify
-from diffusers import DiffusionPipeline
+import gradio as gr
 import torch
-from PIL import Image
-import io
-import base64
-
-app = Flask(__name__)
-
-# Load the instruct-pix2pix model
-model_id = "timbrooks/instruct-pix2pix"
-pipeline = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
-pipeline.to("cuda")  # Use "cpu" if you're running without a GPU
-
-@app.route("/", methods=["GET"])
-def home():
-    return "Welcome to the Instruct-Pix2Pix API!"
-
-@app.route("/edit-image", methods=["POST"])
-def edit_image():
-    try:
-        # Extract the prompt and image from the request
-        data = request.json
-        prompt = data.get("prompt", "A beautiful landscape with a sunset")
-        image_data = data.get("image")  # Expected as base64 encoded string
-
-        # Decode base64 image
-        image = Image.open(io.BytesIO(base64.b64decode(image_data)))
-
-        # Run the model with the prompt and image
-        edited_image = pipeline(prompt=prompt, image=image).images[0]
-
-        # Save the edited image
-        output_image_path = "edited_image.png"
-        edited_image.save(output_image_path)
-
-        # Optionally return the image as base64 in the response
-        buffered = io.BytesIO()
-        edited_image.save(buffered, format="PNG")
-        img_str = base64.b64encode(buffered.getvalue()).decode('utf-8')
-
-        return jsonify({"message": "Image edited successfully!", "edited_image": img_str})
-
-    except Exception as e:
-        return jsonify({"error": str(e)}), 500
+from diffusers import DiffusionPipeline
 
+# Load the pre-trained model from Hugging Face
+pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16)
+pipe.to("cuda")  # Ensure the model runs on the GPU if available
+
+# Define the function for the Gradio interface
+def generate_image(prompt):
+    # Generate an image using the provided prompt
+    image = pipe(prompt).images[0]
+    return image
+
+# Set up the Gradio interface
+interface = gr.Interface(
+    fn=generate_image,
+    inputs="text",
+    outputs="image",
+    title="Stable Diffusion XL Refiner",
+    description="Generate images from text prompts using Stable Diffusion XL Refiner 1.0"
+)
+
+# Launch the Gradio app
 if __name__ == "__main__":
-    app.run(host="0.0.0.0", port=5000)
+    interface.launch()
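
For reference, a minimal client-side sketch of how the updated app could be exercised once it is running. The URL and endpoint name below are assumptions (Gradio's default local port and the default "/predict" endpoint of a plain gr.Interface), and the example prompt is purely illustrative; gradio_client is a separate install.

# Minimal sketch, assuming the Gradio app above is running locally on the
# default port and exposes the default "/predict" endpoint.
from gradio_client import Client

client = Client("http://127.0.0.1:7860/")

# Send a text prompt to generate_image; for an image output, the client
# returns a local file path to the generated image.
result = client.predict(
    "an astronaut riding a horse on the moon",  # illustrative prompt
    api_name="/predict",
)
print(result)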