yamildiego committed
Commit 3a07267 · 1 Parent(s): 5893a8e

Back to the version with no ControlNet (the commit before this one is where CN works).
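Since the ControlNet-capable handler survives at the parent commit, here is a minimal sketch (not part of this commit) of pinning that revision when fetching handler.py elsewhere, assuming the standard huggingface_hub client; the repo_id below is a hypothetical placeholder.

# Fetch the ControlNet-era handler.py by pinning the parent revision.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="yamildiego/<this-repo>",  # hypothetical placeholder; substitute the real repo id
    filename="handler.py",
    revision="5893a8e",  # parent commit, where CN works
)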

Files changed (1):
  1. handler.py +25 -77
handler.py CHANGED
@@ -2,102 +2,50 @@ from typing import Dict, List, Any
 import base64
 from PIL import Image
 from io import BytesIO
-from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
 from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
+from diffusers import StableDiffusionPipeline
 
 import torch
 
 
-import numpy as np
-import cv2
-import controlnet_hinter
-
-# set device
+# # set device
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 if device.type != 'cuda':
     raise ValueError("need to run on GPU")
 # set mixed precision dtype
 dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] == 8 else torch.float16
-
-CONTROLNET_MAPPING = {
-    "depth": {
-        "model_id": "lllyasviel/sd-controlnet-depth",
-        "hinter": controlnet_hinter.hint_depth
-    },
-}
 
 class EndpointHandler():
-    def __init__(self, path=""):
-        self.control_type = "depth"
-        self.controlnet = ControlNetModel.from_pretrained(CONTROLNET_MAPPING[self.control_type]["model_id"], torch_dtype=dtype).to(device)
-
-        self.stable_diffusion_id_0 = "Lykon/dreamshaper-8"
-        self.dreamshaper = StableDiffusionControlNetPipeline.from_pretrained(self.stable_diffusion_id_0,
-            controlnet=self.controlnet,
-            torch_dtype=dtype,
-            safety_checker=StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker", torch_dtype=dtype)).to("cuda")
-        self.stable_diffusion_id_1 = "runwayml/stable-diffusion-v1-5"
-        self.runwayml = StableDiffusionControlNetPipeline.from_pretrained(self.stable_diffusion_id_1,
-            controlnet=self.controlnet,
-            torch_dtype=dtype,
-            safety_checker=StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker", torch_dtype=dtype)).to("cuda")
-        # Define Generator with seed
-        self.generator = torch.Generator(device=device.type).manual_seed(3)
-
-    def __call__(self, data: Any) -> List[List[Dict[str, float]]]:
-        """
-        :param data: A dictionary containing a `prompt` and an optional `image_depth_map` field.
-        :return: The first generated image as a PIL image.
-        """
-
-        # hyperparameters
-        sd_model = data.pop("sd_model", "dreamshaper")
-        prompt = data.pop("inputs", None)
-        negative_prompt = data.pop("negative_prompt", None)
-        image_depth_map = data.pop("image_depth_map", None)
-        steps = data.pop("steps", 25)
-        scale = data.pop("scale", 7)
-        height = data.pop("height", None)
-        width = data.pop("width", None)
-        controlnet_conditioning_scale = data.pop("controlnet_conditioning_scale", 1.0)
-
-        if sd_model is None or not hasattr(self, sd_model):
-            return {"error": "Model SD not found"}
-
-        if prompt is None:
-            return {"error": "Please provide a prompt"}
-
-        if image_depth_map is None:
-            return {"error": "Please provide an image_depth_map"}
-
-        pipe = getattr(self, sd_model)
-
-        # process image
-        image = self.decode_base64_image(image_depth_map)
+    def __init__(self, path=""):
+        self.stable_diffusion_id = "Lykon/dreamshaper-8"
+        self.pipe = StableDiffusionPipeline.from_pretrained(self.stable_diffusion_id,
+            torch_dtype=dtype,
+            safety_checker=StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker", torch_dtype=dtype)).to(device.type)
+
+        self.generator = torch.Generator(device=device.type).manual_seed(3)
+
+    def __call__(self, data: Any) -> List[List[Dict[str, float]]]:
+        # """
+        # :param data: A dictionary containing an `inputs` prompt and optional generation fields.
+        # :return: The first generated image as a PIL image.
+        # """
+        prompt = data.pop("inputs", None)
+        num_inference_steps = data.pop("num_inference_steps", 30)
+        guidance_scale = data.pop("guidance_scale", 7.4)
+        negative_prompt = data.pop("negative_prompt", None)
+        height = data.pop("height", None)
+        width = data.pop("width", None)
 
         # run inference pipeline
-        out = pipe(
+        out = self.pipe(
             prompt=prompt,
             negative_prompt=negative_prompt,
-            image=image,
-            num_inference_steps=steps,
-            guidance_scale=scale,
+            num_inference_steps=num_inference_steps,
+            guidance_scale=guidance_scale,
             num_images_per_prompt=1,
             height=height,
             width=width,
-            controlnet_conditioning_scale=controlnet_conditioning_scale,
             generator=self.generator
         )
 
-        # return the first generated PIL image
-        return out.images[0]
-
-    # helper to decode the input image
-    def decode_base64_image(self, image_string):
-        base64_image = base64.b64decode(image_string)
-        buffer = BytesIO(base64_image)
-        image = Image.open(buffer)
-        return image
+
+        # return the first generated PIL image
+        return out.images[0]
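For reviewers, a minimal smoke test of the reverted handler. This is a sketch under assumptions: handler.py is importable from the working directory, a CUDA GPU is available (the module raises otherwise), and the prompt strings are illustrative; the field names all come from the diff above.

# Instantiate the endpoint handler and run one text-to-image generation.
from handler import EndpointHandler

handler = EndpointHandler()
image = handler({
    "inputs": "a lighthouse on a cliff at sunset",  # illustrative prompt
    "negative_prompt": "blurry, low quality",       # illustrative
    "num_inference_steps": 30,
    "guidance_scale": 7.4,
    "height": 512,
    "width": 512,
})

# __call__ returns the first generated PIL image directly (not base64),
# so it can be saved straight to disk.
image.save("out.png")

Unlike the ControlNet version this commit reverts, the handler no longer accepts an image_depth_map; it is plain text-to-image.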