Update handler.py
handler.py  +7 -7  CHANGED
@@ -7,17 +7,17 @@ from diffusers.pipelines import FluxControlNetPipeline
 from io import BytesIO
 
 class EndpointHandler:
-    def __init__(self, model_dir="
+    def __init__(self, model_dir="huyai123/Flux.1-dev-Image-Upscaler"):
         # Access the environment variable
         HUGGINGFACE_API_TOKEN = os.getenv('HUGGINGFACE_API_TOKEN')
         if not HUGGINGFACE_API_TOKEN:
-            raise ValueError("HUGGINGFACE_API_TOKEN")
+            raise ValueError("HUGGINGFACE_API_TOKEN environment variable is not set")
 
         # Load model and pipeline
-        self.controlnet = FluxControlNetModel.
+        self.controlnet = FluxControlNetModel.from_pretrained(
             model_dir, torch_dtype=torch.bfloat16, use_auth_token=HUGGINGFACE_API_TOKEN
         )
-        self.pipe = FluxControlNetPipeline.
+        self.pipe = FluxControlNetPipeline.from_pretrained(
             "black-forest-labs/FLUX.1-dev",
             controlnet=self.controlnet,
             torch_dtype=torch.bfloat16,
@@ -35,7 +35,7 @@ class EndpointHandler:
         # Upscale x4
         return image.resize((w * 4, h * 4))
 
-    def
+    def postprocess(self, output):
         # Save output image to a file-like object
         buffer = BytesIO()
         output.save(buffer, format="PNG")
@@ -49,9 +49,9 @@ class EndpointHandler:
         output_image = self.pipe(
             prompt=data.get("prompt", ""),
             control_image=control_image,
-
+            controlnet_conditioning_scale=0.6,
             num_inference_steps=28,
-            height=control_image.
+            height=control_image.size[1],
             width=control_image.size[0],
         ).images[0]
         # Postprocess output
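Pieced together from the hunks above, the handler plausibly looks like the sketch below after this commit. Everything outside the visible diff lines is an assumption: the FluxControlNetModel import, the name and signature of the x4-upscaling preprocess method, the __call__ entry point and its data["inputs"] payload key, the buffer.getvalue() return in postprocess, and the .to("cuda") placement are illustrative guesses, not the repository's actual code.

import os
import torch
from io import BytesIO
from PIL import Image
from diffusers import FluxControlNetModel          # assumed import; top of file not shown
from diffusers.pipelines import FluxControlNetPipeline


class EndpointHandler:
    def __init__(self, model_dir="huyai123/Flux.1-dev-Image-Upscaler"):
        # Access the environment variable
        HUGGINGFACE_API_TOKEN = os.getenv('HUGGINGFACE_API_TOKEN')
        if not HUGGINGFACE_API_TOKEN:
            raise ValueError("HUGGINGFACE_API_TOKEN environment variable is not set")

        # Load model and pipeline
        self.controlnet = FluxControlNetModel.from_pretrained(
            model_dir, torch_dtype=torch.bfloat16, use_auth_token=HUGGINGFACE_API_TOKEN
        )
        self.pipe = FluxControlNetPipeline.from_pretrained(
            "black-forest-labs/FLUX.1-dev",
            controlnet=self.controlnet,
            torch_dtype=torch.bfloat16,
            use_auth_token=HUGGINGFACE_API_TOKEN,  # assumed: this argument is not visible in the hunk
        )
        self.pipe.to("cuda")  # assumed: device placement is not shown in the diff

    def preprocess(self, image: Image.Image) -> Image.Image:
        # Assumed method name and signature; only the last lines of the body appear in the diff.
        w, h = image.size
        # Upscale x4
        return image.resize((w * 4, h * 4))

    def postprocess(self, output: Image.Image) -> bytes:
        # Save output image to a file-like object
        buffer = BytesIO()
        output.save(buffer, format="PNG")
        buffer.seek(0)
        return buffer.getvalue()  # assumed return; the diff cuts off after the save() call

    def __call__(self, data: dict) -> bytes:
        # Assumed entry point; only the pipeline call itself is visible in the diff.
        control_image = self.preprocess(data["inputs"])  # assumed payload key, assumed PIL image input
        output_image = self.pipe(
            prompt=data.get("prompt", ""),
            control_image=control_image,
            controlnet_conditioning_scale=0.6,
            num_inference_steps=28,
            height=control_image.size[1],
            width=control_image.size[0],
        ).images[0]
        # Postprocess output
        return self.postprocess(output_image)

A hypothetical local smoke test, assuming the payload shape used in the sketch:

from PIL import Image

handler = EndpointHandler()
png_bytes = handler({"inputs": Image.open("low_res.png"), "prompt": "a sharp, detailed photo"})
with open("upscaled.png", "wb") as f:
    f.write(png_bytes)

If the x4-resized image from the second hunk is indeed what gets passed as control_image, then height and width simply match the target output resolution, and controlnet_conditioning_scale=0.6 is a moderate setting that keeps the upscaler ControlNet from fully overriding the base FLUX.1-dev generation.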