Update README.md
Browse files
README.md
CHANGED
@@ -6,16 +6,89 @@ base_model:
|
|
6 |
- black-forest-labs/FLUX.1-Canny-dev
|
7 |
---
|
8 |
|
9 |
-
#
|
|
|
|
|
10 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
11 |
|
12 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
13 |
|
|
|
|
|
|
|
14 |
|
15 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
|
17 |
-
# Codes
|
18 |
-
```
|
19 |
-
hello world
|
20 |
-
```
|
21 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
6 |
- black-forest-labs/FLUX.1-Canny-dev
|
7 |
---
|
8 |
|
9 |
+
# Introduction
|
10 |
+
This model is fine-tuned from black-forest-labs/FLUX.1-Canny-dev. \
|
11 |
+
It offers enhanced performance when used together with black-forest-labs/FLUX.1-Redux-dev.
|
12 |
|
13 |
+
# Examples
|
14 |
+
## Shoe/clothing generation demo
|
15 |
+

|
16 |
+
## Texture generation demo
|
17 |
+

|
18 |
+
## Fine-tuned vs. original
|
19 |
+

|
20 |
|
21 |
+
# Inference
|
22 |
+
```python
|
23 |
+
import os
import torch
from diffusers import FluxPriorReduxPipeline, FluxPipeline, FluxControlPipeline, FluxTransformer2DModel
from diffusers.utils import load_image
from PIL import Image

# Redux prior: encodes a style image into prompt embeddings.
pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Redux-dev",
    torch_dtype=torch.bfloat16,
).to("cuda")

# Base Canny-conditioned control pipeline.
pipe = FluxControlPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Canny-dev",
    torch_dtype=torch.bfloat16,
).to("cuda")

# Swap in the fine-tuned transformer published in this repo.
transformer = FluxTransformer2DModel.from_pretrained(
    "woshin/FLUX.1-Canny-dev-redux",
    subfolder="transformer",
    torch_dtype=torch.bfloat16,
).to("cuda")
pipe.transformer = transformer
|
45 |
|
|
|
|
|
|
|
|
|
46 |
|
47 |
+
def inference_control_redux(style_image, control_image, save_image, size=(1024, 1024)):
    """Generate an image conditioned on a Redux style image and a Canny control image.

    Writes a side-by-side strip (style | control | result) to ``save_image``.
    All images are resized to ``size`` before use.
    """
    style_image = load_image(style_image).resize(size)
    control_image = load_image(control_image).resize(size)

    # Encode the style image into prompt embeddings via the Redux prior.
    prompt_embeds, pooled_prompt_embeds = pipe_prior_redux(style_image, return_dict=False)[:2]

    image = pipe(
        control_image=control_image,
        width=size[0],
        height=size[1],
        num_inference_steps=50,
        guidance_scale=30.0,
        prompt_embeds=prompt_embeds,
        pooled_prompt_embeds=pooled_prompt_embeds,
    ).images[0]

    # Assemble style | control | result into one strip for easy comparison.
    strip = Image.new('RGB', (size[0] * 3, size[1]))
    for column, img in enumerate((style_image, control_image, image)):
        strip.paste(img, (size[0] * column, 0))
    strip.save(save_image)
|
67 |
+
|
68 |
+
def inference_control(style_image, control_image, save_image, size=(1024, 1024)):
    """Generate an image from a style image and a Canny control image.

    Writes a side-by-side strip (style | control | result) to ``save_image``.

    NOTE(review): the original body was a byte-for-byte copy of
    ``inference_control_redux`` — it still ran the Redux prior despite the
    name. Delegate instead of duplicating; behavior is unchanged. If a
    Redux-free (plain text-prompt) variant was intended, this needs a
    separate prompt-based path — confirm with the author.
    """
    inference_control_redux(style_image, control_image, save_image, size=size)
|
88 |
+
|
89 |
+
# Example inputs — download style.png / control.png from this repo.
style_image = "style.png"
control_image = "control.png"
save_image = "save.png"

inference_control_redux(style_image, control_image, save_image)
|
93 |
+
|
94 |
+
```
|