1aurent committed
Commit 74a242e · unverified · 0 parent(s)
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,164 @@
+ .gradio
+
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+ .pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
.python-version ADDED
@@ -0,0 +1 @@
+ 3.11
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: TryOffAnyone
+ emoji: 👕
+ colorFrom: blue
+ colorTo: green
+ sdk: gradio
+ sdk_version: 5.9.1
+ app_file: src/app.py
+ pinned: false
+ license: mit
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
examples/model_1.jpg ADDED
examples/model_2.jpg ADDED
examples/model_3.jpg ADDED
examples/model_4.jpg ADDED
examples/model_5.jpg ADDED
examples/model_6.jpg ADDED
examples/model_7.jpg ADDED
examples/model_8.jpg ADDED
examples/model_9.jpg ADDED
pyproject.toml ADDED
@@ -0,0 +1,19 @@
+ [project]
+ dependencies = [
+     "accelerate>=1.2.1",
+     "diffusers>=0.32.1",
+     "gradio>=5.9.1",
+     "pillow-heif>=0.21.0",
+     "scikit-image>=0.25.0",
+     "spaces>=0.31.1",
+     "torchvision>=0.20.1",
+     "transformers>=4.47.1",
+ ]
+ description = "TryOffAnyone: Tiled Cloth Generation from a Dressed Person"
+ name = "try-off-anyone"
+ readme = "README.md"
+ requires-python = ">=3.11"
+ version = "0.1.0"
+
+ [tool.ruff]
+ line-length = 120
src/app.py ADDED
@@ -0,0 +1,188 @@
+ from typing import TypedDict
+
+ import diffusers.image_processor
+ import gradio as gr
+ import pillow_heif  # pyright: ignore[reportMissingTypeStubs]
+ import spaces  # pyright: ignore[reportMissingTypeStubs]
+ import torch
+ from PIL import Image
+
+ from pipeline import TryOffAnyone
+
+ pillow_heif.register_heif_opener()  # pyright: ignore[reportUnknownMemberType]
+ pillow_heif.register_avif_opener()  # pyright: ignore[reportUnknownMemberType]
+
+ torch.set_float32_matmul_precision("high")
+ torch.backends.cuda.matmul.allow_tf32 = True
+
+ TITLE = """
+ # Try Off Anyone
+
+ ## ⚠️ Important
+
+ 1. Choose an example image or upload your own
+ 2. Use the Pen tool to draw a mask over the clothing area you want to extract
+
+ [![arxiv badge](https://img.shields.io/badge/arXiv-Paper-b31b1b.svg?style=for-the-badge)](https://arxiv.org/abs/2412.08573)
+ """
+
+ DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ DTYPE = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float32
+
+ pipeline_tryoff = TryOffAnyone(
+     device=DEVICE,
+     dtype=DTYPE,
+ )
+ mask_processor = diffusers.image_processor.VaeImageProcessor(
+     vae_scale_factor=8,
+     do_normalize=False,
+     do_binarize=True,
+     do_convert_grayscale=True,
+ )
+ vae_processor = diffusers.image_processor.VaeImageProcessor(
+     vae_scale_factor=8,
+ )
+
+
+ class ImageData(TypedDict):
+     background: Image.Image
+     composite: Image.Image
+     layers: list[Image.Image]
+
+
+ @spaces.GPU
+ def process(
+     image_data: ImageData,
+     image_width: int,
+     image_height: int,
+     num_inference_steps: int,
+     condition_scale: float,
+     seed: int,
+ ) -> Image.Image:
+     assert image_width > 0
+     assert image_height > 0
+     assert num_inference_steps > 0
+     assert condition_scale > 0
+     assert seed >= 0
+
+     # extract image and mask from image_data
+     image = image_data["background"]
+     mask = image_data["layers"][0]
+
+     # preprocess image
+     image = image.convert("RGB").resize((image_width, image_height))
+     image_preprocessed = vae_processor.preprocess(  # pyright: ignore[reportUnknownMemberType,reportAssignmentType]
+         image=image,
+         width=image_width,
+         height=image_height,
+     )[0]
+
+     # preprocess mask
+     mask = mask.getchannel("A").resize((image_width, image_height))
+     mask_preprocessed = mask_processor.preprocess(  # pyright: ignore[reportUnknownMemberType]
+         image=mask,
+         width=image_width,
+         height=image_height,
+     )[0]
+
+     # generate the TryOff image
+     gen = torch.Generator(device=DEVICE).manual_seed(seed)
+     tryoff_image = pipeline_tryoff(
+         image_preprocessed,
+         mask_preprocessed,
+         inference_steps=num_inference_steps,
+         scale=condition_scale,
+         generator=gen,
+     )[0]
+
+     return tryoff_image
+
+
+ with gr.Blocks() as demo:
+     gr.Markdown(TITLE)
+
+     with gr.Row():
+         with gr.Column():
+             input_image = gr.ImageMask(
+                 label="Input Image",
+                 height=1024,  # https://github.com/gradio-app/gradio/issues/10236
+                 type="pil",
+                 interactive=True,
+             )
+             run_button = gr.Button(
+                 value="Extract Clothing",
+             )
+             gr.Examples(
+                 examples=[
+                     ["examples/model_1.jpg"],
+                     ["examples/model_2.jpg"],
+                     ["examples/model_3.jpg"],
+                     ["examples/model_4.jpg"],
+                     ["examples/model_5.jpg"],
+                     ["examples/model_6.jpg"],
+                     ["examples/model_7.jpg"],
+                     ["examples/model_8.jpg"],
+                     ["examples/model_9.jpg"],
+                 ],
+                 inputs=[input_image],
+             )
+         with gr.Column():
+             output_image = gr.Image(
+                 label="TryOff result",
+                 height=1024,
+                 image_mode="RGB",
+                 type="pil",
+             )
+
+     with gr.Accordion("Advanced Settings", open=True):
+         seed = gr.Slider(
+             label="Seed",
+             minimum=0,
+             maximum=100_000,
+             value=69_420,
+             step=1,
+         )
+         scale = gr.Slider(
+             label="Scale",
+             minimum=0.5,
+             maximum=5,
+             value=2.5,
+             step=0.05,
+         )
+         num_inference_steps = gr.Slider(
+             label="Number of inference steps",
+             minimum=1,
+             maximum=50,
+             value=25,
+             step=1,
+         )
+         with gr.Row():
+             image_width = gr.Slider(
+                 label="Image Width",
+                 minimum=64,
+                 maximum=1024,
+                 value=384,
+                 step=8,
+             )
+             image_height = gr.Slider(
+                 label="Image Height",
+                 minimum=64,
+                 maximum=1024,
+                 value=512,
+                 step=8,
+             )
+
+     run_button.click(
+         fn=process,
+         inputs=[
+             input_image,
+             image_width,
+             image_height,
+             num_inference_steps,
+             scale,
+             seed,
+         ],
+         outputs=output_image,
+     )
+
+ demo.launch()
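
For reference, the `ImageData` payload handled by `process` mirrors what `gr.ImageMask` delivers with `type="pil"`: the untouched upload in `background`, one RGBA brush layer per drawing pass in `layers` (the strokes live in the alpha channel), and a flattened `composite`. A minimal sketch of building such a payload by hand; the mask rectangle and the use of `model_1.jpg` are purely illustrative and not part of this commit:

```python
from PIL import Image, ImageDraw

# one of the example photos bundled with the Space
background = Image.open("examples/model_1.jpg").convert("RGB")

# an empty RGBA layer; drawn pixels get a non-zero alpha, exactly like a Pen stroke
layer = Image.new("RGBA", background.size, (0, 0, 0, 0))
draw = ImageDraw.Draw(layer)
w, h = background.size
# pretend the user scribbled over the upper-body clothing (coordinates are made up)
draw.rectangle([w // 4, h // 5, 3 * w // 4, h // 2], fill=(0, 0, 0, 255))

image_data = {
    "background": background,
    "layers": [layer],
    "composite": Image.alpha_composite(background.convert("RGBA"), layer),
}

# `process` reads image_data["background"] and the alpha channel of
# image_data["layers"][0], as in the handler above
mask = image_data["layers"][0].getchannel("A")
```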
src/pipeline.py ADDED
@@ -0,0 +1,159 @@
+ # type: ignore
+ # Inspired from https://github.com/ixarchakos/try-off-anyone/blob/aa3045453013065573a647e4536922bac696b968/src/model/pipeline.py
+ # Inspired from https://github.com/ixarchakos/try-off-anyone/blob/aa3045453013065573a647e4536922bac696b968/src/model/attention.py
+
+ import torch
+ from accelerate import load_checkpoint_in_model
+ from diffusers import AutoencoderKL, DDIMScheduler, UNet2DConditionModel
+ from diffusers.models.attention_processor import AttnProcessor
+ from diffusers.utils.torch_utils import randn_tensor
+ from huggingface_hub import hf_hub_download
+ from PIL import Image
+
+
+ class Skip(torch.nn.Module):
+     def __init__(self) -> None:
+         super().__init__()
+
+     def __call__(
+         self,
+         attn: torch.Tensor,
+         hidden_states: torch.Tensor,
+         encoder_hidden_states: torch.Tensor = None,
+         attention_mask: torch.Tensor = None,
+         temb: torch.Tensor = None,
+     ) -> torch.Tensor:
+         return hidden_states
+
+
+ def fine_tuned_modules(unet: UNet2DConditionModel) -> torch.nn.ModuleList:
+     trainable_modules = torch.nn.ModuleList()
+
+     for blocks in [unet.down_blocks, unet.mid_block, unet.up_blocks]:
+         if hasattr(blocks, "attentions"):
+             trainable_modules.append(blocks.attentions)
+         else:
+             for block in blocks:
+                 if hasattr(block, "attentions"):
+                     trainable_modules.append(block.attentions)
+
+     return trainable_modules
+
+
+ def skip_cross_attentions(unet: UNet2DConditionModel) -> dict[str, AttnProcessor | Skip]:
+     attn_processors = {
+         name: unet.attn_processors[name] if name.endswith("attn1.processor") else Skip()
+         for name in unet.attn_processors.keys()
+     }
+     return attn_processors
+
+
+ def encode(image: torch.Tensor, vae: AutoencoderKL) -> torch.Tensor:
+     image = image.to(memory_format=torch.contiguous_format).float().to(vae.device, dtype=vae.dtype)
+     with torch.no_grad():
+         return vae.encode(image).latent_dist.sample() * vae.config.scaling_factor
+
+
+ class TryOffAnyone:
+     def __init__(
+         self,
+         device: torch.device,
+         dtype: torch.dtype,
+         concat_dim: int = -2,
+     ) -> None:
+         self.concat_dim = concat_dim
+         self.device = device
+         self.dtype = dtype
+
+         self.noise_scheduler = DDIMScheduler.from_pretrained(
+             pretrained_model_name_or_path="stable-diffusion-v1-5/stable-diffusion-inpainting",
+             subfolder="scheduler",
+         )
+         self.vae = AutoencoderKL.from_pretrained(
+             pretrained_model_name_or_path="stabilityai/sd-vae-ft-mse",
+         ).to(device, dtype=dtype)
+         self.unet = UNet2DConditionModel.from_pretrained(
+             pretrained_model_name_or_path="stable-diffusion-v1-5/stable-diffusion-inpainting",
+             subfolder="unet",
+             variant="fp16",
+         ).to(device, dtype=dtype)
+
+         # drop cross-attention (only self-attention, attn1, keeps its processor),
+         # then load the TryOffAnyone fine-tuned attention weights from the Hub
+         self.unet.set_attn_processor(skip_cross_attentions(self.unet))
+         load_checkpoint_in_model(
+             model=fine_tuned_modules(unet=self.unet),
+             checkpoint=hf_hub_download(
+                 repo_id="ixarchakos/tryOffAnyone",
+                 filename="model.safetensors",
+             ),
+         )
+
+     @torch.no_grad()
+     def __call__(
+         self,
+         image: torch.Tensor,
+         mask: torch.Tensor,
+         inference_steps: int,
+         scale: float,
+         generator: torch.Generator,
+     ) -> list[Image.Image]:
+         image = image.unsqueeze(0).to(self.device, dtype=self.dtype)
+         mask = (mask.unsqueeze(0) > 0.5).to(self.device, dtype=self.dtype)
+         # zero out the masked (garment) region of the person image
+         masked_image = image * (mask < 0.5)
+
+         masked_latent = encode(masked_image, self.vae)
+         image_latent = encode(image, self.vae)
+         mask = torch.nn.functional.interpolate(mask, size=masked_latent.shape[-2:], mode="nearest")
+
+         # tile the masked-person latent and the full-image latent along concat_dim (height by default)
+         masked_latent_concat = torch.cat([masked_latent, image_latent], dim=self.concat_dim)
+         mask_concat = torch.cat([mask, torch.zeros_like(mask)], dim=self.concat_dim)
+
+         latents = randn_tensor(
+             shape=masked_latent_concat.shape,
+             generator=generator,
+             device=self.device,
+             dtype=self.dtype,
+         )
+
+         self.noise_scheduler.set_timesteps(inference_steps, device=self.device)
+         timesteps = self.noise_scheduler.timesteps
+
+         # classifier-free guidance: the unconditional branch replaces the conditioning tile with zeros
+         if do_classifier_free_guidance := (scale > 1.0):
+             masked_latent_concat = torch.cat(
+                 [
+                     torch.cat([masked_latent, torch.zeros_like(image_latent)], dim=self.concat_dim),
+                     masked_latent_concat,
+                 ]
+             )
+
+             mask_concat = torch.cat([mask_concat] * 2)
+
+         # DDIM denoising; the inpainting UNet consumes latents + mask + masked latents along the channel dim
+         extra_step = {"generator": generator, "eta": 1.0}
+         for t in timesteps:
+             input_latents = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
+             input_latents = self.noise_scheduler.scale_model_input(input_latents, t)
+
+             input_latents = torch.cat([input_latents, mask_concat, masked_latent_concat], dim=1)
+
+             noise_pred = self.unet(
+                 input_latents,
+                 t.to(self.device),
+                 encoder_hidden_states=None,
+                 return_dict=False,
+             )[0]
+
+             if do_classifier_free_guidance:
+                 noise_pred_unc, noise_pred_text = noise_pred.chunk(2)
+                 noise_pred = noise_pred_unc + scale * (noise_pred_text - noise_pred_unc)
+
+             latents = self.noise_scheduler.step(noise_pred, t, latents, **extra_step).prev_sample
+
+         # keep only the first half of the tiled latent (the inpainted tile), then decode to pixel space
+         latents = latents.split(latents.shape[self.concat_dim] // 2, dim=self.concat_dim)[0]
+         latents = 1 / self.vae.config.scaling_factor * latents
+         image = self.vae.decode(latents.to(self.device, dtype=self.dtype)).sample
+         image = (image / 2 + 0.5).clamp(0, 1)
+         image = image.cpu().permute(0, 2, 3, 1).float().numpy()
+
+         image = (image * 255).round().astype("uint8")
+         image = [Image.fromarray(im) for im in image]
+
+         return image
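
Taken together with `src/app.py`, the pipeline can also be driven from a plain script. Below is a minimal sketch, assuming it is run from the `src/` directory, that a grayscale mask of the garment region (white = masked) is available on disk, and that a CUDA device is present (the checkpoints are downloaded from the Hub on first use); `my_mask.png` and the chosen sizes are illustrative, not part of the commit:

```python
import torch
from diffusers.image_processor import VaeImageProcessor
from PIL import Image

from pipeline import TryOffAnyone  # src/pipeline.py above

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32

width, height = 384, 512  # same defaults as the Gradio sliders

# person photo plus a grayscale mask covering the clothing to extract
image = Image.open("examples/model_1.jpg").convert("RGB").resize((width, height))
mask = Image.open("my_mask.png").convert("L").resize((width, height))  # illustrative path

# same preprocessing as in src/app.py
vae_processor = VaeImageProcessor(vae_scale_factor=8)
mask_processor = VaeImageProcessor(
    vae_scale_factor=8, do_normalize=False, do_binarize=True, do_convert_grayscale=True
)
image_tensor = vae_processor.preprocess(image, width=width, height=height)[0]
mask_tensor = mask_processor.preprocess(mask, width=width, height=height)[0]

pipeline = TryOffAnyone(device=device, dtype=dtype)
generator = torch.Generator(device=device).manual_seed(69_420)

garment = pipeline(
    image_tensor,
    mask_tensor,
    inference_steps=25,
    scale=2.5,
    generator=generator,
)[0]
garment.save("tryoff_result.png")
```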
uv.lock ADDED
The diff for this file is too large to render.