Update app.py
app.py CHANGED
@@ -12,6 +12,7 @@ import math
 from optimization import optimize_pipeline_
 from qwenimage.pipeline_qwen_image_edit import QwenImageEditPipeline
 from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
+from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
 
 from huggingface_hub import InferenceClient
 import math
@@ -143,6 +144,8 @@ dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
 pipe = QwenImageEditPipeline.from_pretrained("Qwen/Qwen-Image-Edit", torch_dtype=dtype).to(device)
 pipe.transformer.__class__ = QwenImageTransformer2DModel
+pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
+
 
 # --- Ahead-of-time compilation ---
 optimize_pipeline_(pipe, image=Image.new("RGB", (1024, 1024)), prompt="prompt")
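For context, a minimal sketch of what the affected region of app.py looks like after this change. It assumes the Space's local `optimization` and `qwenimage` modules, plus `torch` and `PIL` imports that appear elsewhere in the file, and a CUDA device with FlashAttention-3 support; it is not the full app.

```python
import torch
from PIL import Image

from optimization import optimize_pipeline_
from qwenimage.pipeline_qwen_image_edit import QwenImageEditPipeline
from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3

dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the Qwen-Image-Edit pipeline and swap in the Space's transformer class.
pipe = QwenImageEditPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit", torch_dtype=dtype
).to(device)
pipe.transformer.__class__ = QwenImageTransformer2DModel

# New in this commit: route the transformer's double-stream attention through
# the FlashAttention-3 processor before compiling.
pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())

# --- Ahead-of-time compilation ---
optimize_pipeline_(pipe, image=Image.new("RGB", (1024, 1024)), prompt="prompt")
```

The attention processor is set before `optimize_pipeline_` runs so that the ahead-of-time compilation captures the FA3 attention path rather than the default one.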