accepts compel syntax
- app-img2img.py +4 -1
- img2img/index.html +7 -4
- tailwind.config.js +0 -0
app-img2img.py
CHANGED

@@ -10,6 +10,7 @@ from fastapi.responses import StreamingResponse, JSONResponse
 from fastapi.staticfiles import StaticFiles
 
 from diffusers import DiffusionPipeline, AutoencoderTiny
+from compel import Compel
 import torch
 from PIL import Image
 import numpy as np
@@ -48,6 +49,7 @@ pipe.set_progress_bar_config(disable=True)
 pipe.to(torch_device="cuda", torch_dtype=torch.float16)
 pipe.unet.to(memory_format=torch.channels_last)
 pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+compel_proc = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder, truncate_long_prompts=False)
 user_queue_map = {}
 
 # for torch.compile
@@ -55,10 +57,11 @@ pipe(prompt="warmup", image=[Image.new("RGB", (512, 512))])
 
 def predict(input_image, prompt, guidance_scale=8.0, strength=0.5, seed=2159232):
     generator = torch.manual_seed(seed)
+    prompt_embeds = compel_proc(prompt)
     # Can be set to 1~50 steps. LCM support fast inference even <= 4 steps. Recommend: 1~8 steps.
     num_inference_steps = 3
     results = pipe(
-        prompt=prompt,
+        prompt_embeds=prompt_embeds,
         generator=generator,
         image=input_image,
         strength=strength,
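Note: below is a minimal, self-contained sketch of the Compel weighting syntax the prompt now accepts. It is not part of this commit; it loads the SD 1.5 CLIP tokenizer and text encoder directly (openai/clip-vit-large-patch14, assumed here to match the text encoder behind LCM_Dreamshaper_v7) in place of pipe.tokenizer / pipe.text_encoder from the app, and the example prompt is illustrative only. In the app, the resulting embeddings replace the raw prompt= argument in the pipe() call, as shown in the diff above.

# Illustrative sketch only, not part of the commit.
from transformers import CLIPTokenizer, CLIPTextModel
from compel import Compel

# Stand-ins for pipe.tokenizer / pipe.text_encoder used by the app (assumed model id).
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

compel_proc = Compel(tokenizer=tokenizer, text_encoder=text_encoder,
                     truncate_long_prompts=False)

# Compel syntax: "word++" upweights a term, "word--" downweights it, and
# "(phrase)1.3" applies an explicit weight. The processor turns the annotated
# prompt into conditioning embeddings for the pipeline.
prompt_embeds = compel_proc("a portrait photo, (sharp focus)1.3, bokeh++, blurry--")
print(prompt_embeds.shape)  # torch.Size([1, 77, 768]) for prompts within 77 tokens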
img2img/index.html
CHANGED

@@ -171,23 +171,26 @@
 <p class="text-sm">
   This demo showcases
   <a href="https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7" target="_blank"
-    class="text-blue-500 hover:underline">LCM</a> Image to Image pipeline
+    class="text-blue-500 underline hover:no-underline">LCM</a> Image to Image pipeline
   using
   <a href="https://github.com/huggingface/diffusers/tree/main/examples/community#latent-consistency-pipeline"
-    target="_blank" class="text-blue-500 hover:underline">Diffusers</a> with a MJPEG
+    target="_blank" class="text-blue-500 underline hover:no-underline">Diffusers</a> with a MJPEG
   stream server.
 </p>
 <p class="text-sm">
   There are <span id="queue_size" class="font-bold">0</span> user(s) sharing the same GPU, affecting
   real-time performance. Maximum queue size is 4. <a
     href="https://huggingface.co/spaces/radames/Real-Time-Latent-Consistency-Model?duplicate=true"
-    target="_blank" class="text-blue-500 hover:underline">Duplicate</a> and run it on your
+    target="_blank" class="text-blue-500 underline hover:no-underline">Duplicate</a> and run it on your
+  own GPU.
 </p>
 </article>
 <div>
 <h2 class="font-medium">Prompt</h2>
 <p class="text-sm text-gray-500">
-  Change the prompt to generate different images
+  Change the prompt to generate different images, accepts <a
+    href="https://github.com/damian0815/compel/blob/main/doc/syntax.md" target="_blank"
+    class="text-blue-500 underline hover:no-underline">Compel</a> syntax.
 </p>
 <div class="flex text-normal px-1 py-1 border border-gray-700 rounded-md items-center">
 <textarea type="text" id="prompt" class="font-light w-full px-3 py-2 mx-1 outline-none"
tailwind.config.js
DELETED
File without changes