Jordan Legg committed
Commit f0decf0 · Parent(s): cec333d
safe check
app.py
CHANGED
@@ -43,6 +43,7 @@ def infer(prompt, init_image=None, seed=42, randomize_seed=False, width=1024, he
     fallback_image = Image.new("RGB", (width, height), (255, 0, 0))  # Red image as a fallback
 
     if init_image is None:
+        # text2img case
         try:
             result = pipe(
                 prompt=prompt,
@@ -59,6 +60,7 @@ def infer(prompt, init_image=None, seed=42, randomize_seed=False, width=1024, he
             print(f"Pipeline call failed with error: {e}")
             return fallback_image, seed
     else:
+        # img2img case
         vae_image_size = pipe.vae.config.sample_size  # Ensure this is correct
         init_image = init_image.convert("RGB")
         init_image = preprocess_image(init_image, vae_image_size)
@@ -73,7 +75,8 @@ def infer(prompt, init_image=None, seed=42, randomize_seed=False, width=1024, he
         latents = latents.permute(0, 2, 3, 1).contiguous().view(-1, 64)
 
         try:
-            if 'timesteps'
+            # Determine if 'timesteps' is required for the transformer
+            if hasattr(pipe.transformer, 'forward') and hasattr(pipe.transformer.forward, '__code__') and 'timesteps' in pipe.transformer.forward.__code__.co_varnames:
                 timestep = torch.tensor([num_inference_steps], device=device, dtype=dtype)
                 _ = pipe.transformer(latents, timesteps=timestep)
             else:
@@ -98,8 +101,6 @@ def infer(prompt, init_image=None, seed=42, randomize_seed=False, width=1024, he
 
     return image, seed
 
-
-
 # Define example prompts
 examples = [
     "a tiny astronaut hatching from an egg on the moon",
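Note on the fallback path touched by the first two hunks: when the pipeline call raises, the function logs the error and returns a solid red placeholder image together with the seed, so the caller always gets a valid (image, seed) pair instead of an exception. A minimal standalone sketch of that pattern (the run_safely wrapper and failing_pipe are hypothetical names for illustration, not part of app.py):

from PIL import Image

def run_safely(pipe_call, width=1024, height=1024, seed=42):
    # Hypothetical wrapper mirroring the fallback behaviour in the diff:
    # on any pipeline error, return a solid red image instead of crashing the UI.
    fallback_image = Image.new("RGB", (width, height), (255, 0, 0))  # Red image as a fallback
    try:
        return pipe_call(), seed
    except Exception as e:
        print(f"Pipeline call failed with error: {e}")
        return fallback_image, seed

# Example: a pipeline stub that always fails, to show the fallback being returned.
def failing_pipe():
    raise RuntimeError("out of memory")

image, seed = run_safely(failing_pipe)
print(image.size, seed)  # (1024, 1024) 42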
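Note on the "safe check" in the third hunk: before passing timesteps to pipe.transformer, the code inspects the forward method's __code__.co_varnames for a 'timesteps' name, so transformer variants whose forward does not accept it are called without the argument. A small standalone sketch of the same introspection (DummyTransformer is made up for illustration; inspect.signature is shown only as an alternative, not what the Space uses):

import inspect
import torch

class DummyTransformer(torch.nn.Module):
    # Hypothetical stand-in for pipe.transformer, only here to exercise the check.
    def forward(self, hidden_states, timesteps=None):
        return hidden_states

transformer = DummyTransformer()

# Same idea as the commit: look for 'timesteps' before passing it,
# so a forward() without that parameter does not raise TypeError.
accepts_timesteps = (
    hasattr(transformer, "forward")
    and hasattr(transformer.forward, "__code__")
    and "timesteps" in transformer.forward.__code__.co_varnames
)

# Alternative: inspect.signature reports only real parameters, whereas
# co_varnames also lists local variables and can therefore over-match.
accepts_timesteps_alt = "timesteps" in inspect.signature(transformer.forward).parameters

latents = torch.randn(1, 64)
if accepts_timesteps:
    out = transformer(latents, timesteps=torch.tensor([4]))
else:
    out = transformer(latents)
print(accepts_timesteps, accepts_timesteps_alt, tuple(out.shape))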