Spaces:
Runtime error
Runtime error
convert to pil image
Browse files
app.py
CHANGED
@@ -10,6 +10,8 @@ import PIL
|
|
10 |
from PIL import Image
|
11 |
import numpy as np
|
12 |
|
|
|
|
|
13 |
output_res = (768,768)
|
14 |
|
15 |
conditioning_image_transforms = T.Compose(
|
@@ -38,12 +40,12 @@ def infer(prompt, negative_prompt, image):
|
|
38 |
# implement your inference function here
|
39 |
inp = Image.fromarray(image)
|
40 |
|
41 |
- cond_input = conditioning_image_transforms(inp)
|
42 |
#cond_input = T.ToPILImage(cond_input)
|
43 |
|
44 |
output = pipe(
|
45 |
prompt,
|
46 |
- cond_input
|
47 |
generator=generator,
|
48 |
num_images_per_prompt=4,
|
49 |
num_inference_steps=20
|
|
|
10 |
from PIL import Image
|
11 |
import numpy as np
|
12 |
|
13 |
+ import torchvision.transforms.functional as F
|
14 |
+
|
15 |
output_res = (768,768)
|
16 |
|
17 |
conditioning_image_transforms = T.Compose(
|
|
|
40 |
# implement your inference function here
|
41 |
inp = Image.fromarray(image)
|
42 |
|
43 |
+ cond_input = F.to_pil_image(conditioning_image_transforms(inp)[0])
|
44 |
#cond_input = T.ToPILImage(cond_input)
|
45 |
|
46 |
output = pipe(
|
47 |
prompt,
|
48 |
+ cond_input,
|
49 |
generator=generator,
|
50 |
num_images_per_prompt=4,
|
51 |
num_inference_steps=20
|