Update app.py
app.py
CHANGED
@@ -60,7 +60,7 @@ pipe.enable_model_cpu_offload()
pipe.enable_xformers_memory_efficient_attention()

# Generator seed,
-generator = torch.manual_seed(
+generator = torch.manual_seed(0)

def get_canny_filter(image):
    if not isinstance(image, np.ndarray):
@@ -78,6 +78,7 @@ def generate_images(prompt, canny_image):
    output = pipe(
        prompt,
        canny_image,
+        seed=42000,
        generator=generator,
        num_images_per_prompt=1,
        num_inference_steps=20,
@@ -136,9 +137,9 @@ def infer():

    canny_image.save("canny1.jpg")
    canny_image2.save("canny2.jpg")
-    input_frame_1 = read_image(str("
+    input_frame_1 = read_image(str("canny1.jpg"), ImageReadMode.UNCHANGED)
    print(f"FRAME 1: {input_frame_1}")
-    input_frame_2 = read_image(str("
+    input_frame_2 = read_image(str("canny2.jpg"), ImageReadMode.UNCHANGED)
    print(f"FRAME 1: {input_frame_2}")

    #img1_batch = torch.stack([frames[0]])
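For context on the first two hunks: seeding the generator is what makes the diffusion output reproducible, and that generator object is then passed into the pipeline call. Below is a minimal sketch of the pattern, assuming a diffusers ControlNet pipeline; the checkpoint names, prompt, and dummy control image are placeholders rather than values from this Space, and the seed=42000 keyword added in the commit is omitted because, as far as I know, it is not a parameter of the standard pipeline call.

# Sketch only -- model IDs and prompt are assumptions, not from this repo.
import numpy as np
import torch
from PIL import Image
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()

# torch.manual_seed(0) seeds the default CPU RNG and returns the torch.Generator,
# so re-running with the same seed reproduces the same latents and the same image.
generator = torch.manual_seed(0)

# Placeholder control image (an all-black canny map) so the sketch is self-contained.
canny_image = Image.fromarray(np.zeros((512, 512, 3), dtype=np.uint8))

output = pipe(
    "a sketch of a cat",   # placeholder prompt
    canny_image,
    generator=generator,
    num_images_per_prompt=1,
    num_inference_steps=20,
)
output.images[0].save("result.jpg")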
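The third hunk reads the two canny maps that were just saved back in as tensors via torchvision.io; for that, read_image and ImageReadMode have to be imported in app.py (the str(...) wrapper around the filename is redundant, since the argument is already a string). A small sketch of the round trip, assuming from torchvision.io import read_image, ImageReadMode is available:

# Sketch only: save a placeholder canny map and read it back as a tensor.
import numpy as np
from PIL import Image
from torchvision.io import read_image, ImageReadMode

Image.fromarray(np.zeros((512, 512, 3), dtype=np.uint8)).save("canny1.jpg")

# ImageReadMode.UNCHANGED keeps whatever channels the file has;
# read_image returns a uint8 tensor in C x H x W layout.
input_frame_1 = read_image("canny1.jpg", ImageReadMode.UNCHANGED)
print(f"FRAME 1: {input_frame_1.shape}, dtype={input_frame_1.dtype}")

Frame tensors like these are what the commented-out torch.stack([frames[0]]) batching further down in the file would operate on.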