Update app.py
app.py CHANGED
@@ -106,7 +106,7 @@ def process_brightness(
 
     return output_image
 
-def infer(video_in, prompt,
+def infer(video_in, trim_value, prompt,
         negative_prompt,
         conditioning_image,
         num_inference_steps=30,
@@ -119,8 +119,8 @@ def infer(video_in, prompt,
     break_vid = get_frames(video_in)
     frames_list= break_vid[0]
     fps = break_vid[1]
-
-    n_frame = len(frames_list)
+    n_frame = int(trim_value*fps)
+    #n_frame = len(frames_list)
 
     if n_frame >= len(frames_list):
         print("video is shorter than the cut value")
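The trim now derives the frame count from the slider value instead of taking every frame: n_frame = int(trim_value*fps) converts seconds into frames, and the existing guard falls back to the full clip when the video is shorter than the cut value. Below is a minimal, self-contained sketch of the idea; it assumes an OpenCV-based get_frames returning (frames_list, fps), which this diff does not show.

import cv2

# Hypothetical sketch only: the Space's real get_frames() is not part of
# this diff, so the OpenCV reader below is an assumption.
def get_frames(video_path):
    frames = []
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS)
    ok, frame = cap.read()
    while ok:
        frames.append(frame)
        ok, frame = cap.read()
    cap.release()
    return frames, fps

frames_list, fps = get_frames("input.mp4")   # assumed sample file
trim_value = 2                               # seconds, as chosen on the slider
n_frame = int(trim_value * fps)              # seconds -> frame count
if n_frame >= len(frames_list):              # video shorter than the cut value
    n_frame = len(frames_list)
frames_list = frames_list[:n_frame]          # keep only the first trim_value seconds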
@@ -178,6 +178,7 @@ with gr.Blocks() as demo:
         source="upload",
         type="filepath"
     )
+    trim_in = gr.Slider(label="Cut video at (s)", minimum=1, maximum=5, step=1, value=1)
     with gr.Accordion('Advanced options', open=False):
         with gr.Row():
             num_inference_steps = gr.Slider(
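The new trim_in slider exposes the cut length in whole seconds (step=1) and caps it at 5 s, presumably to keep the number of frames, and hence inference time, bounded; the sketch after the last hunk shows how it is wired into the click handler.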
@@ -218,26 +219,11 @@ with gr.Blocks() as demo:
     submit_btn.click(
         fn=infer,
         inputs=[
-            video_in, prompt, negative_prompt, conditioning_image, num_inference_steps, size, guidance_scale, seed
+            video_in, trim_in, prompt, negative_prompt, conditioning_image, num_inference_steps, size, guidance_scale, seed
         ],
         outputs=output
     )
-
-    examples=[
-        ["a village in the mountains", "monochrome", "./conditioning_images/conditioning_image_1.jpg"],
-        ["three people walking in an alleyway with hats and pants", "monochrome", "./conditioning_images/conditioning_image_2.jpg"],
-        ["an anime character, natural skin", "monochrome, blue skin, grayscale", "./conditioning_images/conditioning_image_3.jpg"],
-        ["a man in a black suit", "monochrome", "./conditioning_images/conditioning_image_4.jpg"],
-        ["the forbidden city in beijing at sunset with a reflection in the water", "monochrome", "./conditioning_images/conditioning_image_5.jpg"],
-        ["a man in a white shirt holding his hand out in front of", "monochrome", "./conditioning_images/conditioning_image_6.jpg"],
-    ],
-    inputs=[
-        prompt, negative_prompt, conditioning_image
-    ],
-    outputs=output,
-    fn=infer,
-    cache_examples=False,
-    )
+
     gr.Markdown(
     """
     * [Dataset](https://huggingface.co/datasets/ioclab/grayscale_image_aesthetic_3M)
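One thing worth noting about the click wiring: Gradio matches the inputs list to the callback's parameters strictly by position, not by name, so trim_in has to occupy the same slot that trim_value occupies in infer's signature. Below is a minimal, runnable sketch of that mapping, using stand-in components and a stand-in callback rather than the Space's actual code.

import gradio as gr

# Stand-in for infer(): each parameter receives its value from the
# inputs list below purely by position.
def infer(video_in, trim_value, prompt):
    return f"video={video_in!r}, cut at {trim_value}s, prompt={prompt!r}"

with gr.Blocks() as demo:
    video_in = gr.Video(source="upload")
    trim_in = gr.Slider(label="Cut video at (s)", minimum=1, maximum=5, step=1, value=1)
    prompt = gr.Textbox(label="Prompt")
    output = gr.Textbox(label="Result")
    submit_btn = gr.Button("Submit")
    # inputs order mirrors infer's signature: (video_in, trim_value, prompt)
    submit_btn.click(fn=infer, inputs=[video_in, trim_in, prompt], outputs=output)

demo.launch()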