fffiloni committed on
Commit be616f6 · 1 Parent(s): 741ef8a

Update app.py

Files changed (1)
  1. app.py +4 -12
app.py CHANGED
@@ -6,10 +6,6 @@ import torch
 from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
 from diffusers.utils import export_to_video
 
-pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)
-pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
-pipe.enable_model_cpu_offload()
-pipe.to("cuda")
 pipe_xl = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16, revision="refs/pr/17")
 pipe_xl.vae.enable_slicing()
 pipe_xl.scheduler = DPMSolverMultistepScheduler.from_config(pipe_xl.scheduler.config)
@@ -17,14 +13,9 @@ pipe_xl.enable_model_cpu_offload()
 pipe_xl.to("cuda")
 
 
-def infer(prompt):
-    #prompt = "Darth Vader is surfing on waves"
-    #pipe.to("cuda")
-    video_frames = pipe(prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames
-    video_path = export_to_video(video_frames)
-    print(video_path)
+def infer(prompt, video_in):
 
-    video = [Image.fromarray(frame).resize((1024, 576)) for frame in video_frames]
+    video = [Image.fromarray(frame).resize((1024, 576)) for frame in video_in]
     #del pipe
     #pipe_xl.to("cuda")
     video_frames = pipe_xl(prompt, video=video, strength=0.6).frames
@@ -111,6 +102,7 @@ with gr.Blocks(css=css) as demo:
         """
     )
 
+    video_in = gr.Video(type="filepath", source="upload")
     prompt_in = gr.Textbox(label="Prompt", placeholder="Darth Vader is surfing on waves", elem_id="prompt-in")
     #inference_steps = gr.Slider(label="Inference Steps", minimum=10, maximum=100, step=1, value=40, interactive=False)
     submit_btn = gr.Button("Submit")
@@ -122,7 +114,7 @@ with gr.Blocks(css=css) as demo:
     share_button = gr.Button("Share to community", elem_id="share-btn")
 
     submit_btn.click(fn=infer,
-                     inputs=[prompt_in],
+                     inputs=[prompt_in, video_in],
                      outputs=[video_result, share_group])
 
     share_button.click(None, [], [], _js=share_js)
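
In sum, the commit drops the base cerspense/zeroscope_v2_576w text-to-video pass: infer now receives an uploaded clip from the new gr.Video component alongside the prompt and only runs the cerspense/zeroscope_v2_XL video-to-video upscaling pass. Below is a minimal sketch of the resulting flow; since gr.Video(type="filepath") hands infer a file path rather than an array of frames, the OpenCV-based frame decoding shown here is an assumption for illustration, not part of the committed app.py.

# Sketch of the updated flow. Frame decoding from the uploaded file path
# (via OpenCV) is assumed, not taken from the commit.
import cv2
import torch
from PIL import Image
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video

pipe_xl = DiffusionPipeline.from_pretrained(
    "cerspense/zeroscope_v2_XL", torch_dtype=torch.float16, revision="refs/pr/17"
)
pipe_xl.vae.enable_slicing()
pipe_xl.scheduler = DPMSolverMultistepScheduler.from_config(pipe_xl.scheduler.config)
pipe_xl.enable_model_cpu_offload()

def infer(prompt, video_in):
    # video_in is a file path from gr.Video(type="filepath"); decode it to RGB frames.
    frames, cap = [], cv2.VideoCapture(video_in)
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    cap.release()

    # Resize to the XL pipeline's working resolution and run the upscaling pass.
    video = [Image.fromarray(f).resize((1024, 576)) for f in frames]
    video_frames = pipe_xl(prompt, video=video, strength=0.6).frames
    return export_to_video(video_frames)

On the UI side, the new video_in component is wired into submit_btn.click alongside prompt_in, so both values reach infer on submit.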