Sergidev commited on
Commit
8cad36b
·
1 Parent(s): 78672d7
Files changed (5) hide show
  1. README.md +2 -2
  2. app.py +48 -0
  3. cache_manager.py +7 -0
  4. config.json +6 -0
  5. requirements.txt +8 -0
README.md CHANGED
@@ -4,9 +4,9 @@ emoji: 🏢
4
  colorFrom: yellow
5
  colorTo: green
6
  sdk: gradio
7
- sdk_version: 5.17.1
8
  app_file: app.py
9
- pinned: false
10
  short_description: Image-to-Video
11
  ---
12
 
 
4
  colorFrom: yellow
5
  colorTo: green
6
  sdk: gradio
7
+ sdk_version: 4.12.0
8
  app_file: app.py
9
+ pinned: true
10
  short_description: Image-to-Video
11
  ---
12
 
app.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app.py
+ import torch
+ import gradio as gr
+ import huggingface_hub
+ import spaces
+ from diffusers.utils import export_to_video
+
+ from hunyuan_model import HunyuanVideoPipeline
8
@spaces.GPU(duration=120)
def generate_video(image, video_length=85, infer_steps=30, seed=123):
    """Generate a short video from a single input image with Hunyuan.

    Args:
        image: Path to the input image (gradio Image with type="filepath"
            passes a filepath string).
        video_length: Number of frames to generate.
        infer_steps: Number of diffusion inference steps.
        seed: Seed for the torch random generator, for reproducibility.

    Returns:
        Filesystem path of the exported video file.
    """
    # NOTE(review): the pipeline is re-loaded on every call; loading once at
    # module scope would be faster, but it is kept here so the heavy work
    # runs inside the @spaces.GPU allocation window.
    pipeline = HunyuanVideoPipeline.from_pretrained(
        "AeroScripts/leapfusion-hunyuan-image2video",
        variant="fp8",
        torch_dtype=torch.float16,
    )

    # gr.Number/gr.Slider may deliver floats; manual_seed and frame counts
    # require ints (manual_seed raises TypeError on a float).
    generator = torch.Generator().manual_seed(int(seed))
    frames = pipeline(
        image,
        video_length=int(video_length),
        num_inference_steps=int(infer_steps),
        generator=generator,
    ).frames

    # export_to_video writes the frames to a temporary video file and
    # returns its path, which gr.Video can display directly.
    video_path = export_to_video(frames)
    return video_path
26
# Build the Gradio UI: image + generation controls on the left,
# the resulting video on the right.
with gr.Blocks(title="Hunyuan-ITV") as interface:
    gr.Markdown("# Hunyuan Image-to-Video Converter")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(label="Input Image", type="filepath")
            video_length = gr.Slider(16, 120, value=85, label="Frame Count")
            infer_steps = gr.Slider(10, 50, value=30, step=5, label="Inference Steps")
            seed = gr.Number(123, label="Random Seed")
            submit_btn = gr.Button("Generate Video")
        with gr.Column():
            output_video = gr.Video(label="Generated Video")

    # Wire the button to the generation function; outputs land in the
    # video component on the right-hand column.
    submit_btn.click(
        generate_video,
        inputs=[input_image, video_length, infer_steps, seed],
        outputs=output_video,
    )

if __name__ == "__main__":
    # Bind on all interfaces at the HF Spaces conventional port.
    interface.launch(server_name="0.0.0.0", server_port=7860)
cache_manager.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
from diskcache import Cache

# Fix: generate_video was referenced below but never imported (NameError
# on first call).
from app import generate_video

# On-disk cache for generated videos; entries expire after one hour.
video_cache = Cache("/tmp/hunyuan_videos")


@video_cache.memoize(expire=3600)
def cached_generation(image, params):
    """Return a video path for *image*, reusing a cached result when present.

    Args:
        image: Input image path, forwarded to generate_video.
        params: Dict of keyword arguments for generate_video
            (video_length, infer_steps, seed).

    Returns:
        Filesystem path of the generated (or cached) video.
    """
    # NOTE(review): memoize builds its cache key from the arguments —
    # confirm diskcache handles a dict argument; a tuple of items may be
    # needed for stable keys.
    return generate_video(image, **params)
config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "HUNYUAN_MODEL": "AeroScripts/leapfusion-hunyuan-image2video",
3
+ "MAX_FRAME_SIZE": 960,
4
+ "QUANTIZATION_MODE": "fp8",
5
+ "SAFETY_CHECKER": "disabled"
6
+ }
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # requirements.txt
2
+ gradio==4.12.0
3
+ torch==2.3.1
4
+ accelerate==0.30.0
5
+ transformers==4.40.0
6
+ diffusers==0.28.0
7
+ safetensors==0.4.2
8
+ huggingface_hub
9
+ diskcache