fantos committed (verified)
Commit f3a8315 · 1 Parent(s): 7c5708f

Delete app-backup.py

Files changed (1)
  1. app-backup.py +0 -186
app-backup.py DELETED
@@ -1,186 +0,0 @@
-import os
-
-os.environ["GRADIO_TEMP_DIR"] = os.path.join(os.getcwd(), ".tmp_outputs")
-os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
-
-import uuid
-
-import GPUtil
-import gradio as gr
-import psutil
-import spaces
-
-from videosys import CogVideoXConfig, CogVideoXPABConfig, VideoSysEngine
-
-
-def load_model(model_name, enable_video_sys=False, pab_threshold=[100, 850], pab_range=2):
-    pab_config = CogVideoXPABConfig(spatial_threshold=pab_threshold, spatial_range=pab_range)
-    config = CogVideoXConfig(model_name, enable_pab=enable_video_sys, pab_config=pab_config)
-    engine = VideoSysEngine(config)
-    return engine
-
-
-def generate(engine, prompt, num_inference_steps=50, guidance_scale=6.0):
-    video = engine.generate(prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale).video[0]
-
-    unique_filename = f"{uuid.uuid4().hex}.mp4"
-    output_path = os.path.join("./.tmp_outputs", unique_filename)
-
-    engine.save_video(video, output_path)
-    return output_path
-
-
-def get_server_status():
-    cpu_percent = psutil.cpu_percent()
-    memory = psutil.virtual_memory()
-    disk = psutil.disk_usage("/")
-    gpus = GPUtil.getGPUs()
-    gpu_info = []
-    for gpu in gpus:
-        gpu_info.append(
-            {
-                "id": gpu.id,
-                "name": gpu.name,
-                "load": f"{gpu.load*100:.1f}%",
-                "memory_used": f"{gpu.memoryUsed}MB",
-                "memory_total": f"{gpu.memoryTotal}MB",
-            }
-        )
-
-    return {"cpu": f"{cpu_percent}%", "memory": f"{memory.percent}%", "disk": f"{disk.percent}%", "gpu": gpu_info}
-
-
-@spaces.GPU()
-def generate_vanilla(model_name, prompt, num_inference_steps, guidance_scale, progress=gr.Progress(track_tqdm=True)):
-    engine = load_model(model_name)
-    video_path = generate(engine, prompt, num_inference_steps, guidance_scale)
-    return video_path
-
-
-@spaces.GPU()
-def generate_vs(
-    model_name,
-    prompt,
-    num_inference_steps,
-    guidance_scale,
-    threshold_start,
-    threshold_end,
-    gap,
-    progress=gr.Progress(track_tqdm=True),
-):
-    threshold = [int(threshold_end), int(threshold_start)]
-    gap = int(gap)
-    engine = load_model(model_name, enable_video_sys=True, pab_threshold=threshold, pab_range=gap)
-    video_path = generate(engine, prompt, num_inference_steps, guidance_scale)
-    return video_path
-
-
-def get_server_status():
-    cpu_percent = psutil.cpu_percent()
-    memory = psutil.virtual_memory()
-    disk = psutil.disk_usage("/")
-    try:
-        gpus = GPUtil.getGPUs()
-        if gpus:
-            gpu = gpus[0]
-            gpu_memory = f"{gpu.memoryUsed}/{gpu.memoryTotal}MB ({gpu.memoryUtil*100:.1f}%)"
-        else:
-            gpu_memory = "No GPU found"
-    except:
-        gpu_memory = "GPU information unavailable"
-
-    return {
-        "cpu": f"{cpu_percent}%",
-        "memory": f"{memory.percent}%",
-        "disk": f"{disk.percent}%",
-        "gpu_memory": gpu_memory,
-    }
-
-
-def update_server_status():
-    status = get_server_status()
-    return (status["cpu"], status["memory"], status["disk"], status["gpu_memory"])
-
-
-css = """
-footer {
-    visibility: hidden;
-}
-"""
-
-
-with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
-
-
-    with gr.Row():
-        with gr.Column():
-            prompt = gr.Textbox(label="Prompt (Less than 200 Words)", value="Sunset over the sea.", lines=3)
-
-            with gr.Column():
-                gr.Markdown("**Generation Parameters**<br>")
-                with gr.Row():
-                    model_name = gr.Radio(
-                        ["THUDM/CogVideoX-2b", "THUDM/CogVideoX-5b"], label="Model Type", value="THUDM/CogVideoX-2b"
-                    )
-                with gr.Row():
-                    num_inference_steps = gr.Number(label="Inference Steps", value=50)
-                    guidance_scale = gr.Number(label="Guidance Scale", value=6.0)
-                with gr.Row():
-                    pab_range = gr.Number(
-                        label="PAB Broadcast Range", value=2, precision=0, info="Broadcast timesteps range."
-                    )
-                    pab_threshold_start = gr.Number(label="PAB Start Timestep", value=850, info="Start from step 1000.")
-                    pab_threshold_end = gr.Number(label="PAB End Timestep", value=100, info="End at step 0.")
-                with gr.Row():
-                    generate_button_vs = gr.Button("⚡️ Generate Video with VideoSys (Faster)")
-                    generate_button = gr.Button("🎬 Generate Video (Original)")
-                with gr.Column(elem_classes="server-status"):
-                    gr.Markdown("#### Server Status")
-
-                    with gr.Row():
-                        cpu_status = gr.Textbox(label="CPU", scale=1)
-                        memory_status = gr.Textbox(label="Memory", scale=1)
-
-                    with gr.Row():
-                        disk_status = gr.Textbox(label="Disk", scale=1)
-                        gpu_status = gr.Textbox(label="GPU Memory", scale=1)
-
-                    with gr.Row():
-                        refresh_button = gr.Button("Refresh")
-
-        with gr.Column():
-            with gr.Row():
-                video_output_vs = gr.Video(label="CogVideoX with VideoSys", width=720, height=480)
-            with gr.Row():
-                video_output = gr.Video(label="CogVideoX", width=720, height=480)
-
-    generate_button.click(
-        generate_vanilla,
-        inputs=[model_name, prompt, num_inference_steps, guidance_scale],
-        outputs=[video_output],
-        concurrency_id="gen",
-        concurrency_limit=1,
-    )
-
-    generate_button_vs.click(
-        generate_vs,
-        inputs=[
-            model_name,
-            prompt,
-            num_inference_steps,
-            guidance_scale,
-            pab_threshold_start,
-            pab_threshold_end,
-            pab_range,
-        ],
-        outputs=[video_output_vs],
-        concurrency_id="gen",
-        concurrency_limit=1,
-    )
-
-    refresh_button.click(update_server_status, outputs=[cpu_status, memory_status, disk_status, gpu_status])
-    demo.load(update_server_status, outputs=[cpu_status, memory_status, disk_status, gpu_status], every=1)
-
-if __name__ == "__main__":
-    demo.queue(max_size=10, default_concurrency_limit=1)
-    demo.launch()
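
Two details of the deleted script are worth flagging in case it is ever restored from parent 7c5708f. First, load_model declares a mutable list, [100, 850], as a default argument, which Python evaluates once at import time and shares across calls. Second, get_server_status is defined twice, so the later single-GPU version silently shadows the earlier multi-GPU one, and its bare except: swallows every exception, including KeyboardInterrupt. A minimal sketch of the safer default-argument idiom, reusing the names from the file above (this rewrite was never part of the repo):

    from videosys import CogVideoXConfig, CogVideoXPABConfig, VideoSysEngine


    def load_model(model_name, enable_video_sys=False, pab_threshold=None, pab_range=2):
        # None as the sentinel avoids the shared-mutable-default pitfall:
        # the list is built fresh on every call instead of once at import time.
        if pab_threshold is None:
            pab_threshold = [100, 850]
        pab_config = CogVideoXPABConfig(spatial_threshold=pab_threshold, spatial_range=pab_range)
        config = CogVideoXConfig(model_name, enable_pab=enable_video_sys, pab_config=pab_config)
        return VideoSysEngine(config)

Note also that generate_vs builds the threshold as [threshold_end, threshold_start], which with the UI defaults (start 850, end 100) yields [100, 850], the same low-to-high order as load_model's own default.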