VictorKai1996NUS committed on
Commit
8cf4944
·
verified ·
1 Parent(s): 0af4f53

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +84 -60
app.py CHANGED
@@ -1,7 +1,4 @@
1
  import os
2
-
3
- os.environ["GRADIO_TEMP_DIR"] = os.path.join(os.getcwd(), ".tmp_outputs")
4
-
5
  import torch
6
  from openai import OpenAI
7
  from time import time
@@ -13,6 +10,11 @@ from videosys import CogVideoConfig, VideoSysEngine
13
  from videosys.models.cogvideo.pipeline import CogVideoPABConfig
14
  import psutil
15
  import GPUtil
 
 
 
 
 
16
 
17
  logging.basicConfig(level=logging.INFO)
18
  logger = logging.getLogger(__name__)
@@ -31,6 +33,15 @@ Other times the user will not want modifications , but instead want a new image
31
  Video descriptions must have the same num of words as examples below. Extra words will be ignored.
32
  """
33
 
 
 
 
 
 
 
 
 
 
34
  def convert_prompt(prompt: str, retry_times: int = 3) -> str:
35
  if not os.environ.get("OPENAI_API_KEY"):
36
  return prompt
@@ -101,30 +112,68 @@ def generate(engine, prompt, num_inference_steps=50, guidance_scale=6.0):
101
  logger.error(f"An error occurred: {str(e)}")
102
  return None
103
 
104
-
105
  def get_server_status():
106
  cpu_percent = psutil.cpu_percent()
107
  memory = psutil.virtual_memory()
108
  disk = psutil.disk_usage('/')
109
- gpus = GPUtil.getGPUs()
110
- gpu_info = []
111
- for gpu in gpus:
112
- gpu_info.append({
113
- 'id': gpu.id,
114
- 'name': gpu.name,
115
- 'load': f"{gpu.load*100:.1f}%",
116
- 'memory_used': f"{gpu.memoryUsed}MB",
117
- 'memory_total': f"{gpu.memoryTotal}MB"
118
- })
119
 
120
  return {
121
  'cpu': f"{cpu_percent}%",
122
  'memory': f"{memory.percent}%",
123
  'disk': f"{disk.percent}%",
124
- 'gpu': gpu_info
125
  }
126
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
127
 
 
 
 
128
 
129
  css = """
130
  body {
@@ -253,56 +302,27 @@ with gr.Blocks(css=css) as demo:
253
  download_video_button_vs = gr.File(label="📥 Download Video", visible=False)
254
  elapsed_time_vs = gr.Textbox(label="Elapsed Time", value="0s", visible=False)
255
 
256
-
257
-
 
 
 
 
 
258
 
259
  def generate_vanilla(prompt, num_inference_steps, guidance_scale, progress=gr.Progress(track_tqdm=True)):
260
- engine = load_model()
261
- t = time()
262
- video_path = generate(engine, prompt, num_inference_steps, guidance_scale)
263
- elapsed_time = time() - t
264
- video_update = gr.update(visible=True, value=video_path)
265
- elapsed_time = gr.update(visible=True, value=f"{elapsed_time:.2f}s")
266
-
267
- return video_path, video_update, elapsed_time
268
 
269
  def generate_vs(prompt, num_inference_steps, guidance_scale, threshold, gap, progress=gr.Progress(track_tqdm=True)):
270
  threshold = [int(i) for i in threshold.split(",")]
271
  gap = int(gap)
272
- engine = load_model(enable_video_sys=True, pab_threshold=threshold, pab_gap=gap)
273
- t = time()
274
- video_path = generate(engine, prompt, num_inference_steps, guidance_scale)
275
- elapsed_time = time() - t
276
- video_update = gr.update(visible=True, value=video_path)
277
- elapsed_time = gr.update(visible=True, value=f"{elapsed_time:.2f}s")
278
-
279
- return video_path, video_update, elapsed_time
280
 
281
  def enhance_prompt_func(prompt):
282
  return convert_prompt(prompt, retry_times=1)
283
 
284
- def get_server_status():
285
- cpu_percent = psutil.cpu_percent()
286
- memory = psutil.virtual_memory()
287
- disk = psutil.disk_usage('/')
288
- try:
289
- gpus = GPUtil.getGPUs()
290
- if gpus:
291
- gpu = gpus[0] # 只获取第一个GPU的信息
292
- gpu_memory = f"{gpu.memoryUsed}/{gpu.memoryTotal}MB ({gpu.memoryUtil*100:.1f}%)"
293
- else:
294
- gpu_memory = "No GPU found"
295
- except:
296
- gpu_memory = "GPU information unavailable"
297
-
298
- return {
299
- 'cpu': f"{cpu_percent}%",
300
- 'memory': f"{memory.percent}%",
301
- 'disk': f"{disk.percent}%",
302
- 'gpu_memory': gpu_memory
303
- }
304
-
305
-
306
  def update_server_status():
307
  status = get_server_status()
308
  return (
@@ -312,25 +332,29 @@ with gr.Blocks(css=css) as demo:
312
  status['gpu_memory']
313
  )
314
 
 
 
315
 
316
  generate_button.click(
317
  generate_vanilla,
318
  inputs=[prompt, num_inference_steps, guidance_scale],
319
- outputs=[video_output, download_video_button, elapsed_time],
320
  )
321
 
322
  generate_button_vs.click(
323
  generate_vs,
324
  inputs=[prompt, num_inference_steps, guidance_scale, pab_threshold, pab_gap],
325
- outputs=[video_output_vs, download_video_button_vs, elapsed_time_vs],
326
  )
327
 
328
  enhance_button.click(enhance_prompt_func, inputs=[prompt], outputs=[prompt])
329
 
330
-
331
  refresh_button.click(update_server_status, outputs=[cpu_status, memory_status, disk_status, gpu_status])
332
  demo.load(update_server_status, outputs=[cpu_status, memory_status, disk_status, gpu_status], every=1)
333
 
 
 
 
334
  if __name__ == "__main__":
335
- demo.queue(max_size=10, default_concurrency_limit=1)
336
- demo.launch()
 
1
  import os
 
 
 
2
  import torch
3
  from openai import OpenAI
4
  from time import time
 
10
  from videosys.models.cogvideo.pipeline import CogVideoPABConfig
11
  import psutil
12
  import GPUtil
13
+ import queue
14
+ import threading
15
+ import pandas as pd
16
+
17
+ os.environ["GRADIO_TEMP_DIR"] = os.path.join(os.getcwd(), ".tmp_outputs")
18
 
19
  logging.basicConfig(level=logging.INFO)
20
  logger = logging.getLogger(__name__)
 
33
  Video descriptions must have the same num of words as examples below. Extra words will be ignored.
34
  """
35
 
36
# Global FIFO queue of pending generation tasks (consumed by task_processor).
task_queue = queue.Queue()

# Lock protecting the shared task list and per-task status/result fields.
lock = threading.Lock()

# All submitted tasks in submission order; a task's list index is its id.
tasks = []
44
+
45
  def convert_prompt(prompt: str, retry_times: int = 3) -> str:
46
  if not os.environ.get("OPENAI_API_KEY"):
47
  return prompt
 
112
  logger.error(f"An error occurred: {str(e)}")
113
  return None
114
 
 
115
def get_server_status():
    """Collect host resource usage as human-readable strings for the status panel.

    Returns:
        dict with keys 'cpu', 'memory', 'disk' (usage percentages) and
        'gpu_memory' (first GPU's used/total MB, or a fallback message).
    """
    cpu_percent = psutil.cpu_percent()
    memory = psutil.virtual_memory()
    disk = psutil.disk_usage('/')
    try:
        gpus = GPUtil.getGPUs()
        if gpus:
            gpu = gpus[0]  # only report the first GPU
            gpu_memory = f"{gpu.memoryUsed}/{gpu.memoryTotal}MB ({gpu.memoryUtil*100:.1f}%)"
        else:
            gpu_memory = "No GPU found"
    except Exception:
        # Was a bare `except:`, which also swallows SystemExit/KeyboardInterrupt.
        gpu_memory = "GPU information unavailable"

    return {
        'cpu': f"{cpu_percent}%",
        'memory': f"{memory.percent}%",
        'disk': f"{disk.percent}%",
        'gpu_memory': gpu_memory
    }
135
 
136
def task_processor():
    """Background worker loop: executes queued tasks one at a time.

    Each task is a dict built by add_task ('function', 'args', 'status',
    'result', ...). A None item on the queue is the shutdown sentinel.
    """
    while True:
        task = task_queue.get()
        if task is None:  # shutdown sentinel
            break

        # Mark the task as running before executing it.
        with lock:
            task['status'] = 'running'

        try:
            result = task['function'](*task['args'])
        except Exception:
            # Previously an exception here propagated out of the thread,
            # killing the worker and leaving the task stuck in 'running'.
            # Record the failure and keep serving the queue.
            logger.exception("task %s failed", task.get('id'))
            with lock:
                task['status'] = 'failed'
        else:
            with lock:
                task['status'] = 'completed'
                task['result'] = result

        task_queue.task_done()
155
+
156
# Start the background worker thread that drains the task queue.
# daemon=True lets the process exit cleanly: the worker blocks forever on
# task_queue.get() and no shutdown sentinel is ever enqueued, so a
# non-daemon thread would hang the interpreter at exit.
processor_thread = threading.Thread(target=task_processor, daemon=True)
processor_thread.start()
159
+
160
def add_task(function, args, task_name):
    """Register a new task and enqueue it for the background worker.

    Args:
        function: callable the worker invokes as function(*args).
        args: positional-argument tuple for the callable.
        task_name: human-readable label shown in the task table.

    Returns:
        The integer id assigned to the task.
    """
    task = {
        'id': None,  # assigned under the lock below
        'name': task_name,
        'status': 'waiting',
        'function': function,
        'args': args,
        'result': None
    }
    with lock:
        # The id must be taken while holding the lock: reading len(tasks)
        # outside it could hand two concurrent callers the same id.
        task['id'] = len(tasks)
        tasks.append(task)
    task_queue.put(task)
    return task['id']
173
 
174
def get_task_status():
    """Snapshot the task list as a DataFrame with ID / Task / Status columns."""
    with lock:
        rows = [
            {'ID': t['id'], 'Task': t['name'], 'Status': t['status']}
            for t in tasks
        ]
    return pd.DataFrame(rows)
177
 
178
  css = """
179
  body {
 
302
  download_video_button_vs = gr.File(label="📥 Download Video", visible=False)
303
  elapsed_time_vs = gr.Textbox(label="Elapsed Time", value="0s", visible=False)
304
 
305
+ with gr.Row():
306
+ task_status = gr.Dataframe(
307
+ headers=["ID", "Task", "Status"],
308
+ label="Task Queue",
309
+ interactive=False
310
+ )
311
+ refresh_tasks_button = gr.Button("Refresh Tasks")
312
 
313
  def generate_vanilla(prompt, num_inference_steps, guidance_scale, progress=gr.Progress(track_tqdm=True)):
314
+ task_id = add_task(generate, (load_model(), prompt, num_inference_steps, guidance_scale), f"Generate: {prompt[:20]}...")
315
+ return get_task_status()
 
 
 
 
 
 
316
 
317
  def generate_vs(prompt, num_inference_steps, guidance_scale, threshold, gap, progress=gr.Progress(track_tqdm=True)):
318
  threshold = [int(i) for i in threshold.split(",")]
319
  gap = int(gap)
320
+ task_id = add_task(generate, (load_model(enable_video_sys=True, pab_threshold=threshold, pab_gap=gap), prompt, num_inference_steps, guidance_scale), f"Generate VS: {prompt[:20]}...")
321
+ return get_task_status()
 
 
 
 
 
 
322
 
323
    def enhance_prompt_func(prompt):
        """Run a single prompt-enhancement pass via convert_prompt (one retry)."""
        return convert_prompt(prompt, retry_times=1)
325
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
326
  def update_server_status():
327
  status = get_server_status()
328
  return (
 
332
  status['gpu_memory']
333
  )
334
 
335
    def update_task_status():
        """Gradio callback: return the current task table for the Dataframe widget."""
        return get_task_status()
337
 
338
  generate_button.click(
339
  generate_vanilla,
340
  inputs=[prompt, num_inference_steps, guidance_scale],
341
+ outputs=[task_status]
342
  )
343
 
344
  generate_button_vs.click(
345
  generate_vs,
346
  inputs=[prompt, num_inference_steps, guidance_scale, pab_threshold, pab_gap],
347
+ outputs=[task_status]
348
  )
349
 
350
  enhance_button.click(enhance_prompt_func, inputs=[prompt], outputs=[prompt])
351
 
 
352
  refresh_button.click(update_server_status, outputs=[cpu_status, memory_status, disk_status, gpu_status])
353
  demo.load(update_server_status, outputs=[cpu_status, memory_status, disk_status, gpu_status], every=1)
354
 
355
+ refresh_tasks_button.click(update_task_status, outputs=[task_status])
356
+ demo.load(update_task_status, outputs=[task_status], every=5) # 每5秒自动刷新一次
357
+
358
if __name__ == "__main__":
    # NOTE(review): `concurrency_count` is the Gradio 3.x parameter name; the
    # line this commit replaced used Gradio 4's `default_concurrency_limit`.
    # Confirm the pinned gradio version accepts `concurrency_count`, otherwise
    # this raises TypeError at startup.
    demo.queue(max_size=10, concurrency_count=1)
    demo.launch()