cocktailpeanut committed
Commit a0f0224
1 Parent(s): a040b19
Files changed (2)
  1. app.py +14 -8
  2. requirements.txt +1 -0
app.py CHANGED
@@ -20,11 +20,10 @@ import tempfile
 import os
 import gc
 from openai import OpenAI
+import devicetorch
 
 # Load Hugging Face token if needed
-hf_token = os.getenv("HF_TOKEN")
-openai_api_key = os.getenv("OPENAI_API_KEY")
-client = OpenAI(api_key=openai_api_key)
+#hf_token = os.getenv("HF_TOKEN")
 system_prompt_t2v_path = "assets/system_prompt_t2v.txt"
 system_prompt_i2v_path = "assets/system_prompt_i2v.txt"
 with open(system_prompt_t2v_path, "r") as f:
@@ -37,7 +36,8 @@ with open(system_prompt_i2v_path, "r") as f:
 model_path = "asset"
 if not os.path.exists(model_path):
     snapshot_download(
-        "Lightricks/LTX-Video", local_dir=model_path, repo_type="model", token=hf_token
+        #"Lightricks/LTX-Video", local_dir=model_path, repo_type="model", token=hf_token
+        "Lightricks/LTX-Video", local_dir=model_path, repo_type="model",
     )
 
 # Global variables to load components
@@ -45,7 +45,8 @@ vae_dir = Path(model_path) / "vae"
 unet_dir = Path(model_path) / "unet"
 scheduler_dir = Path(model_path) / "scheduler"
 
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+device = devicetorch.get(torch)
 
 
 def load_vae(vae_dir):
@@ -113,6 +114,8 @@ def enhance_prompt_if_enabled(prompt, enhance_toggle, type="t2v"):
     ]
 
     try:
+        openai_api_key = os.getenv("OPENAI_API_KEY")
+        client = OpenAI(api_key=openai_api_key)
         response = client.chat.completions.create(
             model="gpt-4o-mini",
             messages=messages,
@@ -261,7 +264,8 @@ def generate_video_from_text(
             duration=5,
         )
     finally:
-        torch.cuda.empty_cache()
+        devicetorch.empty_cache(torch)
+        # torch.cuda.empty_cache()
        gc.collect()
 
     output_path = tempfile.mktemp(suffix=".mp4")
@@ -278,7 +282,8 @@ def generate_video_from_text(
     # Explicitly delete tensors and clear cache
     del images
     del video_np
-    torch.cuda.empty_cache()
+    #torch.cuda.empty_cache()
+    devicetorch.empty_cache(torch)
     return output_path
 
 
@@ -366,7 +371,8 @@ def generate_video_from_image(
         )
 
     finally:
-        torch.cuda.empty_cache()
+        #torch.cuda.empty_cache()
+        devicetorch.empty_cache(torch)
        gc.collect()
 
     return output_path
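Note: the commit swaps the hard-coded CUDA calls for devicetorch.get(torch) and devicetorch.empty_cache(torch), and moves the OPENAI_API_KEY lookup and OpenAI client construction into the try block of enhance_prompt_if_enabled, so the key is only needed when prompt enhancement is actually used. A minimal plain-torch sketch of what the devicetorch helpers appear to provide, assuming they simply prefer CUDA, then Apple MPS, then CPU; pick_device and empty_cache below are illustrative names, not part of devicetorch:

import torch

# Hypothetical equivalent of devicetorch.get(torch): prefer CUDA, then Apple MPS, then CPU.
def pick_device() -> torch.device:
    if torch.cuda.is_available():
        return torch.device("cuda")
    if torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")

# Hypothetical equivalent of devicetorch.empty_cache(torch): release the allocator
# cache only for the backend that is actually in use.
def empty_cache() -> None:
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    elif torch.backends.mps.is_available():
        torch.mps.empty_cache()

device = pick_device()

Using one helper for both device selection and cache clearing keeps the generation code free of per-backend branches, which is the effect the devicetorch calls in the diff above seem intended to achieve.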
requirements.txt CHANGED
@@ -12,3 +12,4 @@ ftfy
 gradio
 openai
 gradio_toggle
+numpy==2.1.3