ruslanmv committed
Commit 251c9c7 · verified · 1 Parent(s): 9b118c3

Update app.py

Files changed (1)
  1. app.py  +12 −2
app.py CHANGED
@@ -12,7 +12,18 @@ import os
 import glob
 import subprocess
 import imageio_ffmpeg
+import os
 
+# Define a fallback for environments without GPU
+if os.environ.get("SPACES_ZERO_GPU") is not None:
+    import spaces
+else:
+    class spaces:
+        @staticmethod
+        def GPU(func):
+            def wrapper(*args, **kwargs):
+                return func(*args, **kwargs)
+            return wrapper
 # Ensure 'punkt' is downloaded for nltk
 try:
     nltk.data.find('tokenizers/punkt')
@@ -36,7 +47,7 @@ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 model.to(device)
 print(device)
 
-
+@spaces.GPU(duration=60 * 3)
 def get_output_video(text):
     inputs = tokenizer(text,
                        max_length=1024,
@@ -55,7 +66,6 @@ def get_output_video(text):
     Set the device to either "cuda" or "cpu". Once everything has finished initializing,
     float32 is faster than float16 but uses more GPU memory.
     '''
-
     def generate_image(
         is_mega: bool,
         text: str,
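
Note on the fallback stub added in the first hunk: its GPU method takes the wrapped function as a positional argument, which covers the bare @spaces.GPU form, while get_output_video applies the decorator with a keyword argument, @spaces.GPU(duration=60 * 3) (on ZeroGPU Spaces, duration tells the scheduler roughly how long the call may hold the GPU, here three minutes). Outside a ZeroGPU environment that call reaches the stub with duration= and no function, so a locally runnable fallback would need to accept both decorator forms. A minimal sketch, not part of this commit and simply ignoring the duration value, could look like this:

if os.environ.get("SPACES_ZERO_GPU") is not None:
    import spaces
else:
    class spaces:
        @staticmethod
        def GPU(func=None, **kwargs):
            # Called as @spaces.GPU(duration=...): return a pass-through decorator.
            if func is None:
                def decorator(inner):
                    return inner
                return decorator
            # Called as @spaces.GPU: return the function unchanged.
            return func

With a stub along these lines, @spaces.GPU(duration=60 * 3) resolves to a no-op on machines without the spaces package, while the real spaces.GPU is used whenever SPACES_ZERO_GPU is set.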