jhj0517 commited on
Commit
39f5d9f
·
1 Parent(s): 3a57221

add @spaces.GPU decorator to every function

Browse files
modules/utils/subtitle_manager.py CHANGED
@@ -1,5 +1,7 @@
1
  import re
2
 
 
 
3
 
4
  def timeformat_srt(time):
5
  hours = time // 3600
@@ -117,7 +119,7 @@ def get_serialized_vtt(dicts):
117
  output += f'{dic["sentence"]}\n\n'
118
  return output
119
 
120
-
121
  def safe_filename(name):
122
  from app import _args
123
  INVALID_FILENAME_CHARS = r'[<>:"/\\|?*\x00-\x1f]'
 
1
  import re
2
 
3
+ # Zero GPU
4
+ import spaces
5
 
6
  def timeformat_srt(time):
7
  hours = time // 3600
 
119
  output += f'{dic["sentence"]}\n\n'
120
  return output
121
 
122
+ @spaces.GPU(duration=120)
123
  def safe_filename(name):
124
  from app import _args
125
  INVALID_FILENAME_CHARS = r'[<>:"/\\|?*\x00-\x1f]'
modules/whisper/faster_whisper_inference.py CHANGED
@@ -13,6 +13,7 @@ from argparse import Namespace
13
  from modules.whisper.whisper_parameter import *
14
  from modules.whisper.whisper_base import WhisperBase
15
 
 
16
 
17
  class FasterWhisperInference(WhisperBase):
18
  def __init__(self,
@@ -31,6 +32,7 @@ class FasterWhisperInference(WhisperBase):
31
  self.available_compute_types = self.get_available_compute_type()
32
  self.download_model(model_size="large-v2", model_dir=self.model_dir)
33
 
 
34
  def transcribe(self,
35
  audio: Union[str, BinaryIO, np.ndarray],
36
  progress: gr.Progress,
@@ -90,6 +92,7 @@ class FasterWhisperInference(WhisperBase):
90
  print("transcribe: finished")
91
  return segments_result, elapsed_time
92
 
 
93
  def update_model(self,
94
  model_size: str,
95
  compute_type: str,
 
13
  from modules.whisper.whisper_parameter import *
14
  from modules.whisper.whisper_base import WhisperBase
15
 
16
+ import spaces
17
 
18
  class FasterWhisperInference(WhisperBase):
19
  def __init__(self,
 
32
  self.available_compute_types = self.get_available_compute_type()
33
  self.download_model(model_size="large-v2", model_dir=self.model_dir)
34
 
35
+ @spaces.GPU(duration=120)
36
  def transcribe(self,
37
  audio: Union[str, BinaryIO, np.ndarray],
38
  progress: gr.Progress,
 
92
  print("transcribe: finished")
93
  return segments_result, elapsed_time
94
 
95
+ @spaces.GPU(duration=120)
96
  def update_model(self,
97
  model_size: str,
98
  compute_type: str,
modules/whisper/whisper_base.py CHANGED
@@ -42,6 +42,7 @@ class WhisperBase(ABC):
42
  self.vad = SileroVAD()
43
 
44
  @abstractmethod
 
45
  def transcribe(self,
46
  audio: Union[str, BinaryIO, np.ndarray],
47
  progress: gr.Progress,
@@ -50,6 +51,7 @@ class WhisperBase(ABC):
50
  pass
51
 
52
  @abstractmethod
 
53
  def update_model(self,
54
  model_size: str,
55
  compute_type: str,
@@ -57,6 +59,7 @@ class WhisperBase(ABC):
57
  ):
58
  pass
59
 
 
60
  def run(self,
61
  audio: Union[str, BinaryIO, np.ndarray],
62
  progress: gr.Progress,
@@ -322,6 +325,7 @@ class WhisperBase(ABC):
322
  pass
323
 
324
  @staticmethod
 
325
  def generate_and_write_file(file_name: str,
326
  transcribed_segments: list,
327
  add_timestamp: bool,
 
42
  self.vad = SileroVAD()
43
 
44
  @abstractmethod
45
+ @spaces.GPU(duration=120)
46
  def transcribe(self,
47
  audio: Union[str, BinaryIO, np.ndarray],
48
  progress: gr.Progress,
 
51
  pass
52
 
53
  @abstractmethod
54
+ @spaces.GPU(duration=120)
55
  def update_model(self,
56
  model_size: str,
57
  compute_type: str,
 
59
  ):
60
  pass
61
 
62
+ @spaces.GPU(duration=120)
63
  def run(self,
64
  audio: Union[str, BinaryIO, np.ndarray],
65
  progress: gr.Progress,
 
325
  pass
326
 
327
  @staticmethod
328
+ @spaces.GPU(duration=120)
329
  def generate_and_write_file(file_name: str,
330
  transcribed_segments: list,
331
  add_timestamp: bool,