jhj0517 committed
Commit d94782b · 1 Parent(s): f94a0c1

refactor progress

modules/whisper/faster_whisper_inference.py CHANGED
@@ -35,8 +35,8 @@ class FasterWhisperInference(WhisperBase):
     @spaces.GPU(duration=120)
     def transcribe(self,
                    audio: Union[str, BinaryIO, np.ndarray],
-                   progress: gr.Progress,
                    *whisper_params,
+                   progress: gr.Progress = gr.Progress(),
                    ) -> Tuple[List[dict], float]:
         """
         transcribe method for faster-whisper.
@@ -96,7 +96,7 @@ class FasterWhisperInference(WhisperBase):
     def update_model(self,
                      model_size: str,
                      compute_type: str,
-                     progress: gr.Progress
+                     progress: gr.Progress = gr.Progress(),
                      ):
         """
         Update current model setting
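
Note: the change above moves progress behind *whisper_params, making it a keyword-only parameter with a gr.Progress() default (Gradio's convention is to inject a live tracker into parameters whose default value is a gr.Progress instance). A minimal sketch of why the ordering matters follows; the toy functions, the "tracker" placeholder, and the sample values are illustrative only, not code from this repository:

# Parameters declared after *args are keyword-only, so forwarding a
# variable-length list of whisper params can never accidentally bind
# one of those values to `progress`.

def transcribe_old(audio, progress, *whisper_params):
    # Positional style: the caller must remember to pass `progress`
    # before the variadic params, or the first whisper param is
    # silently consumed as the progress tracker.
    return audio, progress, whisper_params

def transcribe_new(audio, *whisper_params, progress=None):
    # Keyword-only style: `progress` has a default and can only be
    # passed as `progress=...`, so *whisper_params always receives
    # exactly the values the caller forwarded.
    return audio, whisper_params, progress

ui_values = ("large-v2", "en", 5)   # stand-in for values collected from the UI
print(transcribe_old("a.wav", "tracker", *ui_values))
print(transcribe_new("a.wav", *ui_values, progress="tracker"))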
modules/whisper/whisper_base.py CHANGED
@@ -45,8 +45,8 @@ class WhisperBase(ABC):
     @spaces.GPU(duration=120)
     def transcribe(self,
                    audio: Union[str, BinaryIO, np.ndarray],
-                   progress: gr.Progress,
                    *whisper_params,
+                   progress: gr.Progress = gr.Progress(),
                    ):
         pass

@@ -55,15 +55,15 @@ class WhisperBase(ABC):
     def update_model(self,
                      model_size: str,
                      compute_type: str,
-                     progress: gr.Progress
+                     progress: gr.Progress = gr.Progress(),
                      ):
         pass

     @spaces.GPU(duration=120)
     def run(self,
             audio: Union[str, BinaryIO, np.ndarray],
-            progress: gr.Progress,
             *whisper_params,
+            progress: gr.Progress = gr.Progress(),
             ) -> Tuple[List[dict], float]:
         """
         Run transcription with conditional pre-processing and post-processing.
@@ -111,8 +111,8 @@ class WhisperBase(ABC):

         result, elapsed_time = self.transcribe(
             audio,
-            progress,
-            *astuple(params)
+            *astuple(params),
+            progress=progress
         )

         if params.is_diarize:
@@ -162,8 +162,8 @@ class WhisperBase(ABC):
                 print("run started: ")
                 transcribed_segments, time_for_task = self.run(
                     file.name,
-                    progress,
                     *whisper_params,
+                    progress=progress
                 )
                 print("run finished: ")

@@ -203,8 +203,8 @@ class WhisperBase(ABC):
     def transcribe_mic(self,
                        mic_audio: str,
                        file_format: str,
-                       progress=gr.Progress(),
                        *whisper_params,
+                       progress: gr.Progress = gr.Progress(),
                        ) -> list:
         """
         Write subtitle file from microphone
@@ -231,8 +231,8 @@ class WhisperBase(ABC):
             progress(0, desc="Loading Audio..")
             transcribed_segments, time_for_task = self.run(
                 mic_audio,
-                progress,
                 *whisper_params,
+                progress=progress
             )
             progress(1, desc="Completed!")

@@ -257,8 +257,8 @@ class WhisperBase(ABC):
                       youtube_link: str,
                       file_format: str,
                       add_timestamp: bool,
-                      progress=gr.Progress(),
                       *whisper_params,
+                      progress: gr.Progress = gr.Progress(),
                       ) -> list:
         """
         Write subtitle file from Youtube
@@ -290,8 +290,8 @@ class WhisperBase(ABC):

             transcribed_segments, time_for_task = self.run(
                 audio,
-                progress,
                 *whisper_params,
+                progress=progress
             )

             progress(1, desc="Completed!")
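
The call sites change in the same direction: the whisper parameters are still unpacked positionally with astuple(), while the tracker now travels as an explicit keyword argument. A self-contained sketch of that pattern follows; the WhisperValues fields and the "tracker" string are hypothetical stand-ins, not the project's actual parameter schema:

from dataclasses import dataclass, astuple

@dataclass
class WhisperValues:
    # Hypothetical stand-in for the app's parameter dataclass.
    model_size: str
    lang: str
    beam_size: int

def transcribe(audio, *whisper_params, progress=None):
    # Mirrors the new signature order: variadic params first,
    # keyword-only progress last.
    return audio, whisper_params, progress

params = WhisperValues("large-v2", "en", 5)
print(transcribe("a.wav", *astuple(params), progress="tracker"))
# -> ('a.wav', ('large-v2', 'en', 5), 'tracker')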