delete access token from env
- .gitattributes +0 -1
- .gitignore +0 -0
- app.py +2 -9
.gitattributes
CHANGED
@@ -33,4 +33,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
-.env
.gitignore
DELETED
Binary file (14 Bytes)
app.py
CHANGED
@@ -5,12 +5,6 @@ import numpy as np
 import webrtcvad
 from transformers import WhisperProcessor, WhisperForConditionalGeneration, pipeline
 
-from dotenv import load_dotenv
-import os
-
-load_dotenv()  # Loads the .env file
-token = os.getenv("HUGGINGFACE_TOKEN")
-
 
 # Model names
 TN_MODEL_NAME = "amenIKh/Tunisian_Checkpoint12"
@@ -21,13 +15,12 @@ pipe_tn = pipeline(
     task="automatic-speech-recognition",
     model=TN_MODEL_NAME,
     device=0 if torch.cuda.is_available() else -1,
-    use_auth_token=token
 
 )
 
 # Load Whisper model and processor
-whisper_model = WhisperForConditionalGeneration.from_pretrained(WHISPER_MODEL_NAME
-whisper_processor = WhisperProcessor.from_pretrained(WHISPER_MODEL_NAME
+whisper_model = WhisperForConditionalGeneration.from_pretrained(WHISPER_MODEL_NAME)
+whisper_processor = WhisperProcessor.from_pretrained(WHISPER_MODEL_NAME)
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 whisper_model.to(device)
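
With the committed .env and the use_auth_token plumbing removed, a token for private checkpoints would typically come from a Space secret surfaced as an environment variable rather than from a file checked into the repo. The sketch below is one possible follow-up pattern, not the code in this commit: it assumes a secret named HF_TOKEN and uses the token= argument that replaces the deprecated use_auth_token; the WHISPER_MODEL_NAME value shown is a placeholder, since its real value is defined elsewhere in app.py.

import os

import torch
from transformers import WhisperForConditionalGeneration, WhisperProcessor, pipeline

TN_MODEL_NAME = "amenIKh/Tunisian_Checkpoint12"
WHISPER_MODEL_NAME = "openai/whisper-small"  # placeholder; app.py defines its own value

# Read the token from the environment (e.g. a Space secret named HF_TOKEN).
# None falls back to anonymous access, which is enough for public checkpoints.
hf_token = os.environ.get("HF_TOKEN")

pipe_tn = pipeline(
    task="automatic-speech-recognition",
    model=TN_MODEL_NAME,
    device=0 if torch.cuda.is_available() else -1,
    token=hf_token,  # replaces the deprecated use_auth_token argument
)

# Load Whisper model and processor with the same token handling
whisper_model = WhisperForConditionalGeneration.from_pretrained(WHISPER_MODEL_NAME, token=hf_token)
whisper_processor = WhisperProcessor.from_pretrained(WHISPER_MODEL_NAME, token=hf_token)

device = "cuda" if torch.cuda.is_available() else "cpu"
whisper_model.to(device)

On Hugging Face Spaces, a secret set in the Space settings is exposed to the running app as an environment variable, so nothing sensitive needs to live in .env, .gitignore, or .gitattributes.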