Update README.md
Browse files
README.md
CHANGED
@@ -12,20 +12,20 @@ pipeline_tag: automatic-speech-recognition
|
|
12 |
library_name: transformers
|
13 |
---
|
14 |
How to use the model in Colab:
|
15 |
-
|
16 |
pip install torch torchaudio transformers librosa gradio
|
17 |
from transformers import WhisperProcessor, WhisperForConditionalGeneration
|
18 |
import torch
|
19 |
|
20 |
-
#
|
21 |
model_name = "hackergeek98/tinyyyy_whisper"
|
22 |
processor = WhisperProcessor.from_pretrained(model_name)
|
23 |
model = WhisperForConditionalGeneration.from_pretrained(model_name)
|
24 |
|
25 |
-
#
|
26 |
model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="fa", task="transcribe")
|
27 |
|
28 |
-
#
|
29 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
30 |
model.to(device)
|
31 |
import librosa
|
@@ -46,10 +46,10 @@ def transcribe_audio(audio_file):
|
|
46 |
return transcription
|
47 |
from google.colab import files
|
48 |
|
49 |
-
#
|
50 |
uploaded = files.upload()
|
51 |
audio_file = list(uploaded.keys())[0]
|
52 |
|
53 |
-
#
|
54 |
transcription = transcribe_audio(audio_file)
|
55 |
print("Transcription:", transcription)
|
|
|
12 |
library_name: transformers
|
13 |
---
|
14 |
How to use the model in Colab:
|
15 |
+
# Start: install dependencies
|
16 |
pip install torch torchaudio transformers librosa gradio
|
17 |
from transformers import WhisperProcessor, WhisperForConditionalGeneration
|
18 |
import torch
|
19 |
|
20 |
+
# Load your fine-tuned Whisper model and processor
|
21 |
model_name = "hackergeek98/tinyyyy_whisper"
|
22 |
processor = WhisperProcessor.from_pretrained(model_name)
|
23 |
model = WhisperForConditionalGeneration.from_pretrained(model_name)
|
24 |
|
25 |
+
# Force the model to transcribe in Persian
|
26 |
model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="fa", task="transcribe")
|
27 |
|
28 |
+
# Move the model to GPU if available
|
29 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
30 |
model.to(device)
|
31 |
import librosa
|
|
|
46 |
return transcription
|
47 |
from google.colab import files
|
48 |
|
49 |
+
# Upload an audio file
|
50 |
uploaded = files.upload()
|
51 |
audio_file = list(uploaded.keys())[0]
|
52 |
|
53 |
+
# Transcribe the audio
|
54 |
transcription = transcribe_audio(audio_file)
|
55 |
print("Transcription:", transcription)
|