Thecoder3281f committed
Commit 11a8a73 · verified · 1 Parent(s): 07e3bf6

End of training

README.md ADDED
@@ -0,0 +1,81 @@
+ ---
+ library_name: peft
+ language:
+ - en
+ license: apache-2.0
+ base_model: openai/whisper-tiny
+ tags:
+ - generated_from_trainer
+ datasets:
+ - mozilla-foundation/common_voice_17_0
+ metrics:
+ - wer
+ model-index:
+ - name: Whisper Tiny English (1000 steps) - Jarrett Er
+   results:
+   - task:
+       type: automatic-speech-recognition
+       name: Automatic Speech Recognition
+     dataset:
+       name: Common Voice 17.0
+       type: mozilla-foundation/common_voice_17_0
+       config: en
+       split: train
+       args: 'config: en, split: train'
+     metrics:
+     - type: wer
+       value: 29.041916167664674
+       name: Wer
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # Whisper Tiny English (1000 steps) - Jarrett Er
+
+ This model is a fine-tuned version of [openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny) on the Common Voice 17.0 dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.6690
+ - Wer: 29.0419
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (an illustrative `Seq2SeqTrainingArguments` sketch follows the list):
+ - learning_rate: 1e-05
+ - train_batch_size: 16
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: adamw_torch with betas=(0.9, 0.999) and epsilon=1e-08 (no additional optimizer arguments)
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 500
+ - training_steps: 1000
+ - mixed_precision_training: Native AMP
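A minimal sketch of how these hyperparameters might be expressed as `transformers.Seq2SeqTrainingArguments`; the `output_dir` and the `fp16` flag (standing in for "Native AMP") are assumptions, since the training script is not part of this commit:

```python
from transformers import Seq2SeqTrainingArguments

# Hypothetical reconstruction of the run configuration from the list above.
training_args = Seq2SeqTrainingArguments(
    output_dir="./whisper-tiny-en-lora",  # placeholder, not from the commit
    learning_rate=1e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=8,
    seed=42,
    optim="adamw_torch",
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    warmup_steps=500,
    max_steps=1000,
    fp16=True,  # mixed precision via native AMP
)
```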
+
+ ### Training results
+
+ | Training Loss | Epoch  | Step | Validation Loss | Wer     |
+ |:-------------:|:------:|:----:|:---------------:|:-------:|
+ | 0.4359        | 17.031 | 1000 | 0.6690          | 29.0419 |
+
+
+ ### Framework versions
+
+ - PEFT 0.14.0
+ - Transformers 4.48.0.dev0
+ - Pytorch 2.5.1+cu124
+ - Datasets 3.2.1.dev0
+ - Tokenizers 0.21.0
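Because this commit ships only a LoRA adapter (see adapter_model.safetensors below), inference requires loading the adapter on top of the base model. A minimal sketch using the PEFT and Transformers APIs listed above; the adapter repository id and the dummy audio are placeholders:

```python
import numpy as np
import torch
from peft import PeftModel
from transformers import WhisperForConditionalGeneration, WhisperProcessor

# Load the frozen base model, then attach the LoRA adapter from this repo.
base = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")
model = PeftModel.from_pretrained(base, "<adapter-repo-id>")  # placeholder id
processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")

audio = np.zeros(16000, dtype=np.float32)  # placeholder: 1 s of 16 kHz audio
inputs = processor(audio, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    ids = model.generate(input_features=inputs.input_features,
                         language="en", task="transcribe")
print(processor.batch_decode(ids, skip_special_tokens=True)[0])
```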
adapter_config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": {
+     "base_model_class": "WhisperForConditionalGeneration",
+     "parent_library": "transformers.models.whisper.modeling_whisper"
+   },
+   "base_model_name_or_path": "openai/whisper-tiny",
+   "bias": "none",
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 64,
+   "lora_bias": false,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "fc2",
+     "fc1",
+     "out_proj",
+     "q_proj",
+     "k_proj",
+     "v_proj"
+   ],
+   "task_type": null,
+   "use_dora": false,
+   "use_rslora": false
+ }
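For reference, the key fields of adapter_config.json above correspond roughly to the PEFT configuration sketched below: rank-8 LoRA on the attention projections and MLP layers, alpha 64, dropout 0.1, no bias terms. This is an illustration only, not the original training script:

```python
from peft import LoraConfig, get_peft_model
from transformers import WhisperForConditionalGeneration

# Mirrors the committed adapter_config.json.
lora_config = LoraConfig(
    r=8,
    lora_alpha=64,
    lora_dropout=0.1,
    bias="none",
    target_modules=["q_proj", "k_proj", "v_proj", "out_proj", "fc1", "fc2"],
)

base = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")
model = get_peft_model(base, lora_config)
model.print_trainable_parameters()  # only the LoRA matrices are trainable
```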
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2c7ab9fb9a3e7e6c64688f9636e9c2b2d41f19e29acae4c6ac262223340a1e8
+ size 2179992
preprocessor_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "chunk_length": 30,
+   "feature_extractor_type": "WhisperFeatureExtractor",
+   "feature_size": 80,
+   "hop_length": 160,
+   "n_fft": 400,
+   "n_samples": 480000,
+   "nb_max_frames": 3000,
+   "padding_side": "right",
+   "padding_value": 0.0,
+   "processor_class": "WhisperProcessor",
+   "return_attention_mask": false,
+   "sampling_rate": 16000
+ }
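The preprocessor_config.json above describes the standard Whisper front end: 80 log-mel bins over 25 ms windows (n_fft=400) with 10 ms hops (hop_length=160), padded to 30-second chunks at 16 kHz. A small sketch of how those values instantiate a feature extractor; the dummy audio is only illustrative:

```python
import numpy as np
from transformers import WhisperFeatureExtractor

# Instantiate the feature extractor with the committed preprocessor settings.
feature_extractor = WhisperFeatureExtractor(
    feature_size=80,
    sampling_rate=16000,
    hop_length=160,
    chunk_length=30,
    n_fft=400,
)

audio = np.zeros(16000, dtype=np.float32)  # placeholder: 1 s of silence
features = feature_extractor(audio, sampling_rate=16000, return_tensors="np")
print(features.input_features.shape)  # (1, 80, 3000): padded to nb_max_frames
```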
runs/Dec19_15-25-29_jarrett-ROG-Zephyrus-M16-GU604VI-GU604VI/events.out.tfevents.1734593154.jarrett-ROG-Zephyrus-M16-GU604VI-GU604VI.11294.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67c5ddd0b7c38e465cb650ffe2a0c3b05bafb8ed98f9c5fdd6cd350fd500ec33
+ size 6918
runs/Dec19_15-26-08_jarrett-ROG-Zephyrus-M16-GU604VI-GU604VI/events.out.tfevents.1734593172.jarrett-ROG-Zephyrus-M16-GU604VI-GU604VI.11294.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1f7c6320ab3a0fe7aa82b32d87b168adc7a7b3e887b80a8e808b505610e1655d
+ size 16011
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1f97e886a7584d66651c0ecd9b16d0d5cfaf57b5b1ec882b601adf29a09fb1be
+ size 5560