Flocksserver committed · Commit ec1e9f3 · verified · 1 parent: af070ea

End of training

README.md ADDED
@@ -0,0 +1,84 @@
+ ---
+ library_name: transformers
+ license: apache-2.0
+ base_model: openai/whisper-tiny
+ tags:
+ - generated_from_trainer
+ datasets:
+ - emodb
+ metrics:
+ - accuracy
+ model-index:
+ - name: whisper-tiny-de-emodb-emotion-classification
+   results:
+   - task:
+       name: Audio Classification
+       type: audio-classification
+     dataset:
+       name: Emo-DB
+       type: emodb
+     metrics:
+     - name: Accuracy
+       type: accuracy
+       value: 0.9158878504672897
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # whisper-tiny-de-emodb-emotion-classification
+
+ This model is a fine-tuned version of [openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny) on the Emo-DB dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.4912
+ - Accuracy: 0.9159
+
+ ## Model description
+
+ This is a `WhisperForAudioClassification` model: the encoder of [openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny)
+ with a classification head on top, fine-tuned to classify German speech into the seven Emo-DB emotion
+ categories: anger, boredom, disgust, fear, happiness, neutral, and sadness.
+
+ ## Intended uses & limitations
+
+ The model is intended for speech emotion recognition on short clips of German speech sampled at 16 kHz.
+ Emo-DB consists of acted emotional speech from ten speakers, so accuracy on spontaneous, noisy, or
+ non-German speech may be substantially lower.
+
+ ## Training and evaluation data
+
+ The model was fine-tuned on Emo-DB (the Berlin Database of Emotional Speech), a corpus of German
+ utterances acted in the seven emotions listed above; the metrics reported here come from a held-out
+ evaluation split.
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 2
+ - eval_batch_size: 2
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 10
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|
+ | 1.3193        | 1.0   | 214  | 1.4616          | 0.3925   |
+ | 0.1342        | 2.0   | 428  | 1.0384          | 0.6449   |
+ | 0.0582        | 3.0   | 642  | 1.5578          | 0.6542   |
+ | 0.6567        | 4.0   | 856  | 1.2043          | 0.7850   |
+ | 0.0202        | 5.0   | 1070 | 0.5967          | 0.8598   |
+ | 0.0008        | 6.0   | 1284 | 0.6261          | 0.8692   |
+ | 0.0006        | 7.0   | 1498 | 0.5857          | 0.8785   |
+ | 0.0004        | 8.0   | 1712 | 0.4992          | 0.9065   |
+ | 0.0004        | 9.0   | 1926 | 0.4943          | 0.9159   |
+ | 0.0003        | 10.0  | 2140 | 0.4912          | 0.9159   |
+
+
+ ### Framework versions
+
+ - Transformers 4.45.0.dev0
+ - Pytorch 2.4.0+cu121
+ - Datasets 3.0.0
+ - Tokenizers 0.19.1
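
A minimal inference sketch for the card above, using the `transformers` audio-classification pipeline. The repo id is assumed from the committer and model name on this page, and `german_speech.wav` is a placeholder for your own audio clip:

```python
from transformers import pipeline

# Repo id assumed from the committer and model name; substitute a local
# checkpoint directory if you have the files on disk.
classifier = pipeline(
    "audio-classification",
    model="Flocksserver/whisper-tiny-de-emodb-emotion-classification",
)

# Placeholder path; the pipeline decodes the file and resamples it to the
# feature extractor's 16 kHz sampling rate.
predictions = classifier("german_speech.wav")
for p in predictions:
    print(f"{p['label']}: {p['score']:.3f}")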
config.json ADDED
@@ -0,0 +1,77 @@
+ {
+   "_name_or_path": "openai/whisper-tiny",
+   "activation_dropout": 0.0,
+   "activation_function": "gelu",
+   "apply_spec_augment": false,
+   "architectures": [
+     "WhisperForAudioClassification"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 50257,
+   "classifier_proj_size": 256,
+   "d_model": 384,
+   "decoder_attention_heads": 6,
+   "decoder_ffn_dim": 1536,
+   "decoder_layerdrop": 0.0,
+   "decoder_layers": 4,
+   "decoder_start_token_id": 50258,
+   "dropout": 0.0,
+   "encoder_attention_heads": 6,
+   "encoder_ffn_dim": 1536,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 4,
+   "eos_token_id": 50257,
+   "forced_decoder_ids": [
+     [
+       1,
+       50259
+     ],
+     [
+       2,
+       50359
+     ],
+     [
+       3,
+       50363
+     ]
+   ],
+   "id2label": {
+     "0": "anger",
+     "1": "boredom",
+     "2": "disgust",
+     "3": "fear",
+     "4": "happiness",
+     "5": "sadness",
+     "6": "neutral"
+   },
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "label2id": {
+     "anger": "0",
+     "boredom": "1",
+     "disgust": "2",
+     "fear": "3",
+     "happiness": "4",
+     "neutral": "6",
+     "sadness": "5"
+   },
+   "mask_feature_length": 10,
+   "mask_feature_min_masks": 0,
+   "mask_feature_prob": 0.0,
+   "mask_time_length": 10,
+   "mask_time_min_masks": 2,
+   "mask_time_prob": 0.05,
+   "max_source_positions": 1500,
+   "max_target_positions": 448,
+   "median_filter_width": 7,
+   "model_type": "whisper",
+   "num_hidden_layers": 4,
+   "num_mel_bins": 80,
+   "pad_token_id": 50257,
+   "scale_embedding": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.45.0.dev0",
+   "use_cache": true,
+   "use_weighted_layer_sum": false,
+   "vocab_size": 51865
+ }
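
The config wires the seven Emo-DB emotions into `id2label`/`label2id` and sets the architecture to `WhisperForAudioClassification`. Below is a sketch, with the same assumed repo id as above, of running the model directly and decoding the predicted class from the logits:

```python
import torch
from transformers import AutoFeatureExtractor, WhisperForAudioClassification

# Repo id assumed; point this at a local directory if needed.
model_id = "Flocksserver/whisper-tiny-de-emodb-emotion-classification"
feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
model = WhisperForAudioClassification.from_pretrained(model_id)
model.eval()

# Three seconds of silence stand in for a real 16 kHz German speech clip.
waveform = torch.zeros(48000).numpy()
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    logits = model(input_features=inputs.input_features).logits  # shape (1, 7)

# id2label from config.json maps the argmax index to an emotion name.
print(model.config.id2label[logits.argmax(-1).item()])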
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a6191977ba4002b3d8c787e1805cf846661bda078a4cdfc4b3ff84c9cca5576
+ size 33242308
preprocessor_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "chunk_length": 30,
+   "feature_extractor_type": "WhisperFeatureExtractor",
+   "feature_size": 80,
+   "hop_length": 160,
+   "n_fft": 400,
+   "n_samples": 480000,
+   "nb_max_frames": 3000,
+   "padding_side": "right",
+   "padding_value": 0.0,
+   "processor_class": "WhisperProcessor",
+   "return_attention_mask": false,
+   "sampling_rate": 16000
+ }
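
These preprocessor values follow directly from Whisper's front end: 30 s windows at 16 kHz give `n_samples` = 480000, and with `hop_length` = 160 that yields `nb_max_frames` = 3000 frames of 80 log-mel bins. A quick sketch confirming the shapes:

```python
import numpy as np
from transformers import WhisperFeatureExtractor

# Construct the extractor from the values in preprocessor_config.json.
extractor = WhisperFeatureExtractor(
    feature_size=80, sampling_rate=16000, hop_length=160, chunk_length=30, n_fft=400
)

# One second of noise; the extractor zero-pads it to the full 30 s window.
audio = np.random.randn(16000).astype(np.float32)
features = extractor(audio, sampling_rate=16000, return_tensors="np")

# 480000 samples / 160 hop = 3000 frames of 80 mel bins each.
print(features.input_features.shape)  # (1, 80, 3000)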
runs/Sep11_19-48-16_782e0ab71dd8/events.out.tfevents.1726084097.782e0ab71dd8.387.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d2bca9b446c64906d38ac5f858bac8b86bbeb74b62ba512bd9cc2b114f263100
+ size 99797
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:69914a74464d8435857dd8b03ca1c1bf580d9d97f65714c12f740cea32d64037
+ size 5304
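
`training_args.bin` is the serialized `TrainingArguments` object. Below is a hedged reconstruction of the arguments implied by the hyperparameters in the model card; the output dir is illustrative, and the actual file may set additional fields:

```python
from transformers import TrainingArguments

# Sketch of TrainingArguments consistent with the model card; Adam betas
# (0.9, 0.999) and epsilon 1e-08 are the Trainer defaults, so they need
# no explicit arguments here.
training_args = TrainingArguments(
    output_dir="whisper-tiny-de-emodb-emotion-classification",  # illustrative
    learning_rate=5e-5,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    seed=42,
    lr_scheduler_type="linear",
    warmup_ratio=0.1,
    num_train_epochs=10,
    fp16=True,  # "Native AMP" mixed precision
    eval_strategy="epoch",  # inferred: the results table logs once per epoch
    logging_strategy="epoch",
)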