deepdml committed on
Commit
7ccd834
·
1 Parent(s): 6ff0727

Training in progress, step 500

Browse files
Files changed (3) hide show
  1. config.json +33 -3
  2. pytorch_model.bin +1 -1
  3. training_args.bin +2 -2
config.json CHANGED
@@ -1,12 +1,16 @@
1
  {
2
  "_name_or_path": "facebook/wav2vec2-base",
3
  "activation_dropout": 0.0,
 
 
 
4
  "apply_spec_augment": true,
5
  "architectures": [
6
  "Wav2Vec2ForCTC"
7
  ],
8
  "attention_dropout": 0.1,
9
  "bos_token_id": 1,
 
10
  "codevector_dim": 256,
11
  "contrastive_logits_temperature": 0.1,
12
  "conv_bias": false,
@@ -48,7 +52,6 @@
48
  "feat_quantizer_dropout": 0.0,
49
  "final_dropout": 0.0,
50
  "freeze_feat_extract_train": true,
51
- "gradient_checkpointing": true,
52
  "hidden_act": "gelu",
53
  "hidden_dropout": 0.1,
54
  "hidden_size": 768,
@@ -62,8 +65,10 @@
62
  "mask_channel_prob": 0.0,
63
  "mask_channel_selection": "static",
64
  "mask_feature_length": 10,
 
65
  "mask_feature_prob": 0.0,
66
  "mask_time_length": 10,
 
67
  "mask_time_min_space": 1,
68
  "mask_time_other": 0.0,
69
  "mask_time_prob": 0.05,
@@ -71,6 +76,7 @@
71
  "model_type": "wav2vec2",
72
  "no_mask_channel_overlap": false,
73
  "no_mask_time_overlap": false,
 
74
  "num_attention_heads": 12,
75
  "num_codevector_groups": 2,
76
  "num_codevectors_per_group": 320,
@@ -79,9 +85,33 @@
79
  "num_feat_extract_layers": 7,
80
  "num_hidden_layers": 12,
81
  "num_negatives": 100,
 
82
  "pad_token_id": 29,
83
  "proj_codevector_dim": 256,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
  "torch_dtype": "float32",
85
- "transformers_version": "4.9.1",
86
- "vocab_size": 32
 
 
87
  }
 
1
  {
2
  "_name_or_path": "facebook/wav2vec2-base",
3
  "activation_dropout": 0.0,
4
+ "adapter_kernel_size": 3,
5
+ "adapter_stride": 2,
6
+ "add_adapter": false,
7
  "apply_spec_augment": true,
8
  "architectures": [
9
  "Wav2Vec2ForCTC"
10
  ],
11
  "attention_dropout": 0.1,
12
  "bos_token_id": 1,
13
+ "classifier_proj_size": 256,
14
  "codevector_dim": 256,
15
  "contrastive_logits_temperature": 0.1,
16
  "conv_bias": false,
 
52
  "feat_quantizer_dropout": 0.0,
53
  "final_dropout": 0.0,
54
  "freeze_feat_extract_train": true,
 
55
  "hidden_act": "gelu",
56
  "hidden_dropout": 0.1,
57
  "hidden_size": 768,
 
65
  "mask_channel_prob": 0.0,
66
  "mask_channel_selection": "static",
67
  "mask_feature_length": 10,
68
+ "mask_feature_min_masks": 0,
69
  "mask_feature_prob": 0.0,
70
  "mask_time_length": 10,
71
+ "mask_time_min_masks": 2,
72
  "mask_time_min_space": 1,
73
  "mask_time_other": 0.0,
74
  "mask_time_prob": 0.05,
 
76
  "model_type": "wav2vec2",
77
  "no_mask_channel_overlap": false,
78
  "no_mask_time_overlap": false,
79
+ "num_adapter_layers": 3,
80
  "num_attention_heads": 12,
81
  "num_codevector_groups": 2,
82
  "num_codevectors_per_group": 320,
 
85
  "num_feat_extract_layers": 7,
86
  "num_hidden_layers": 12,
87
  "num_negatives": 100,
88
+ "output_hidden_size": 768,
89
  "pad_token_id": 29,
90
  "proj_codevector_dim": 256,
91
+ "tdnn_dilation": [
92
+ 1,
93
+ 2,
94
+ 3,
95
+ 1,
96
+ 1
97
+ ],
98
+ "tdnn_dim": [
99
+ 512,
100
+ 512,
101
+ 512,
102
+ 512,
103
+ 1500
104
+ ],
105
+ "tdnn_kernel": [
106
+ 5,
107
+ 3,
108
+ 3,
109
+ 1,
110
+ 1
111
+ ],
112
  "torch_dtype": "float32",
113
+ "transformers_version": "4.15.0",
114
+ "use_weighted_layer_sum": false,
115
+ "vocab_size": 32,
116
+ "xvector_output_dim": 512
117
  }
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:1b2fcf8a34b2fa94c6d539d796740544265edc82f62dc78e9709554e9b49afaf
3
  size 377670039
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c23b06f74030974c95aefc3171a0f413843effaf059ad482bba951996a320f28
3
  size 377670039
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:3525ce693ff1a560cef03e05cced987bc53418307793029bef8d9b71b29cf858
3
- size 2607
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d8ec2b8d3bd6abd93e591dc453ae394ea802bd4373ed86cf84f21f9ed38d98ce
3
+ size 2927