crossdelenna committed on
Commit
41cc28f
·
verified ·
1 Parent(s): ca7b3be

Model save

Browse files
README.md ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: peft
3
+ license: mit
4
+ base_model: openai/whisper-large-v3-turbo
5
+ tags:
6
+ - generated_from_trainer
7
+ model-index:
8
+ - name: whisperturbo
9
+ results: []
10
+ ---
11
+
12
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
13
+ should probably proofread and complete it, then remove this comment. -->
14
+
15
+ # whisperturbo
16
+
17
+ This model is a fine-tuned version of [openai/whisper-large-v3-turbo](https://huggingface.co/openai/whisper-large-v3-turbo) on an unknown dataset.
18
+
19
+ ## Model description
20
+
21
+ More information needed
22
+
23
+ ## Intended uses & limitations
24
+
25
+ More information needed
26
+
27
+ ## Training and evaluation data
28
+
29
+ More information needed
30
+
31
+ ## Training procedure
32
+
33
+ ### Training hyperparameters
34
+
35
+ The following hyperparameters were used during training:
36
+ - learning_rate: 0.001
37
+ - train_batch_size: 8
38
+ - eval_batch_size: 8
39
+ - seed: 42
40
+ - optimizer: adamw_torch (AdamW) with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
41
+ - lr_scheduler_type: linear
42
+ - lr_scheduler_warmup_steps: 50
43
+ - training_steps: 1200
44
+ - mixed_precision_training: Native AMP
45
+
46
+ ### Framework versions
47
+
48
+ - PEFT 0.14.0
49
+ - Transformers 4.46.3
50
+ - Pytorch 2.5.1+cu121
51
+ - Datasets 3.1.0
52
+ - Tokenizers 0.20.3
adapter_config.json CHANGED
@@ -1,10 +1,10 @@
1
  {
2
  "alpha_pattern": {},
3
  "auto_mapping": {
4
- "base_model_class": "WhisperForConditionalGeneration",
5
- "parent_library": "transformers.models.whisper.modeling_whisper"
6
  },
7
- "base_model_name_or_path": "openai/whisper-large-v3-turbo",
8
  "bias": "none",
9
  "eva_config": null,
10
  "exclude_modules": null,
@@ -26,8 +26,8 @@
26
  "rank_pattern": {},
27
  "revision": null,
28
  "target_modules": [
29
- "v_proj",
30
- "q_proj"
31
  ],
32
  "task_type": null,
33
  "use_dora": false,
 
1
  {
2
  "alpha_pattern": {},
3
  "auto_mapping": {
4
+ "base_model_class": "PeftModel",
5
+ "parent_library": "peft.peft_model"
6
  },
7
+ "base_model_name_or_path": null,
8
  "bias": "none",
9
  "eva_config": null,
10
  "exclude_modules": null,
 
26
  "rank_pattern": {},
27
  "revision": null,
28
  "target_modules": [
29
+ "q_proj",
30
+ "v_proj"
31
  ],
32
  "task_type": null,
33
  "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:ee05bb65e33ffd668755268d4753f8eaad44e37708f322b35f83bf666984c3ef
3
- size 26237160
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2059ddbba36a23ed0dc44493177dde2278918330c80e864b543004006e246006
3
+ size 26245320
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:aec3c0ff868ff4b7580d749fa93db5ddf2797f6f545dae3307a6d2ed55dce164
3
  size 5432
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:539573d3372b4c9d623d9e58990f626e07f079aceae66e186f7ad160f9f1f231
3
  size 5432