PCIResearch committed
Commit 6c1569c · Parent: 8e3eb62

Upload 11 files
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/data/PCITransGPT/v1.1",
+  "_name_or_path": "TranscoreM",
   "architectures": [
     "TransCoreMLlamaForCausalLM"
   ],
@@ -9,6 +9,7 @@
   "hidden_act": "silu",
   "hidden_size": 5120,
   "image_aspect_ratio": "pad",
+  "image_grid_pinpoints": null,
   "initializer_range": 0.02,
   "intermediate_size": 13824,
   "max_position_embeddings": 4096,
@@ -19,7 +20,7 @@
   "mm_use_im_start_end": false,
   "mm_vision_select_feature": "patch",
   "mm_vision_select_layer": -2,
-  "mm_vision_tower": "openai/clip-vit-large-patch14-336",
+  "mm_vision_tower": "/home/zhangdaowei/models/TransCorem-v0.9/visual_encoder",
   "model_type": "transcorem",
   "num_attention_heads": 40,
   "num_hidden_layers": 40,
@@ -29,12 +30,13 @@
   "rms_norm_eps": 1e-06,
   "rope_scaling": null,
   "tie_word_embeddings": false,
-  "tokenizer_model_max_length": 2048,
-  "tokenizer_padding_side": "right",
   "torch_dtype": "bfloat16",
   "transformers_version": "4.31.0",
+  "tune_entire_model": false,
   "tune_mm_mlp_adapter": false,
+  "tune_vision_tower": false,
   "use_cache": true,
   "use_mm_proj": true,
+  "vision_tower_lr": null,
   "vocab_size": 40076
 }
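Two details in this diff are worth flagging: "model_type" is the custom "transcorem", which a stock transformers install will not recognize, and "mm_vision_tower" now points at an absolute path from the training machine instead of the Hub id "openai/clip-vit-large-patch14-336" it replaces. Since this commit also ships the encoder under visual_encoder/, a minimal sketch of re-pointing the config at the bundled copy (the "TransCoreM" clone path is a hypothetical example):

import json
from pathlib import Path

repo = Path("TransCoreM")  # hypothetical local clone of this repository
cfg_path = repo / "config.json"
cfg = json.loads(cfg_path.read_text())

# The committed value only resolves on the training machine; point the
# vision tower at the visual_encoder/ folder shipped in this same commit.
cfg["mm_vision_tower"] = str((repo / "visual_encoder").resolve())
cfg_path.write_text(json.dumps(cfg, indent=2, sort_keys=True) + "\n")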
pytorch_model.bin.index.json CHANGED
@@ -1,6 +1,6 @@
 {
   "metadata": {
-    "total_size": 26867080192
+    "total_size": 26260065280
   },
   "weight_map": {
     "lm_head.weight": "pytorch_model-00003-of-00003.bin",
@@ -409,397 +409,6 @@
     "model.mm_projector.0.weight": "pytorch_model-00003-of-00003.bin",
     "model.mm_projector.2.bias": "pytorch_model-00003-of-00003.bin",
     "model.mm_projector.2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.norm.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.embeddings.class_embedding": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.embeddings.patch_embedding.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.embeddings.position_embedding.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.post_layernorm.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.post_layernorm.weight": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.pre_layrnorm.bias": "pytorch_model-00003-of-00003.bin",
-    "model.vision_tower.vision_tower.vision_model.pre_layrnorm.weight": "pytorch_model-00003-of-00003.bin"
+    "model.norm.weight": "pytorch_model-00003-of-00003.bin"
   }
 }
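The shrinking "total_size" reflects the CLIP vision tower moving out of the sharded language-model checkpoint and into the standalone visual_encoder/pytorch_model.bin added below: 26,867,080,192 − 26,260,065,280 = 607,014,912 bytes, i.e. 303,507,456 parameters at 2 bytes each, which is consistent with a ViT-L/14-336 vision encoder in bfloat16 and lines up with the 607,144,361-byte file once pickle framing overhead is allowed for. A quick sanity check of that arithmetic:

# Sanity check: bytes dropped from the shards vs. a 2-byte-per-parameter
# CLIP ViT-L/14-336 vision tower (~303.5M parameters).
old_total = 26_867_080_192   # "total_size" before this commit
new_total = 26_260_065_280   # "total_size" after this commit

delta = old_total - new_total
print(delta)                 # 607014912 bytes removed
print(delta // 2)            # 303507456 parameters at bf16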
trainer_state.json CHANGED
The diff for this file is too large to render.
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fb6e00f2c100a45af904ed79a1e34c6a1e6eaba0319a0469d0312df29017ed7c
-size 5947
+oid sha256:2d0e6b0bde37a458a6ed069e0ae44ee7c96a376d4fd79093e2cb4b5c393918aa
+size 6139
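training_args.bin is the torch-pickled TrainingArguments object that transformers' Trainer writes next to a checkpoint, so the changed hash and size simply mean this run used different arguments. A sketch of inspecting it (unpickling requires a compatible transformers install, and on newer PyTorch you may need weights_only=False):

import torch

# Restores the pickled TrainingArguments saved by Trainer.
args = torch.load("TransCoreM/training_args.bin")  # hypothetical clone path
print(args.learning_rate, args.num_train_epochs)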
visual_encoder/config.json ADDED
@@ -0,0 +1,19 @@
+{
+  "_name_or_path": "/data/lmm_models/clip-vit-large-patch14-336",
+  "attention_dropout": 0.0,
+  "dropout": 0.0,
+  "hidden_act": "quick_gelu",
+  "hidden_size": 1024,
+  "image_size": 336,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-05,
+  "model_type": "clip_vision_model",
+  "num_attention_heads": 16,
+  "num_channels": 3,
+  "num_hidden_layers": 24,
+  "patch_size": 14,
+  "projection_dim": 768,
+  "transformers_version": "4.31.0"
+}
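This is a standard CLIP ViT-L/14-336 vision-encoder config: 24 layers, hidden size 1024, 14-pixel patches on 336-pixel images, so 576 patch tokens plus one CLS token per image. A sketch of loading the bundled tower and selecting features the way the top-level config asks (mm_vision_select_layer: -2 with mm_vision_select_feature: "patch" is the usual LLaVA-style convention; the local path is a hypothetical clone):

import torch
from transformers import CLIPVisionModel

tower = CLIPVisionModel.from_pretrained("TransCoreM/visual_encoder")

pixel_values = torch.randn(1, 3, 336, 336)  # stand-in for a preprocessed image
with torch.no_grad():
    out = tower(pixel_values, output_hidden_states=True)

feats = out.hidden_states[-2]   # layer -2 features, shape (1, 577, 1024)
patch_feats = feats[:, 1:]      # "patch": drop the CLS token -> (1, 576, 1024)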
visual_encoder/preprocessor_config.json ADDED
@@ -0,0 +1,28 @@
+{
+  "crop_size": {
+    "height": 336,
+    "width": 336
+  },
+  "do_center_crop": true,
+  "do_convert_rgb": true,
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "feature_extractor_type": "CLIPFeatureExtractor",
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_processor_type": "CLIPImageProcessor",
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "resample": 3,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "shortest_edge": 336
+  }
+}
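These are the stock CLIP preprocessing settings: resize the shortest edge to 336 with bicubic resampling (resample 3 is PIL's BICUBIC), center-crop to 336x336, rescale by 0.00392156862745098 = 1/255, and normalize with the CLIP channel mean/std above. A sketch of applying it (the image filename is a hypothetical example):

from PIL import Image
from transformers import CLIPImageProcessor

# Reads visual_encoder/preprocessor_config.json from the bundled folder.
processor = CLIPImageProcessor.from_pretrained("TransCoreM/visual_encoder")
image = Image.open("example.jpg")
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)   # torch.Size([1, 3, 336, 336])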
visual_encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d7c1dbc68ee9bd545517842d8137286e911254d028eb4258daedac303e46c80
+size 607144361
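Like the other large files here, this is a Git LFS pointer: the repository stores only the blob's sha256 and byte size, and the actual weights are fetched on checkout. A sketch of verifying a downloaded copy against the pointer above:

import hashlib
from pathlib import Path

path = Path("TransCoreM/visual_encoder/pytorch_model.bin")  # local clone path

# Hash in 1 MiB chunks to avoid holding the 607 MB file in memory at once.
h = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert path.stat().st_size == 607144361
assert h.hexdigest() == "5d7c1dbc68ee9bd545517842d8137286e911254d028eb4258daedac303e46c80"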