{ "_name_or_path": "HuggingFaceTB/SmolVLM-Instruct", "architectures": [ "Idefics3ForConditionalGeneration" ], "image_seq_len": 81, "image_token_id": 49153, "model_type": "idefics3", "pad_token_id": 128002, "quantization_config": { "config_groups": { "group_0": { "input_activations": null, "output_activations": null, "targets": [ "Linear" ], "weights": { "actorder": null, "block_structure": null, "dynamic": false, "group_size": 128, "num_bits": 4, "observer": "minmax", "observer_kwargs": {}, "strategy": "group", "symmetric": true, "type": "int" } } }, "format": "pack-quantized", "global_compression_ratio": 1.3447756848086136, "ignore": [ "model.vision_model.encoder.layers.0.self_attn.k_proj", "model.vision_model.encoder.layers.0.self_attn.v_proj", "model.vision_model.encoder.layers.0.self_attn.q_proj", "model.vision_model.encoder.layers.0.self_attn.out_proj", "model.vision_model.encoder.layers.0.mlp.fc1", "model.vision_model.encoder.layers.0.mlp.fc2", "model.vision_model.encoder.layers.1.self_attn.k_proj", "model.vision_model.encoder.layers.1.self_attn.v_proj", "model.vision_model.encoder.layers.1.self_attn.q_proj", "model.vision_model.encoder.layers.1.self_attn.out_proj", "model.vision_model.encoder.layers.1.mlp.fc1", "model.vision_model.encoder.layers.1.mlp.fc2", "model.vision_model.encoder.layers.2.self_attn.k_proj", "model.vision_model.encoder.layers.2.self_attn.v_proj", "model.vision_model.encoder.layers.2.self_attn.q_proj", "model.vision_model.encoder.layers.2.self_attn.out_proj", "model.vision_model.encoder.layers.2.mlp.fc1", "model.vision_model.encoder.layers.2.mlp.fc2", "model.vision_model.encoder.layers.3.self_attn.k_proj", "model.vision_model.encoder.layers.3.self_attn.v_proj", "model.vision_model.encoder.layers.3.self_attn.q_proj", "model.vision_model.encoder.layers.3.self_attn.out_proj", "model.vision_model.encoder.layers.3.mlp.fc1", "model.vision_model.encoder.layers.3.mlp.fc2", "model.vision_model.encoder.layers.4.self_attn.k_proj", "model.vision_model.encoder.layers.4.self_attn.v_proj", "model.vision_model.encoder.layers.4.self_attn.q_proj", "model.vision_model.encoder.layers.4.self_attn.out_proj", "model.vision_model.encoder.layers.4.mlp.fc1", "model.vision_model.encoder.layers.4.mlp.fc2", "model.vision_model.encoder.layers.5.self_attn.k_proj", "model.vision_model.encoder.layers.5.self_attn.v_proj", "model.vision_model.encoder.layers.5.self_attn.q_proj", "model.vision_model.encoder.layers.5.self_attn.out_proj", "model.vision_model.encoder.layers.5.mlp.fc1", "model.vision_model.encoder.layers.5.mlp.fc2", "model.vision_model.encoder.layers.6.self_attn.k_proj", "model.vision_model.encoder.layers.6.self_attn.v_proj", "model.vision_model.encoder.layers.6.self_attn.q_proj", "model.vision_model.encoder.layers.6.self_attn.out_proj", "model.vision_model.encoder.layers.6.mlp.fc1", "model.vision_model.encoder.layers.6.mlp.fc2", "model.vision_model.encoder.layers.7.self_attn.k_proj", "model.vision_model.encoder.layers.7.self_attn.v_proj", "model.vision_model.encoder.layers.7.self_attn.q_proj", "model.vision_model.encoder.layers.7.self_attn.out_proj", "model.vision_model.encoder.layers.7.mlp.fc1", "model.vision_model.encoder.layers.7.mlp.fc2", "model.vision_model.encoder.layers.8.self_attn.k_proj", "model.vision_model.encoder.layers.8.self_attn.v_proj", "model.vision_model.encoder.layers.8.self_attn.q_proj", "model.vision_model.encoder.layers.8.self_attn.out_proj", "model.vision_model.encoder.layers.8.mlp.fc1", "model.vision_model.encoder.layers.8.mlp.fc2", 
"model.vision_model.encoder.layers.9.self_attn.k_proj", "model.vision_model.encoder.layers.9.self_attn.v_proj", "model.vision_model.encoder.layers.9.self_attn.q_proj", "model.vision_model.encoder.layers.9.self_attn.out_proj", "model.vision_model.encoder.layers.9.mlp.fc1", "model.vision_model.encoder.layers.9.mlp.fc2", "model.vision_model.encoder.layers.10.self_attn.k_proj", "model.vision_model.encoder.layers.10.self_attn.v_proj", "model.vision_model.encoder.layers.10.self_attn.q_proj", "model.vision_model.encoder.layers.10.self_attn.out_proj", "model.vision_model.encoder.layers.10.mlp.fc1", "model.vision_model.encoder.layers.10.mlp.fc2", "model.vision_model.encoder.layers.11.self_attn.k_proj", "model.vision_model.encoder.layers.11.self_attn.v_proj", "model.vision_model.encoder.layers.11.self_attn.q_proj", "model.vision_model.encoder.layers.11.self_attn.out_proj", "model.vision_model.encoder.layers.11.mlp.fc1", "model.vision_model.encoder.layers.11.mlp.fc2", "model.vision_model.encoder.layers.12.self_attn.k_proj", "model.vision_model.encoder.layers.12.self_attn.v_proj", "model.vision_model.encoder.layers.12.self_attn.q_proj", "model.vision_model.encoder.layers.12.self_attn.out_proj", "model.vision_model.encoder.layers.12.mlp.fc1", "model.vision_model.encoder.layers.12.mlp.fc2", "model.vision_model.encoder.layers.13.self_attn.k_proj", "model.vision_model.encoder.layers.13.self_attn.v_proj", "model.vision_model.encoder.layers.13.self_attn.q_proj", "model.vision_model.encoder.layers.13.self_attn.out_proj", "model.vision_model.encoder.layers.13.mlp.fc1", "model.vision_model.encoder.layers.13.mlp.fc2", "model.vision_model.encoder.layers.14.self_attn.k_proj", "model.vision_model.encoder.layers.14.self_attn.v_proj", "model.vision_model.encoder.layers.14.self_attn.q_proj", "model.vision_model.encoder.layers.14.self_attn.out_proj", "model.vision_model.encoder.layers.14.mlp.fc1", "model.vision_model.encoder.layers.14.mlp.fc2", "model.vision_model.encoder.layers.15.self_attn.k_proj", "model.vision_model.encoder.layers.15.self_attn.v_proj", "model.vision_model.encoder.layers.15.self_attn.q_proj", "model.vision_model.encoder.layers.15.self_attn.out_proj", "model.vision_model.encoder.layers.15.mlp.fc1", "model.vision_model.encoder.layers.15.mlp.fc2", "model.vision_model.encoder.layers.16.self_attn.k_proj", "model.vision_model.encoder.layers.16.self_attn.v_proj", "model.vision_model.encoder.layers.16.self_attn.q_proj", "model.vision_model.encoder.layers.16.self_attn.out_proj", "model.vision_model.encoder.layers.16.mlp.fc1", "model.vision_model.encoder.layers.16.mlp.fc2", "model.vision_model.encoder.layers.17.self_attn.k_proj", "model.vision_model.encoder.layers.17.self_attn.v_proj", "model.vision_model.encoder.layers.17.self_attn.q_proj", "model.vision_model.encoder.layers.17.self_attn.out_proj", "model.vision_model.encoder.layers.17.mlp.fc1", "model.vision_model.encoder.layers.17.mlp.fc2", "model.vision_model.encoder.layers.18.self_attn.k_proj", "model.vision_model.encoder.layers.18.self_attn.v_proj", "model.vision_model.encoder.layers.18.self_attn.q_proj", "model.vision_model.encoder.layers.18.self_attn.out_proj", "model.vision_model.encoder.layers.18.mlp.fc1", "model.vision_model.encoder.layers.18.mlp.fc2", "model.vision_model.encoder.layers.19.self_attn.k_proj", "model.vision_model.encoder.layers.19.self_attn.v_proj", "model.vision_model.encoder.layers.19.self_attn.q_proj", "model.vision_model.encoder.layers.19.self_attn.out_proj", "model.vision_model.encoder.layers.19.mlp.fc1", 
"model.vision_model.encoder.layers.19.mlp.fc2", "model.vision_model.encoder.layers.20.self_attn.k_proj", "model.vision_model.encoder.layers.20.self_attn.v_proj", "model.vision_model.encoder.layers.20.self_attn.q_proj", "model.vision_model.encoder.layers.20.self_attn.out_proj", "model.vision_model.encoder.layers.20.mlp.fc1", "model.vision_model.encoder.layers.20.mlp.fc2", "model.vision_model.encoder.layers.21.self_attn.k_proj", "model.vision_model.encoder.layers.21.self_attn.v_proj", "model.vision_model.encoder.layers.21.self_attn.q_proj", "model.vision_model.encoder.layers.21.self_attn.out_proj", "model.vision_model.encoder.layers.21.mlp.fc1", "model.vision_model.encoder.layers.21.mlp.fc2", "model.vision_model.encoder.layers.22.self_attn.k_proj", "model.vision_model.encoder.layers.22.self_attn.v_proj", "model.vision_model.encoder.layers.22.self_attn.q_proj", "model.vision_model.encoder.layers.22.self_attn.out_proj", "model.vision_model.encoder.layers.22.mlp.fc1", "model.vision_model.encoder.layers.22.mlp.fc2", "model.vision_model.encoder.layers.23.self_attn.k_proj", "model.vision_model.encoder.layers.23.self_attn.v_proj", "model.vision_model.encoder.layers.23.self_attn.q_proj", "model.vision_model.encoder.layers.23.self_attn.out_proj", "model.vision_model.encoder.layers.23.mlp.fc1", "model.vision_model.encoder.layers.23.mlp.fc2", "model.vision_model.encoder.layers.24.self_attn.k_proj", "model.vision_model.encoder.layers.24.self_attn.v_proj", "model.vision_model.encoder.layers.24.self_attn.q_proj", "model.vision_model.encoder.layers.24.self_attn.out_proj", "model.vision_model.encoder.layers.24.mlp.fc1", "model.vision_model.encoder.layers.24.mlp.fc2", "model.vision_model.encoder.layers.25.self_attn.k_proj", "model.vision_model.encoder.layers.25.self_attn.v_proj", "model.vision_model.encoder.layers.25.self_attn.q_proj", "model.vision_model.encoder.layers.25.self_attn.out_proj", "model.vision_model.encoder.layers.25.mlp.fc1", "model.vision_model.encoder.layers.25.mlp.fc2", "model.vision_model.encoder.layers.26.self_attn.k_proj", "model.vision_model.encoder.layers.26.self_attn.v_proj", "model.vision_model.encoder.layers.26.self_attn.q_proj", "model.vision_model.encoder.layers.26.self_attn.out_proj", "model.vision_model.encoder.layers.26.mlp.fc1", "model.vision_model.encoder.layers.26.mlp.fc2", "model.connector.modality_projection.proj", "lm_head" ], "kv_cache_scheme": null, "quant_method": "compressed-tensors", "quantization_status": "compressed", "sparsity_config": { "format": "dense", "global_sparsity": 0.11567594517840002, "ignore": [], "registry_requires_subclass": false, "sparsity_structure": "unstructured", "targets": [] } }, "scale_factor": 3, "text_config": { "_attn_implementation_autoset": true, "_flash_attn_2_enabled": true, "_name_or_path": "/fsx/m4/experiments/local_experiment_dir/s3_async_temporary_checkpoint_folder/tr_324_opt_400/unwrapped_model", "architectures": [ "VLlama3ForCausalLM" ], "bos_token_id": 0, "eos_token_id": 0, "head_dim": 64, "hidden_size": 2048, "intermediate_size": 8192, "max_position_embeddings": 16384, "model_type": "llama", "neftune_noise_alpha": 0.0, "num_hidden_layers": 24, "pad_token_id": 2, "perceiver_config": { "_attn_implementation_autoset": false, "_name_or_path": "", "add_cross_attention": false, "architectures": null, "attention_dropout": 0.0, "bad_words_ids": null, "begin_suppress_tokens": null, "bos_token_id": null, "chunk_size_feed_forward": 0, "cross_attention_hidden_size": null, "decoder_start_token_id": null, "diversity_penalty": 0.0, 
"do_sample": false, "early_stopping": false, "encoder_no_repeat_ngram_size": 0, "eos_token_id": null, "exponential_decay_length_penalty": null, "finetuning_task": null, "forced_bos_token_id": null, "forced_eos_token_id": null, "hidden_act": "silu", "id2label": { "0": "LABEL_0", "1": "LABEL_1" }, "is_decoder": false, "is_encoder_decoder": false, "label2id": { "LABEL_0": 0, "LABEL_1": 1 }, "length_penalty": 1.0, "max_length": 20, "min_length": 0, "model_type": "vllama3", "no_repeat_ngram_size": 0, "num_beam_groups": 1, "num_beams": 1, "num_key_value_heads": 1, "num_return_sequences": 1, "output_attentions": false, "output_hidden_states": false, "output_scores": false, "pad_token_id": null, "prefix": null, "problem_type": null, "pruned_heads": {}, "qk_layer_norms_perceiver": false, "remove_invalid_values": false, "repetition_penalty": 1.0, "resampler_depth": 6, "resampler_head_dim": 96, "resampler_n_heads": 16, "resampler_n_latents": 64, "return_dict": true, "return_dict_in_generate": false, "sep_token_id": null, "suppress_tokens": null, "task_specific_params": null, "temperature": 1.0, "tf_legacy_loss": false, "tie_encoder_decoder": false, "tie_word_embeddings": true, "tokenizer_class": null, "top_k": 50, "top_p": 1.0, "torch_dtype": null, "torchscript": false, "transformers_version": "4.46.0", "typical_p": 1.0, "use_bfloat16": false }, "qk_layer_norms": false, "rms_norm_eps": 1e-05, "rope_theta": 273768.0, "torch_dtype": "bfloat16", "use_resampler": false, "vocab_size": 49155 }, "tie_word_embeddings": false, "torch_dtype": "bfloat16", "transformers.js_config": { "dtype": { "decoder_model_merged": "q4", "embed_tokens": "auto", "vision_encoder": "auto" }, "kv_cache_dtype": { "fp16": "float16", "q4f16": "float16" } }, "transformers_version": "4.48.1", "use_cache": true, "vision_config": { "image_size": 384, "intermediate_size": 4304, "max_image_size": { "longest_edge": 384 }, "model_type": "idefics3_vision", "num_hidden_layers": 27, "patch_size": 14, "size": { "longest_edge": 1920 }, "tie_word_embeddings": false }, "vocab_size": 49155 }