{ "_name_or_path": "meta-llama/Meta-Llama-3-70B-Instruct", "architectures": [ "LlamaForSequenceClassification" ], "attention_bias": false, "attention_dropout": 0.0, "bos_token_id": 128000, "eos_token_id": 128009, "head_dim": 128, "hidden_act": "silu", "hidden_size": 8192, "id2label": { "0": "token activation 0", "1": "token activation 1", "2": "token activation 2", "3": "token activation 3", "4": "token activation 4", "5": "token activation 5", "6": "token activation 6", "7": "token activation 7", "8": "token activation 8", "9": "token activation 9", "10": "token activation 10", "11": "token activation 11", "12": "token activation 12", "13": "token activation 13", "14": "token activation 14", "15": "token activation 15", "16": "token activation 16", "17": "token activation 17", "18": "token activation 18", "19": "token activation 19", "20": "token activation 20", "21": "token activation 21", "22": "token activation 22", "23": "token activation 23", "24": "token activation 24", "25": "token activation 25", "26": "token activation 26", "27": "token activation 27", "28": "token activation 28", "29": "token activation 29" }, "initializer_range": 0.02, "intermediate_size": 28672, "label2id": { "token activation 0": 0, "token activation 1": 1, "token activation 10": 10, "token activation 11": 11, "token activation 12": 12, "token activation 13": 13, "token activation 14": 14, "token activation 15": 15, "token activation 16": 16, "token activation 17": 17, "token activation 18": 18, "token activation 19": 19, "token activation 2": 2, "token activation 20": 20, "token activation 21": 21, "token activation 22": 22, "token activation 23": 23, "token activation 24": 24, "token activation 25": 25, "token activation 26": 26, "token activation 27": 27, "token activation 28": 28, "token activation 29": 29, "token activation 3": 3, "token activation 4": 4, "token activation 5": 5, "token activation 6": 6, "token activation 7": 7, "token activation 8": 8, "token activation 9": 9 }, "max_position_embeddings": 8192, "mlp_bias": false, "model_type": "llama", "num_attention_heads": 64, "num_hidden_layers": 80, "num_key_value_heads": 8, "pretraining_tp": 1, "rms_norm_eps": 1e-05, "rope_scaling": null, "rope_theta": 500000.0, "tie_word_embeddings": false, "torch_dtype": "float16", "transformers_version": "4.48.2", "use_cache": true, "vocab_size": 128256 }