{ "architectures": [ "LlamaForCausalLM" ], "model_type": "llama", "torch_dtype": "float16", "transformers_version": "4.49.0", "use_cache": true, "vocab_size": 32000 }