Error when loading a GGUF model with llama-cpp-python
# Reproduction: download a quantized Phi-4-mini GGUF from the Hugging Face Hub
# and load it with llama-cpp-python. The traceback below shows the download
# succeeds (a snapshot path exists) but the native loader returns NULL, so
# llama-cpp-python raises "Failed to load model from file: ...".
# NOTE(review): a NULL return from llama_load_model_from_file with an existing,
# complete file usually means the bundled llama.cpp build predates the model's
# architecture (Phi-4 support is recent) — presumably fixed by upgrading
# llama-cpp-python; verify the installed version, and also confirm the cached
# .gguf file size matches the repo (a truncated download fails the same way).
from llama_cpp import Llama
llm = Llama.from_pretrained(
repo_id="bartowski/microsoft_Phi-4-mini-instruct-GGUF",
filename="microsoft_Phi-4-mini-instruct-Q5_K_M.gguf",
)
ValueError Traceback (most recent call last)
Cell In[5], line 3
1 from llama_cpp import Llama
----> 3 llm = Llama.from_pretrained(
4 repo_id="bartowski/microsoft_Phi-4-mini-instruct-GGUF",
5 filename="microsoft_Phi-4-mini-instruct-Q5_K_M.gguf",
6 )
File ~/mounted_nfs_data/ashish/slim-openai-chroma/SlimCs_env/lib/python3.12/site-packages/llama_cpp/llama.py:2218, in Llama.from_pretrained(cls, repo_id, filename, local_dir, local_dir_use_symlinks, cache_dir, **kwargs)
2215 else:
2216 model_path = os.path.join(local_dir, filename)
-> 2218 return cls(
2219 model_path=model_path,
2220 **kwargs,
2221 )
File ~/mounted_nfs_data/ashish/slim-openai-chroma/SlimCs_env/lib/python3.12/site-packages/llama_cpp/llama.py:371, in Llama.init(self, model_path, n_gpu_layers, split_mode, main_gpu, tensor_split, rpc_servers, vocab_only, use_mmap, use_mlock, kv_overrides, seed, n_ctx, n_batch, n_threads, n_threads_batch, rope_scaling_type, pooling_type, rope_freq_base, rope_freq_scale, yarn_ext_factor, yarn_attn_factor, yarn_beta_fast, yarn_beta_slow, yarn_orig_ctx, logits_all, embedding, offload_kqv, flash_attn, last_n_tokens_size, lora_base, lora_scale, lora_path, numa, chat_format, chat_handler, draft_model, tokenizer, type_k, type_v, spm_infill, verbose, **kwargs)
366 if not os.path.exists(model_path):
367 raise ValueError(f"Model path does not exist: {model_path}")
369 self._model = self._stack.enter_context(
370 contextlib.closing(
--> 371 LlamaModel(
372 path_model=self.model_path,
373 params=self.model_params,
374 verbose=self.verbose,
375 )
376 )
377 )
379 # Override tokenizer
380 self.tokenizer = tokenizer or LlamaTokenizer(self)
File ~/mounted_nfs_data/ashish/slim-openai-chroma/SlimCs_env/lib/python3.12/site-packages/llama_cpp/_internals.py:55, in _LlamaModel.init(self, path_model, params, verbose)
50 self.model = llama_cpp.llama_load_model_from_file(
51 self.path_model.encode("utf-8"), self.params
52 )
54 if self.model is None:
---> 55 raise ValueError(f"Failed to load model from file: {path_model}")
57 def free_model():
58 if self.model is None:
ValueError: Failed to load model from file: /home/til-master-node/.cache/huggingface/hub/models--bartowski--microsoft_Phi-4-mini-instruct-GGUF/snapshots/7ff82c2aaa4dde30121698a973765f39be5288c0/./microsoft_Phi-4-mini-instruct-Q5_K_M.gguf