Commit: a823c3f
Parent: e31f9c6
Removed dependency on the intel-npu library; otherwise builds on Hugging Face will fail.
Files changed:
- repository/intel_npu.py (+32 -30)
- requirements.txt (+1 -1)
repository/intel_npu.py
CHANGED
@@ -1,8 +1,8 @@
 import json
 from pathlib import Path
 
-from intel_npu_acceleration_library import NPUModelForCausalLM, int4
-from intel_npu_acceleration_library.compiler import CompilerConfig
+# from intel_npu_acceleration_library import NPUModelForCausalLM, int4
+# from intel_npu_acceleration_library.compiler import CompilerConfig
 from transformers import AutoTokenizer
 
 from repository.repository_abc import Repository, Model
@@ -25,33 +25,35 @@ class IntelNpuRepository(Repository):
         return self.message_history
 
     def init(self):
-        compiler_conf = CompilerConfig(dtype=int4)
-        self.model = NPUModelForCausalLM.from_pretrained(self.model_info.name, use_cache=True, config=compiler_conf,
-                                                          export=True, temperature=0).eval()
-        self.tokenizer = AutoTokenizer.from_pretrained(self.model_info.name)
-        self.terminators = [self.tokenizer.eos_token_id, self.tokenizer.convert_tokens_to_ids("<|eot_id|>")]
+        pass
+        # compiler_conf = CompilerConfig(dtype=int4)
+        # self.model = NPUModelForCausalLM.from_pretrained(self.model_info.name, use_cache=True, config=compiler_conf,
+        #                                                  export=True, temperature=0).eval()
+        # self.tokenizer = AutoTokenizer.from_pretrained(self.model_info.name)
+        # self.terminators = [self.tokenizer.eos_token_id, self.tokenizer.convert_tokens_to_ids("<|eot_id|>")]
 
     def send_prompt(self, prompt: str, add_to_history: bool = True) -> dict[str, str]:
-        print("prompt to be sent: " + prompt)
-        user_prompt = {"role": self.model_info.roles.user_role, "content": prompt}
-        if self.log_to_file:
-            with open(self.log_to_file, "a+") as log_file:
-                log_file.write(json.dumps(user_prompt, indent=2))
-                log_file.write("\n")
-        self.get_message_history().append(user_prompt)
-        input_ids = (self.tokenizer.apply_chat_template(self.get_message_history(), add_generation_prompt=True,
-                                                        return_tensors="pt")
-                     .to(self.model.device))
-        outputs = self.model.generate(input_ids, eos_token_id=self.terminators, do_sample=True, max_new_tokens=2000, cache_position=None)
-        generated_token_array = outputs[0][len(input_ids[0]):]
-        generated_tokens = "".join(self.tokenizer.batch_decode(generated_token_array, skip_special_tokens=True))
-        answer = {"role": self.get_model_info().roles.ai_role, "content": generated_tokens}
-        if self.log_to_file:
-            with open(self.log_to_file, "a+") as log_file:
-                log_file.write(json.dumps(answer, indent=2))
-                log_file.write("\n")
-        if add_to_history:
-            self.message_history.append(answer)
-        else:
-            self.message_history.pop()
-        return answer
+        pass
+        # print("prompt to be sent: " + prompt)
+        # user_prompt = {"role": self.model_info.roles.user_role, "content": prompt}
+        # if self.log_to_file:
+        #     with open(self.log_to_file, "a+") as log_file:
+        #         log_file.write(json.dumps(user_prompt, indent=2))
+        #         log_file.write("\n")
+        # self.get_message_history().append(user_prompt)
+        # input_ids = (self.tokenizer.apply_chat_template(self.get_message_history(), add_generation_prompt=True,
+        #                                                 return_tensors="pt")
+        #              .to(self.model.device))
+        # outputs = self.model.generate(input_ids, eos_token_id=self.terminators, do_sample=True, max_new_tokens=2000, cache_position=None)
+        # generated_token_array = outputs[0][len(input_ids[0]):]
+        # generated_tokens = "".join(self.tokenizer.batch_decode(generated_token_array, skip_special_tokens=True))
+        # answer = {"role": self.get_model_info().roles.ai_role, "content": generated_tokens}
+        # if self.log_to_file:
+        #     with open(self.log_to_file, "a+") as log_file:
+        #         log_file.write(json.dumps(answer, indent=2))
+        #         log_file.write("\n")
+        # if add_to_history:
+        #     self.message_history.append(answer)
+        # else:
+        #     self.message_history.pop()
+        # return answer
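An alternative to commenting the NPU code out would be to make the import optional, so the module still loads on Hugging Face builds while machines that do have the library keep the accelerated path. The sketch below is not part of this commit; it is a minimal illustration that reuses only the names visible in the diff (NPUModelForCausalLM, int4, CompilerConfig, AutoTokenizer, self.model_info.name) and assumes the rest of IntelNpuRepository is unchanged.

from transformers import AutoTokenizer

from repository.repository_abc import Repository

# Optional import (sketch, not the committed code): the intel-npu-acceleration-library
# wheel is only available on machines with an Intel NPU, so its absence must not
# break importing this module during a Hugging Face build.
try:
    from intel_npu_acceleration_library import NPUModelForCausalLM, int4
    from intel_npu_acceleration_library.compiler import CompilerConfig
    NPU_AVAILABLE = True
except ImportError:
    NPU_AVAILABLE = False


class IntelNpuRepository(Repository):
    # ... constructor and helpers as before ...

    def init(self):
        if not NPU_AVAILABLE:
            # Fail only when the NPU backend is actually requested, not at import time.
            raise RuntimeError("intel-npu-acceleration-library is not installed; "
                               "the Intel NPU backend cannot be used in this environment.")
        # Same initialisation the commit comments out.
        compiler_conf = CompilerConfig(dtype=int4)
        self.model = NPUModelForCausalLM.from_pretrained(self.model_info.name, use_cache=True,
                                                          config=compiler_conf,
                                                          export=True, temperature=0).eval()
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_info.name)
        self.terminators = [self.tokenizer.eos_token_id,
                            self.tokenizer.convert_tokens_to_ids("<|eot_id|>")]

With a guard like this the package could stay out of requirements.txt exactly as the commit leaves it, and be installed manually only on NPU hardware.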
requirements.txt
CHANGED
@@ -1,4 +1,4 @@
 PyPDFForm
 ollama
 transformers
-intel-npu-acceleration-library
+# intel-npu-acceleration-library