Update README.md
README.md
CHANGED
@@ -14,86 +14,4 @@ Pico v1 is a work in progress model. Based off Phi 3.5 Mini, it has been fine tu
 
 When making an output, Pico will create three sections: a reasoning section, a self-reflection section, and an output section.
 
-Pico v1 struggles with non-question related tasks (small talk, roleplay, etc.).
-
-Here is an example of how you can use it:
-
-```python
-from transformers import AutoModelForCausalLM, AutoTokenizer
-import torch
-
-# Phi 3.5 chat template (for reference; build_prompt below applies the same format manually)
-phi3_template = (
-    "{{ bos_token }}"
-    "{% for message in messages %}"
-    "{{ '<|' + message['role'] + '|>\\n' + message['content'] + '<|end|>\\n' }}"
-    "{% endfor %}"
-    "{% if add_generation_prompt %}"
-    "{{ '<|assistant|>\\n' }}"
-    "{% endif %}"
-)
-phi3_template_eos_token = "<|end|>"
-
-
-def build_prompt(messages, bos_token="<|start|>", add_generation_prompt=True):
-    """
-    Build a prompt using the Phi 3.5 template.
-    """
-    prompt = bos_token
-    for message in messages:
-        prompt += f"<|{message['role']}|>\n{message['content']}\n<|end|>\n"
-    if add_generation_prompt:
-        prompt += "<|assistant|>\n"
-    return prompt
-
-
-def chat_with_model():
-    # Load the model and tokenizer
-    model_name = "LucidityAI/Pico-v1-3b"
-    print("Loading model and tokenizer...")
-
-    # Enforce GPU usage
-    if not torch.cuda.is_available():
-        raise RuntimeError("CUDA is not available. Please ensure your GPU and CUDA environment are configured correctly.")
-
-    device = torch.device("cuda")
-    tokenizer = AutoTokenizer.from_pretrained(model_name)
-    model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16).to(device)
-    print("Model and tokenizer loaded successfully.")
-
-    # Chat loop
-    print("Start chatting with the model! Type 'exit' to quit.")
-    conversation = []
-    while True:
-        user_input = input("You: ")
-        if user_input.lower() == "exit":
-            print("Goodbye!")
-            break
-
-        # Append the user's message to the conversation
-        conversation.append({"role": "user", "content": user_input})
-
-        # Build the input prompt using the Phi 3.5 template
-        prompt = build_prompt(conversation, bos_token=tokenizer.bos_token or "<|start|>")
-
-        # Tokenize the input prompt
-        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1024).to(device)
-
-        # Generate a response
-        outputs = model.generate(
-            inputs.input_ids,
-            max_new_tokens=1024,
-            num_return_sequences=1,
-            do_sample=True,
-            temperature=0.5,
-            pad_token_id=tokenizer.eos_token_id,
-        )
-
-        # Decode only the newly generated tokens as the assistant's reply
-        assistant_reply = tokenizer.decode(
-            outputs[0][inputs.input_ids.shape[1]:], skip_special_tokens=True
-        ).strip()
-        print(f"Model: {assistant_reply}")
-
-        # Append the assistant's reply to the conversation
-        conversation.append({"role": "assistant", "content": assistant_reply})
-
-
-if __name__ == "__main__":
-    chat_with_model()
-```
+Pico v1 struggles with non-question related tasks (small talk, roleplay, etc.).
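
The removed example builds the Phi 3.5 prompt string by hand. The same formatting can usually be obtained from the tokenizer's own chat template via `apply_chat_template`; the sketch below assumes `LucidityAI/Pico-v1-3b` ships such a template in its tokenizer config (not verified here), and the sample question is made up for illustration.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

model_name = "LucidityAI/Pico-v1-3b"
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)

# Hypothetical sample question, for illustration only.
conversation = [{"role": "user", "content": "What is the capital of France?"}]

# apply_chat_template renders the conversation with whatever chat template the
# tokenizer ships, so no hand-written Phi 3.5 template string is required.
input_ids = tokenizer.apply_chat_template(
    conversation, add_generation_prompt=True, return_tensors="pt"
).to(device)

output_ids = model.generate(
    input_ids,
    max_new_tokens=512,
    do_sample=True,
    temperature=0.5,
    pad_token_id=tokenizer.eos_token_id,
)

# Decode only the newly generated tokens (the assistant's reply).
reply = tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True)
print(reply)
```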
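
The README says each Pico reply contains a reasoning, a self-reflection, and an output section, but it does not spell out how those sections are delimited. Purely as an illustration, assuming hypothetical plain-text headers such as `Reasoning:`, `Self-reflection:`, and `Output:`, the final answer could be separated from a reply like this; check Pico's actual output for the real markers.

```python
# Hypothetical helper: the section headers below are assumptions made for
# illustration; check Pico's actual output for the real markers.
def extract_sections(response: str) -> dict:
    sections = {"reasoning": "", "self-reflection": "", "output": ""}
    current = None
    for line in response.splitlines():
        lowered = line.strip().lower()
        for key in sections:
            if lowered.startswith(f"{key}:"):
                current = key
                line = line.split(":", 1)[1]
                break
        if current is not None:
            sections[current] += line.strip() + "\n"
    return {key: text.strip() for key, text in sections.items()}


# Example: print only the final answer from a (hypothetical) Pico reply.
reply = "Reasoning: ...\nSelf-reflection: ...\nOutput: Paris is the capital of France."
print(extract_sections(reply)["output"])
```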