Update README.md
README.md CHANGED
````diff
@@ -76,9 +76,9 @@ import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer

 # Load the HelpingAI2.5-10B model
-model = AutoModelForCausalLM.from_pretrained("
+model = AutoModelForCausalLM.from_pretrained("HelpingAI/HelpingAI2.5-10B")
 # Load the tokenizer
-tokenizer = AutoTokenizer.from_pretrained("
+tokenizer = AutoTokenizer.from_pretrained("HelpingAI/HelpingAI2.5-10B")

 # Define the chat input
 chat = [
@@ -95,7 +95,7 @@ print(tokenizer.decode(response, skip_special_tokens=True))

 ```python
 from webscout.Local import *
-model_path = download_model("
+model_path = download_model("HelpingAI/HelpingAI2.5-10B", "q4_k_m.gguf", token=None)
 model = Model(model_path, n_gpu_layers=0, context_length=4096)

 thread = Thread(model, format=helpingai2)
````
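For context, here is a minimal sketch of how the completed `transformers` snippet fits together once both `from_pretrained` calls point at `HelpingAI/HelpingAI2.5-10B`. Only the model id and the final `print(tokenizer.decode(...))` line come from the diff; the chat messages and generation settings are illustrative assumptions based on the standard chat-template flow.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the HelpingAI2.5-10B model and tokenizer (model id taken from the diff above)
model = AutoModelForCausalLM.from_pretrained("HelpingAI/HelpingAI2.5-10B")
tokenizer = AutoTokenizer.from_pretrained("HelpingAI/HelpingAI2.5-10B")

# Define the chat input (the README's exact messages are not shown in this diff; these are placeholders)
chat = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hi there!"},
]

# Apply the model's chat template and generate a reply
inputs = tokenizer.apply_chat_template(chat, add_generation_prompt=True, return_tensors="pt")
outputs = model.generate(inputs, max_new_tokens=256)

# Decode only the newly generated tokens, matching the README's print statement
response = outputs[0][inputs.shape[-1]:]
print(tokenizer.decode(response, skip_special_tokens=True))
```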
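The completed `webscout.Local` line pulls the `q4_k_m.gguf` quantized build of the model, `n_gpu_layers=0` keeps inference on the CPU, and `context_length=4096` sets the prompt window. A rough sketch of how the completed snippet might be used is below; the `download_model`, `Model`, `Thread`, and `helpingai2` names come from the diff, while the `thread.send(...)` call is an assumption about the library's chat API.

```python
from webscout.Local import *

# Download the q4_k_m GGUF build (repo id and filename taken from the diff above)
model_path = download_model("HelpingAI/HelpingAI2.5-10B", "q4_k_m.gguf", token=None)

# n_gpu_layers=0 -> CPU-only inference; context_length caps the prompt window
model = Model(model_path, n_gpu_layers=0, context_length=4096)

# Wrap the model in a chat thread using the helpingai2 prompt format
thread = Thread(model, format=helpingai2)

# NOTE: `send` is assumed here as the chat call; check webscout.Local's Thread API
print(thread.send("Hello!"))
```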