Update README.md
README.md CHANGED
@@ -1,5 +1,4 @@
-
-
+Model
 ---
 
 # MasterControlAIML R1-Qwen2.5-1.5b SFT R1 JSON Unstructured-To-Structured LoRA Model
@@ -76,7 +75,7 @@ from unsloth import FastLanguageModel
 import torch
 
 # Specify the model name
-MODEL = "MasterControlAIML/R1-Qwen2.5-1.5b-SFT-R1-JSON-Unstructured-To-Structured
+MODEL = "MasterControlAIML/DeepSeek-R1-Qwen2.5-1.5b-SFT-R1-JSON-Unstructured-To-Structured"
 
 # Load the model and tokenizer
 model, tokenizer = FastLanguageModel.from_pretrained(
@@ -118,7 +117,7 @@ If you prefer to use Hugging Face's Transformers directly, here’s an alternati
 from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer
 import torch
 
-MODEL = "MasterControlAIML/R1-Qwen2.5-1.5b-SFT-R1-JSON-Unstructured-To-Structured
+MODEL = "MasterControlAIML/DeepSeek-R1-Qwen2.5-1.5b-SFT-R1-JSON-Unstructured-To-Structured"
 
 # Initialize tokenizer and model
 tokenizer = AutoTokenizer.from_pretrained(MODEL)
@@ -133,7 +132,7 @@ Below is an instruction that describes a task, paired with an input that provide
 """
 
 # Define your text input
-TEXT = "
+TEXT = ""
 prompt = ALPACA_PROMPT.format(TEXT, "")
 inputs = tokenizer([prompt], return_tensors="pt").to("cuda")
 text_streamer = TextStreamer(tokenizer)
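For context, the Unsloth snippet this commit fixes assembles into a minimal sketch like the following. Only the corrected model name and the `FastLanguageModel.from_pretrained(` call are visible in the diff; `max_seq_length`, `load_in_4bit`, and the `for_inference` call are illustrative assumptions, not part of the change.

```python
from unsloth import FastLanguageModel

# Specify the model name (as corrected by this commit)
MODEL = "MasterControlAIML/DeepSeek-R1-Qwen2.5-1.5b-SFT-R1-JSON-Unstructured-To-Structured"

# Load the model and tokenizer; these keyword values are illustrative
# defaults, not values taken from the diff.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=MODEL,
    max_seq_length=2048,
    load_in_4bit=True,
)
FastLanguageModel.for_inference(model)  # enable Unsloth's inference mode
```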
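Likewise, the Transformers hunks (the fixed `MODEL` string and the fixed `TEXT = ""` line) fit into a self-contained sketch like the one below. The full Alpaca template, the dtype, the device placement, and `max_new_tokens` are assumptions; the README's own template is only partially visible in the hunk-header context lines.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer
import torch

MODEL = "MasterControlAIML/DeepSeek-R1-Qwen2.5-1.5b-SFT-R1-JSON-Unstructured-To-Structured"

# Initialize tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForCausalLM.from_pretrained(
    MODEL,
    torch_dtype=torch.float16,  # illustrative; pick a dtype your GPU supports
    device_map="auto",
)

# Alpaca-style prompt; the first line matches the hunk-header context, the
# rest is a standard two-slot Alpaca layout (assumption).
ALPACA_PROMPT = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{}

### Response:
{}"""

# Define your text input (the commit leaves this as an empty placeholder)
TEXT = ""
prompt = ALPACA_PROMPT.format(TEXT, "")
inputs = tokenizer([prompt], return_tensors="pt").to("cuda")
text_streamer = TextStreamer(tokenizer)

# Stream generated tokens to stdout; max_new_tokens is an illustrative cap.
_ = model.generate(**inputs, streamer=text_streamer, max_new_tokens=512)
```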