Update README.md
Browse files
README.md
CHANGED
@@ -36,13 +36,28 @@ The model is instruction-tuned to support a text-to-query use case for MongoDB a
|
|
36 |
## 📦 How to Use
|
37 |
|
38 |
```python
|
39 |
-
from transformers import AutoModelForCausalLM, AutoTokenizer
|
40 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
41 |
|
42 |
-
base = AutoModelForCausalLM.from_pretrained("unsloth/gemma-2b-it", load_in_4bit=True, device_map="auto")
|
43 |
-
tokenizer = AutoTokenizer.from_pretrained("unsloth/gemma-2b-it")
|
44 |
-
|
45 |
-
model = PeftModel.from_pretrained(base, "kihyun1998/gemma-2b-it-mongodb-lora")
|
46 |
|
47 |
prompt = """### Instruction:
|
48 |
Convert to MongoDB query string.
|
|
|
36 |
## 📦 How to Use
|
37 |
|
38 |
```python
|
39 |
+
import torch
from unsloth import FastLanguageModel
|
40 |
+
|
41 |
+
model, tokenizer = FastLanguageModel.from_pretrained(
|
42 |
+
model_name = "unsloth/gemma-2b-it",
|
43 |
+
max_seq_length = 1024,
|
44 |
+
dtype = torch.float16,
|
45 |
+
load_in_4bit = True,
|
46 |
+
)
|
47 |
+
|
48 |
+
# Attach LoRA adapter layers (configuration must match the trained adapter)
|
49 |
+
model = FastLanguageModel.get_peft_model(
|
50 |
+
model,
|
51 |
+
r=16,
|
52 |
+
lora_alpha=32,
|
53 |
+
target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
|
54 |
+
lora_dropout=0.05,
|
55 |
+
bias="none",
|
56 |
+
)
|
57 |
+
|
58 |
+
# Load the trained adapter weights
|
59 |
+
model.load_adapter("kihyun1998/gemma-2b-it-mongodb-lora", adapter_name="default")
|
60 |
|
|
|
|
|
|
|
|
|
61 |
|
62 |
prompt = """### Instruction:
|
63 |
Convert to MongoDB query string.
|