Update README.md
README.md CHANGED

@@ -7,6 +7,8 @@ license: apache-2.0
 base_model:
 - mistralai/Mistral-Nemo-Instruct-2407
 pipeline_tag: reinforcement-learning
+datasets:
+- gretelai/synthetic_text_to_sql
 ---
 # Model Card: Mistral-Nemo-Instruct-2407_ORPO- Fine-Tuned for Text-to-SQL
 
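The first hunk registers gretelai/synthetic_text_to_sql as the training dataset in the model-card metadata. For reference, a minimal sketch of pulling that dataset with the Hugging Face `datasets` library is shown below; it is not part of the commit, and the assumption that the dataset exposes a `train` split is mine.

```python
# Sketch only (not from the README): load the dataset named in the new
# `datasets:` metadata entry.
from datasets import load_dataset

# Assumption: the dataset ships a "train" split
ds = load_dataset("gretelai/synthetic_text_to_sql", split="train")
print(ds)      # row count and column names
print(ds[0])   # one text-to-SQL example
```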
@@ -79,9 +81,9 @@ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
 from peft import PeftConfig,PeftModel
 
 # Load the fine-tuned peft model
-peft_config = PeftConfig.from_pretrained(
+peft_config = PeftConfig.from_pretrained("JHuel/Mistral-Nemo-Instruct-2407_DPO_qlora")
 model = AutoModelForCausalLM.from_pretrained(peft_config.base_model_name_or_path)
-model = PeftModel.from_pretrained(model,
+model = PeftModel.from_pretrained(model, "JHuel/Mistral-Nemo-Instruct-2407_DPO_qlora")
 
 
 # Load the fine-tuned model
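The second hunk completes the previously truncated loading calls by pointing them at the JHuel/Mistral-Nemo-Instruct-2407_DPO_qlora adapter. A self-contained sketch of how those three lines are typically used end to end is below; the tokenizer setup, the example prompt, and the `generate()` call are assumptions added for illustration and are not part of the diff.

```python
# Sketch only: the three model-loading lines mirror the README's new code; the
# tokenizer, prompt, and generation step are assumptions added for completeness.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftConfig, PeftModel

adapter_id = "JHuel/Mistral-Nemo-Instruct-2407_DPO_qlora"

# Resolve the base model recorded in the adapter's PEFT config, then attach the adapter
peft_config = PeftConfig.from_pretrained(adapter_id)
model = AutoModelForCausalLM.from_pretrained(peft_config.base_model_name_or_path)
model = PeftModel.from_pretrained(model, adapter_id)

# Assumption: the base-model tokenizer was not modified during fine-tuning
tokenizer = AutoTokenizer.from_pretrained(peft_config.base_model_name_or_path)

prompt = "Write a SQL query that returns the names of all customers who placed an order in 2023."
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```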