---
base_model: Locutusque/TinyMistral-248M-v2.5-Instruct
license: apache-2.0
datasets:
- HuggingFaceH4/ultrafeedback_binarized
---

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the tokenizer and the ORPO fine-tuned model
tokenizer = AutoTokenizer.from_pretrained("heegyu/TinyMistral-248M-v2.5-Instruct-orpo")
model = AutoModelForCausalLM.from_pretrained("heegyu/TinyMistral-248M-v2.5-Instruct-orpo")

# Build a single-turn conversation and format it with the model's chat template
conv = [
    {
        'role': 'user',
        'content': 'What can I do with Large Language Model?'
    }
]
prompt = tokenizer.apply_chat_template(conv, add_generation_prompt=True, return_tensors="pt")

# Generate and decode the response
output = model.generate(prompt, max_new_tokens=128)
print(tokenizer.decode(output[0]))
```
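
A minimal sketch of continuing the chat for a second turn, assuming the same chat-template usage as above; the follow-up question is only an illustrative placeholder:

```python
# Keep only the newly generated tokens as the assistant's reply
reply = tokenizer.decode(output[0][prompt.shape[-1]:], skip_special_tokens=True)

# Append the assistant reply and the next user turn, then generate again
conv.append({'role': 'assistant', 'content': reply})
conv.append({'role': 'user', 'content': 'Can you give a concrete example?'})  # placeholder follow-up
prompt = tokenizer.apply_chat_template(conv, add_generation_prompt=True, return_tensors="pt")
output = model.generate(prompt, max_new_tokens=128)
print(tokenizer.decode(output[0][prompt.shape[-1]:], skip_special_tokens=True))
```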