pad committed on
Commit
0735287
·
verified ·
1 Parent(s): 0de2a3d

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +7 -3
README.md CHANGED
@@ -47,7 +47,8 @@ model, tokenizer = FastLanguageModel.from_pretrained(
47
 
48
  FastLanguageModel.for_inference(model) # Enable native 2x faster inference
49
 
50
- messages = [{"role": "user", "content": "<guidance_prompt>Count the number of 'r's in the word 'strawberry,' and then write a Python script that checks if an arbitrary word contains the same number of 'r's.</guidance_prompt>"}]
 
51
  inputs = tokenizer.apply_chat_template(
52
  messages,
53
  tokenize=True,
@@ -58,5 +59,8 @@ inputs = tokenizer.apply_chat_template(
58
  outputs = model.generate(input_ids=inputs, max_new_tokens=2000, use_cache=True, early_stopping=True, temperature=0)
59
  result = tokenizer.batch_decode(outputs)
60
 
61
- print(result[0][len(input_data):].replace("</s>", ""))
62
- ```
 
 
 
 
47
 
48
  FastLanguageModel.for_inference(model) # Enable native 2x faster inference
49
 
50
+ guidance_prompt = """<guidance_prompt>Count the number of 'r's in the word 'strawberry,' and then write a Python script that checks if an arbitrary word contains the same number of 'r's.</guidance_prompt>"""
51
+ messages = [{"role": "user", "content": guidance_prompt}]
52
  inputs = tokenizer.apply_chat_template(
53
  messages,
54
  tokenize=True,
 
59
  outputs = model.generate(input_ids=inputs, max_new_tokens=2000, use_cache=True, early_stopping=True, temperature=0)
60
  result = tokenizer.batch_decode(outputs)
61
 
62
+ print(result[0][len(guidance_prompt):].replace("</s>", ""))
63
+ ```
64
+
65
+ # Disclaimer
66
+ The model may occasionally fail to generate complete guidance, especially when the prompt includes specific instructions on how the responses should be structured. This limitation arises from the way the model was trained.