matthieumeeus97 commited on
Commit
fd5198f
·
verified ·
1 Parent(s): 3421242

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +37 -0
README.md CHANGED
@@ -30,6 +30,43 @@ It achieves the following results on the evaluation set:
30
  - Logits/rejected: -0.8732
31
  - Logits/chosen: -0.9594
32
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  ## Model description
34
 
35
  More information needed
 
30
  - Logits/rejected: -0.8732
31
  - Logits/chosen: -0.9594
32
 
33
+ # Use the model
34
+
35
+ ```
36
+ from transformers import AutoTokenizer, AutoModelForCausalLM
37
+
38
+ tokenizer = AutoTokenizer.from_pretrained('ChocoLlama/ChocoLlama-2-7B-instruct')
39
+ model = AutoModelForCausalLM.from_pretrained('ChocoLlama/ChocoLlama-2-7B-instruct', device_map="auto")
40
+
41
+ messages = [
42
+ {"role": "system", "content": "Je bent een artificiële intelligentie-assistent en geeft behulpzame, gedetailleerde en beleefde antwoorden op de vragen van de gebruiker."},
43
+ {"role": "user", "content": "Jacques brel, Willem Elsschot en Jan Jambon zitten op café. Waar zouden ze over babbelen?"},
44
+ ]
45
+
46
+ input_ids = tokenizer.apply_chat_template(
47
+ messages,
48
+ add_generation_prompt=True,
49
+ return_tensors="pt"
50
+ ).to(model.device)
51
+
52
+ terminators = [
53
+ tokenizer.eos_token_id,
54
+ tokenizer.convert_tokens_to_ids("<|eot_id|>")
55
+ ]
56
+
57
+ outputs = model.generate(
58
+ input_ids,
59
+ max_new_tokens=512,
60
+ eos_token_id=terminators,
61
+ do_sample=True,
62
+ temperature=0.8,
63
+ top_p=0.95,
64
+ )
65
+ response = outputs[0][input_ids.shape[-1]:]
66
+ print(tokenizer.decode(response, skip_special_tokens=True))
67
+
68
+ ```
69
+
70
  ## Model description
71
 
72
  More information needed