Update README.md
README.md
CHANGED
```diff
@@ -82,7 +82,7 @@ def instruction_generator(system_message: str, num_instructions: int) -> str:
     if num_instructions < 1:
         raise ValueError
     magpie_template = f"<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n"
-    input_ids = tokenizer(
+    input_ids = tokenizer(magpie_template, return_tensors="pt").input_ids.to("cuda")
     for idx in range(num_instructions):
         generated_ids = model.generate(input_ids, max_new_tokens=512, temperature=0.9, repetition_penalty=1.1, do_sample=True, eos_token_id=tokenizer.eos_token_id)
         response = tokenizer.decode(generated_ids[0][input_ids.shape[-1]:], skip_special_tokens=True, clean_up_tokenization_space=True)
```
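The fix completes the previously truncated `tokenizer(` call: the Magpie prompt prefix is now tokenized into PyTorch tensors and moved to the GPU before generation. For context, here is a minimal runnable sketch of the snippet after this change. The imports, the model name, the `ValueError` message, and the joined-string return value are assumptions added for illustration (the README loads `model` and `tokenizer` elsewhere). Note also that the transformers `decode` kwarg is spelled `clean_up_tokenization_spaces`; the diff's `clean_up_tokenization_space` would be silently ignored, so the sketch uses the correct spelling.

```python
# Sketch only: model name and setup are placeholders, not the README's actual choices.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "Qwen/Qwen2.5-0.5B-Instruct"  # placeholder; any ChatML-style instruct model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16).to("cuda")

def instruction_generator(system_message: str, num_instructions: int) -> str:
    if num_instructions < 1:
        raise ValueError("num_instructions must be >= 1")  # message added for illustration
    # Magpie-style prompt: the template stops right after the user turn opens,
    # so the model itself writes the user instruction.
    magpie_template = f"<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n"
    input_ids = tokenizer(magpie_template, return_tensors="pt").input_ids.to("cuda")
    instructions = []
    for _ in range(num_instructions):
        generated_ids = model.generate(
            input_ids,
            max_new_tokens=512,
            temperature=0.9,
            repetition_penalty=1.1,
            do_sample=True,
            eos_token_id=tokenizer.eos_token_id,
        )
        # Decode only the newly generated tokens, skipping the prompt prefix.
        response = tokenizer.decode(
            generated_ids[0][input_ids.shape[-1]:],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=True,  # corrected kwarg spelling
        )
        instructions.append(response)
    return "\n".join(instructions)  # assumed return; the diff hunk ends before the return
```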