Commit · aab561f
1 Parent(s): 360bf72

Update README.md
README.md CHANGED
@@ -72,7 +72,7 @@ tokenizer = AutoTokenizer.from_pretrained("mwitiderrick/open_llama_3b_glaive_ass
 model = AutoModelForCausalLM.from_pretrained("mwitiderrick/open_llama_3b_glaive_assistant_v0.1")
 query = "Write a quick sort algorithm in Python"
 text_gen = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=200)
-output = text_gen(f<s>[INST]{query}[/INST]")
+output = text_gen(f"<s>[INST]{query}[/INST]")
 print(output[0]['generated_text'])
 """
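For reference, here is a minimal, self-contained sketch of the README usage snippet after this fix. It assumes the Hugging Face transformers library is installed; the model name, prompt format, and generation settings are taken from the diff above.

```python
# Minimal sketch of the README usage example after this commit's fix,
# assuming the Hugging Face transformers library is installed.
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

tokenizer = AutoTokenizer.from_pretrained("mwitiderrick/open_llama_3b_glaive_assistant_v0.1")
model = AutoModelForCausalLM.from_pretrained("mwitiderrick/open_llama_3b_glaive_assistant_v0.1")

query = "Write a quick sort algorithm in Python"
text_gen = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=200)

# The commit adds the missing opening quote to this f-string.
output = text_gen(f"<s>[INST]{query}[/INST]")
print(output[0]['generated_text'])
```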