mwitiderrick commited on
Commit
5b87189
·
1 Parent(s): e17167a

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +18 -4
README.md CHANGED
@@ -72,14 +72,28 @@ This is an [OpenLlama model](https://huggingface.co/openlm-research/open_llama_3
72
  ```python
73
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
74
 
75
- tokenizer = AutoTokenizer.from_pretrained("mwitiderrick/open_llama_3b_instruct_v_0.2")
76
- model = AutoModelForCausalLM.from_pretrained("mwitiderrick/open_llama_3b_instruct_v_0.2")
77
- query = "Provide step-by-step instructions for making a sweet chicken burger"
78
- text_gen = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=500)
79
  output = text_gen(f"### Instruction:\n{query}\n### Response:\n")
80
  print(output[0]['generated_text'])
81
  """
 
 
 
 
 
 
 
 
 
 
 
 
82
 
 
 
83
  """
84
  ```
85
  ## Metrics
 
72
  ```python
73
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
74
 
75
+ tokenizer = AutoTokenizer.from_pretrained("mwitiderrick/open_llama_3b_code_instruct_v0.1")
76
+ model = AutoModelForCausalLM.from_pretrained("mwitiderrick/open_llama_3b_code_instruct_v0.1")
77
+ query = "write a quick sort algorithm in Python"
78
+ text_gen = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=200)
79
  output = text_gen(f"### Instruction:\n{query}\n### Response:\n")
80
  print(output[0]['generated_text'])
81
  """
82
+ ### Instruction:
83
+ write a quick sort algorithm in Python
84
+ ### Response:
85
+ def quick_sort(arr):
86
+     if len(arr) <= 1:
87
+         return arr
88
+     else:
89
+         pivot = arr[len(arr) // 2]
90
+         left = [x for x in arr if x < pivot]
91
+         middle = [x for x in arr if x == pivot]
92
+         right = [x for x in arr if x > pivot]
93
+         return quick_sort(left) + middle + quick_sort(right)
94

95
+ arr = [1, 2, 3, 4, 5]
96
+ print(quick_sort(arr))
97
  """
98
  ```
99
  ## Metrics