Update README.md
Browse files
README.md
CHANGED
@@ -105,7 +105,13 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
|
|
105 |
device = "cuda"
|
106 |
model = AutoModelForCausalLM.from_pretrained("PipableAI/pip-code-to-doc-1.3b").to(device)
|
107 |
tokenizer = AutoTokenizer.from_pretrained("PipableAI/pip-code-to-doc-1.3b")
|
108 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
109 |
inputs = tokenizer(text, return_tensors="pt")
|
110 |
outputs = model.generate(**inputs, max_new_tokens=300)
|
111 |
tokenizer.decode(outputs[0], skip_special_tokens=True).split('<doc>')[-1].split('</doc>')[0]
|
|
|
105 |
device = "cuda"
|
106 |
model = AutoModelForCausalLM.from_pretrained("PipableAI/pip-code-to-doc-1.3b").to(device)
|
107 |
tokenizer = AutoTokenizer.from_pretrained("PipableAI/pip-code-to-doc-1.3b")
|
108 |
+
prompt = f"""
|
109 |
+
<function_code>
|
110 |
+
def example_function(x):
|
111 |
+
return x * 2
|
112 |
+
</function_code>
|
113 |
+
<question>Give one line description of the python code above in natural language.</question>
|
114 |
+
<doc>"""
|
115 |
inputs = tokenizer(prompt, return_tensors="pt")
|
116 |
outputs = model.generate(**inputs, max_new_tokens=300)
|
117 |
tokenizer.decode(outputs[0], skip_special_tokens=True).split('<doc>')[-1].split('</doc>')[0]
|