Update README.md
Browse files
README.md
CHANGED
@@ -47,6 +47,7 @@ text = """<schema>{schema}</schema>
|
|
47 |
<question>{question}</question>
|
48 |
<sql>"""
|
49 |
```
|
|
|
50 |
|
51 |
```python
|
52 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
@@ -58,6 +59,14 @@ inputs = tokenizer(text, return_tensors="pt")
|
|
58 |
outputs = model.generate(**inputs, max_new_tokens=200)
|
59 |
print(tokenizer.decode(outputs[0], skip_special_tokens=True).split('<sql>')[1].split('</sql>')[0])
|
60 |
```
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
61 |
|
62 |
## The PipableAI team
|
63 |
|
|
|
47 |
<question>{question}</question>
|
48 |
<sql>"""
|
49 |
```
|
50 |
+
pytorch
|
51 |
|
52 |
```python
|
53 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
|
|
59 |
outputs = model.generate(**inputs, max_new_tokens=200)
|
60 |
print(tokenizer.decode(outputs[0], skip_special_tokens=True).split('<sql>')[1].split('</sql>')[0])
|
61 |
```
|
62 |
+
flax
|
63 |
+
|
64 |
+
```python
|
65 |
+
from transformers import FlaxAutoModelForCausalLM, AutoTokenizer
|
66 |
+
device = "cuda"
|
67 |
+
model = FlaxAutoModelForCausalLM.from_pretrained("PipableAI/pipSQL1b", from_pt=True)
|
68 |
+
tokenizer = AutoTokenizer.from_pretrained("PipableAI/pipSQL1b")
|
69 |
+
```
|
70 |
|
71 |
## The PipableAI team
|
72 |
|