Update README.md
Browse files
README.md
CHANGED
@@ -78,25 +78,25 @@ fine-tuned further for similar tasks in different contexts.
|
|
78 |
Use the code snippet below to get started with the model:
|
79 |
|
80 |
```python
|
81 |
-
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
82 |
import torch
|
|
|
|
|
83 |
|
84 |
model_name = "tferhan/Intent-GovMa-v1"
|
85 |
|
86 |
-
# Load the tokenizer and model
|
87 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
88 |
-
model =
|
89 |
-
|
90 |
-
|
91 |
-
questions = ["qu'est ce que open data",
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
print(f"predicted type of question: {class_names[torch.argmax(outputs.logits).item()]}\n")
|
100 |
```
|
101 |
|
102 |
## Training Details
|
|
|
78 |
Use the code snippet below to get started with the model:
|
79 |
|
80 |
```python
|
81 |
+
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
|
82 |
import torch
|
83 |
+
from peft import AutoPeftModelForSequenceClassification
|
84 |
+
|
85 |
|
86 |
model_name = "tferhan/Intent-GovMa-v1"
|
87 |
|
|
|
88 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
89 |
+
model = AutoPeftModelForSequenceClassification.from_pretrained(model_name)
|
90 |
+
nlp_pipeline = pipeline("text-classification", model=model, tokenizer=tokenizer, device=0 if torch.cuda.is_available() else -1)
|
91 |
+
|
92 |
+
questions = ["qu'est ce que open data", "je veux les informations de l'eau potable"]
|
93 |
+
results = nlp_pipeline(questions)
|
94 |
+
|
95 |
+
for result in results:
|
96 |
+
print(result)
|
97 |
+
|
98 |
+
#{'label': 'LABEL_0', 'score': 0.9999700784683228} === general
|
99 |
+
#{'label': 'LABEL_1', 'score': 0.9994990825653076} === request_data
|
|
|
100 |
```
|
101 |
|
102 |
## Training Details
|