```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from datasets import Dataset

device = "cuda"
path = "Unbabel/mfineweb-edu-classifier"
# Load the classifier in bfloat16 on the GPU; trust_remote_code allows the
# repository's own modelling code to be used.
model = AutoModelForSequenceClassification.from_pretrained(
    path,
    device_map=device,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
)
tokenizer = AutoTokenizer.from_pretrained(path, use_fast=True)

# Batched tokenization helper for datasets.Dataset.map (used in the batched example below).
def tokenize(examples):
    return tokenizer(examples["text"], truncation=True, max_length=512)

texts = [
    "This is a text",
    "this is another text to classify"
]

# Tokenize each text into PyTorch tensors and move them to the model's device.
model_inputs = [
    tokenizer(text, truncation=True, max_length=512, return_tensors="pt").to(device)
    for text in texts
]

# Score the texts one at a time.
with torch.no_grad():
    for model_input in model_inputs:
        output = model(**model_input)
```
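
The snippet above imports `Dataset` and defines a `tokenize` helper without using them; the sketch below shows how they fit into a batched workflow, reusing `model`, `tokenizer`, `texts`, and `device` from the snippet. The use of `DataCollatorWithPadding`, a PyTorch `DataLoader`, and the batch size of 8 are illustrative choices, not part of the original example.

```python
from torch.utils.data import DataLoader
from transformers import DataCollatorWithPadding

# Wrap the texts in a datasets.Dataset and tokenize them in batches
# with the helper defined above.
dataset = Dataset.from_dict({"text": texts})
dataset = dataset.map(tokenize, batched=True, remove_columns=["text"])

# Dynamic padding: each batch is padded only to its own longest example.
collator = DataCollatorWithPadding(tokenizer=tokenizer)
loader = DataLoader(dataset, batch_size=8, collate_fn=collator)

with torch.no_grad():
    for batch in loader:
        batch = {k: v.to(device) for k, v in batch.items()}
        outputs = model(**batch)
```

Dynamic padding keeps each batch only as long as its longest member, which avoids paying the cost of `max_length` padding on short documents.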