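"""Fine-tune microsoft/deberta-v3-large as a 4-class text classifier with PyTorch Lightning.

Loads data_cleaned2.csv, derives class weights from label frequencies, tokenizes with the
DeBERTa tokenizer, and trains on 2 GPUs via DDP with mixed precision, checkpointing and
early stopping on the weighted validation F1.
"""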
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from datasets import Dataset
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, classification_report
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
from pytorch_lightning.strategies import DDPStrategy
from transformers import AutoTokenizer, AutoModel, DataCollatorWithPadding, get_cosine_schedule_with_warmup
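
# LightningModule wrapping the DeBERTa-v3-large encoder with a dropout + LayerNorm/ReLU/Linear head.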
class DebertaClassifier(pl.LightningModule):
    def __init__(self, num_labels=4, lr=2e-5, class_weights=None):
        super().__init__()
        self.save_hyperparameters()
        self.model = AutoModel.from_pretrained("microsoft/deberta-v3-large")
        self.dropout = nn.Dropout(0.3)
        self.classifier = nn.Sequential(
            nn.LayerNorm(self.model.config.hidden_size),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(self.model.config.hidden_size, num_labels),
        )
        # Weighted cross-entropy to counter label imbalance when class_weights is given.
        if class_weights is not None:
            weights = torch.tensor(class_weights, dtype=torch.float32)
            self.loss_fn = nn.CrossEntropyLoss(weight=weights)
        else:
            self.loss_fn = nn.CrossEntropyLoss()
    def forward(self, input_ids, attention_mask):
        outputs = self.model(input_ids=input_ids, attention_mask=attention_mask)
        # Pool the [CLS] token representation for classification.
        cls_output = outputs.last_hidden_state[:, 0, :]
        cls_output = self.dropout(cls_output)
        return self.classifier(cls_output)
    def training_step(self, batch, batch_idx):
        input_ids, attention_mask, labels = batch["input_ids"], batch["attention_mask"], batch["labels"]
        logits = self(input_ids, attention_mask)
        loss = self.loss_fn(logits, labels)
        preds = torch.argmax(logits, dim=1)
        acc = accuracy_score(labels.cpu(), preds.cpu())
        self.log("train_loss", loss, prog_bar=True)
        self.log("train_acc", acc, prog_bar=True)
        return loss
    def validation_step(self, batch, batch_idx):
        input_ids, attention_mask, labels = batch["input_ids"], batch["attention_mask"], batch["labels"]
        logits = self(input_ids, attention_mask)
        loss = self.loss_fn(logits, labels)
        preds = torch.argmax(logits, dim=1)
        acc = accuracy_score(labels.cpu(), preds.cpu())
        f1 = f1_score(labels.cpu(), preds.cpu(), average="weighted")
        # sync_dist=True so validation metrics are averaged across both DDP ranks.
        self.log("val_loss", loss, prog_bar=True, sync_dist=True)
        self.log("val_acc", acc, prog_bar=True, sync_dist=True)
        self.log("val_f1", f1, prog_bar=True, sync_dist=True)
    def configure_optimizers(self):
        optimizer = torch.optim.AdamW(self.parameters(), lr=self.hparams.lr)
        scheduler = get_cosine_schedule_with_warmup(
            optimizer,
            num_warmup_steps=100,
            num_training_steps=self.trainer.estimated_stepping_batches,
        )
        # The scheduler goes in its own dict so Lightning steps it every batch, not every epoch.
        return {
            "optimizer": optimizer,
            "lr_scheduler": {"scheduler": scheduler, "interval": "step"},
        }
if __name__ == "__main__":
    df = pd.read_csv("data_cleaned2.csv")
    print(df.head())

    # Inverse-frequency class weights, normalized to sum to 1.
    class_counts = df["labels"].value_counts().sort_index().tolist()
    class_weights = 1.0 / np.array(class_counts)
    class_weights = class_weights / class_weights.sum()

    # 80/20 train/validation split.
    train_df = df.sample(frac=0.8, random_state=42)
    val_df = df.drop(train_df.index)

    tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v3-large")

    def tokenize(batch):
        return tokenizer(batch["text"], truncation=True)

    train_dataset = Dataset.from_pandas(train_df).map(tokenize, batched=True)
    val_dataset = Dataset.from_pandas(val_df).map(tokenize, batched=True)
    train_dataset.set_format("torch", columns=["input_ids", "attention_mask", "labels"])
    val_dataset.set_format("torch", columns=["input_ids", "attention_mask", "labels"])

    # Dynamic padding per batch instead of padding the whole dataset up front.
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
    train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True, num_workers=8, collate_fn=data_collator)
    val_loader = DataLoader(val_dataset, batch_size=16, num_workers=8, collate_fn=data_collator)
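
    # Keep the two best checkpoints by validation F1; stop early after 3 epochs without improvement.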
    checkpoint_callback = ModelCheckpoint(
        dirpath="checkpoints/",
        filename="deberta3-{epoch:02d}-{val_f1:.2f}",
        save_top_k=2,
        monitor="val_f1",
        mode="max",
        save_weights_only=True,
        every_n_epochs=1,
    )
    early_stopping = EarlyStopping(
        monitor="val_f1",
        patience=3,
        mode="max",
        verbose=True,
    )
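
    # Train on 2 GPUs with DDP and fp16 mixed precision.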
    trainer = pl.Trainer(
        accelerator="gpu",
        devices=2,
        strategy=DDPStrategy(find_unused_parameters=False),
        max_epochs=10,
        precision=16,
        log_every_n_steps=10,
        callbacks=[checkpoint_callback, early_stopping],
    )
    model = DebertaClassifier(class_weights=class_weights)
    trainer.fit(model, train_loader, val_loader)