perman2011 committed on
Commit 6ef12b9 · 1 Parent(s): bab1cdc

Delete DistilBERT.py

Files changed (1)
  1. DistilBERT.py +0 -145
DistilBERT.py DELETED
@@ -1,145 +0,0 @@
- import transformers
- import torch
- from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
- from transformers import DistilBertTokenizer, DistilBertModel
- import logging
- logging.basicConfig(level=logging.ERROR)
- import torch.nn as nn
- from torch.nn import functional as F
- import torch.optim as optim
- import pandas as pd
- import numpy as np
- from tqdm import tqdm  # used by train() and validation() below
-
- # Adjust the hyperparameters
- device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
- MAX_LEN = 100
- TRAIN_BATCH_SIZE = 4
- VALID_BATCH_SIZE = 4
- EPOCHS = 1
- LEARNING_RATE = 1e-05
- tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased', truncation=True, do_lower_case=True)
-
- # Create the dataframes
- train_df_DB = pd.read_csv('./data/train.csv')
- train_df_DB['label'] = train_df_DB.iloc[:, 1:].values.tolist()
- test_df_DB = pd.read_csv('./data/test.csv')
- test_df_DB = test_df_DB[['text', 'preprocess_sentence', 'label']]
- test_df_DB['label'] = test_df_DB.iloc[:, 2:].values.tolist()
-
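- # NOTE (assumption, not verifiable from this file): train.csv is taken to hold
- # a 'text' column first and a single binary label column after it, and test.csv
- # to hold 'text', 'preprocess_sentence' and 'label'. One label column is
- # implied because the classifier head below emits one logit and
- # BCEWithLogitsLoss requires outputs and targets of matching shape.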
- # Create the Dataset class
- class BinaryLabel(Dataset):
-
-     def __init__(self, dataframe, tokenizer, max_len):
-         self.tokenizer = tokenizer
-         self.data = dataframe
-         self.text = dataframe.text
-         self.targets = self.data.label
-         self.max_len = max_len
-
-     def __len__(self):
-         return len(self.text)
-
-     def __getitem__(self, index):
-         # Collapse runs of whitespace before tokenizing
-         text = str(self.text[index])
-         text = " ".join(text.split())
-
-         inputs = self.tokenizer.encode_plus(
-             text,
-             None,
-             add_special_tokens=True,
-             max_length=self.max_len,
-             padding='max_length',  # pad_to_max_length=True is deprecated
-             truncation=True,
-             return_token_type_ids=True
-         )
-         ids = inputs['input_ids']
-         mask = inputs['attention_mask']
-         token_type_ids = inputs["token_type_ids"]
-
-         return {
-             'ids': torch.tensor(ids, dtype=torch.long),
-             'mask': torch.tensor(mask, dtype=torch.long),
-             'token_type_ids': torch.tensor(token_type_ids, dtype=torch.long),
-             'targets': torch.tensor(self.targets[index], dtype=torch.float)
-         }
-
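- # A single item, e.g. training_set[0], is a dict of 1-D LongTensors of length
- # MAX_LEN ('ids', 'mask', 'token_type_ids') plus a float 'targets' tensor.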
- train_params = {'batch_size': TRAIN_BATCH_SIZE,
-                 'shuffle': True,
-                 'num_workers': 0
-                 }
-
- test_params = {'batch_size': VALID_BATCH_SIZE,
-                'shuffle': True,
-                'num_workers': 0
-                }
-
- training_set = BinaryLabel(train_df_DB, tokenizer, MAX_LEN)
- testing_set = BinaryLabel(test_df_DB, tokenizer, MAX_LEN)
-
- training_loader = DataLoader(training_set, **train_params)
- testing_loader = DataLoader(testing_set, **test_params)
-
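- # shuffle=True on the test loader is harmless here, since validation()
- # collects outputs and targets pairwise, but shuffle=False would be the
- # more conventional setting for evaluation.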
- # Create model
- class DistilBERTClass(torch.nn.Module):
-     def __init__(self):
-         super(DistilBERTClass, self).__init__()
-         self.l1 = DistilBertModel.from_pretrained("distilbert-base-uncased")
-         self.pre_classifier = torch.nn.Linear(768, 768)
-         self.dropout = torch.nn.Dropout(0.1)
-         self.classifier = torch.nn.Linear(768, 1)
-
-     def forward(self, input_ids, attention_mask, token_type_ids):
-         # DistilBERT takes no token_type_ids; the argument is accepted here
-         # only so the call sites below can pass batches uniformly.
-         output_1 = self.l1(input_ids=input_ids, attention_mask=attention_mask)
-         hidden_state = output_1[0]
-         pooler = hidden_state[:, 0]  # [CLS] token representation
-         pooler = self.pre_classifier(pooler)
-         pooler = torch.nn.ReLU()(pooler)
-         pooler = self.dropout(pooler)
-         output = self.classifier(pooler)
-         return output  # raw logits, shape (batch, 1)
-
-
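- # This head mirrors DistilBertForSequenceClassification from transformers:
- # [CLS] hidden state -> Linear(768, 768) -> ReLU -> Dropout -> Linear(768, 1).
- # BCEWithLogitsLoss below applies the sigmoid, so no activation is needed here.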
- # Validation function
- def validation(testing_loader):
-     model_DB.eval()
-     fin_targets = []
-     fin_outputs = []
-     with torch.no_grad():
-         for _, data in tqdm(enumerate(testing_loader, 0)):
-             ids = data['ids'].to(device, dtype=torch.long)
-             mask = data['mask'].to(device, dtype=torch.long)
-             token_type_ids = data['token_type_ids'].to(device, dtype=torch.long)
-             targets = data['targets'].to(device, dtype=torch.float)
-             outputs = model_DB(ids, mask, token_type_ids)
-             fin_targets.extend(targets.cpu().detach().numpy().tolist())
-             fin_outputs.extend(torch.sigmoid(outputs).cpu().detach().numpy().tolist())
-     return fin_outputs, fin_targets
-
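- # validation() returns sigmoid probabilities and ground-truth labels as plain
- # Python lists, ready for thresholding or standard classification metrics.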
- # Train function
- def train(epoch):
-     model_DB.train()
-     for _, data in tqdm(enumerate(training_loader, 0)):
-         ids = data['ids'].to(device, dtype=torch.long)
-         mask = data['mask'].to(device, dtype=torch.long)
-         token_type_ids = data['token_type_ids'].to(device, dtype=torch.long)
-         targets = data['targets'].to(device, dtype=torch.float)
-
-         outputs = model_DB(ids, mask, token_type_ids)
-
-         optimizer.zero_grad()
-         loss = loss_fn(outputs, targets)
-         if _ % 50 == 0:
-             print(f'Epoch: {epoch}, Loss: {loss.item()}')
-             if loss.item() < 0.07:
-                 print(f'Breaking the loop as loss is below 0.07: {loss.item()}')
-                 break
-         loss.backward()
-         optimizer.step()
-
- def loss_fn(outputs, targets):
-     return torch.nn.BCEWithLogitsLoss()(outputs, targets)
-
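- # loss_fn is referenced inside train() before it is defined above; Python
- # resolves the name at call time, so this works, though defining it first
- # would read more naturally.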
- model_DB = DistilBERTClass()
- model_DB.to(device)  # without this, CUDA inputs would hit a CPU-resident model
- optimizer = torch.optim.Adam(params=model_DB.parameters(), lr=LEARNING_RATE)
-
- loaded_model_path = './model_DB_1.pt'
- model_DB.load_state_dict(torch.load(loaded_model_path, map_location=torch.device('cpu')))
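
As committed, the script only restores saved weights and never calls its own train() or validation(). A minimal driver loop, sketched from nothing but the names the file defines (the 0.5 decision threshold is an assumption), might have been:

    for epoch in range(EPOCHS):
        train(epoch)
    probs, targets = validation(testing_loader)
    preds = (np.array(probs) >= 0.5).astype(int)  # assumed decision threshold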