mesutdmn committed on
Commit e7f2ddb · 1 Parent(s): 96dfa99

Add application file

Files changed (3)
  1. Dockerfile +13 -0
  2. app.py +83 -0
  3. requirements.txt +5 -0
Dockerfile ADDED
@@ -0,0 +1,13 @@
+ FROM python:3.9
+
+ RUN useradd -m -u 1000 user
+ USER user
+ ENV PATH="/home/user/.local/bin:$PATH"
+
+ WORKDIR /app
+
+ COPY --chown=user ./requirements.txt requirements.txt
+ RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+ COPY --chown=user . /app
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
app.py ADDED
@@ -0,0 +1,83 @@
+ import torch
+ from tokenizers import Tokenizer
+ from torch.utils.data import DataLoader
+ import uvicorn
+ from fastapi import FastAPI
+ from pydantic import BaseModel, Field
+
+ from model import CustomDataset, TransformerEncoder, load_model_to_cpu
+
+ app = FastAPI()
+
+ # Label mapping for the token-level sentiment model; "org" (id 4) marks entity continuation tokens.
+ tag2id = {"O": 0, "olumsuz": 1, "nötr": 2, "olumlu": 3, "org": 4}
+ id2tag = {value: key for key, value in tag2id.items()}
+
+ device = torch.device('cpu')
+
+ def predict_fonk(model, device, example, tokenizer):
+     model.to(device)
+     model.eval()
+     predictions = []
+
+     encodings_prdict = tokenizer.encode(example)
+
+     predict_texts = [encodings_prdict.tokens]
+     predict_input_ids = [encodings_prdict.ids]
+     predict_attention_masks = [encodings_prdict.attention_mask]
+     predict_token_type_ids = [encodings_prdict.type_ids]
+     prediction_labels = [encodings_prdict.type_ids]  # placeholder labels (not used at inference)
+
+     predict_data = CustomDataset(predict_texts, predict_input_ids, predict_attention_masks,
+                                  predict_token_type_ids, prediction_labels)
+
+     predict_loader = DataLoader(predict_data, batch_size=1, shuffle=False)
+
+     with torch.no_grad():
+         for dataset in predict_loader:
+             batch_input_ids = dataset['input_ids'].to(device)
+             batch_att_mask = dataset['attention_mask'].to(device)
+
+             outputs = model(batch_input_ids, batch_att_mask)
+             logits = outputs.view(-1, outputs.size(-1))  # Flatten the outputs
+             _, predicted = torch.max(logits, 1)
+
+             # Ignore padding tokens for predictions
+             predictions.append(predicted)
+
+     results_list = []
+     entity_list = []
+     results_dict = {}
+     for i, (token, label, attention) in enumerate(zip(predict_loader.dataset[0]["text"], predictions[0].tolist(),
+                                                       predict_attention_masks[0])):
+         if attention != 0 and label != 0 and label != 4 and token not in [sep for sepx in entity_list for sep in sepx.split()]:
+             # Merge continuation tokens (label 4, "org") into the current entity.
+             for next_ones in predictions[0].tolist()[i + 1:]:
+                 i += 1
+                 if next_ones == 4:
+                     token = token + " " + predict_loader.dataset[0]["text"][i]
+                 else:
+                     break
+             if token not in entity_list:
+                 entity_list.append(token)
+                 results_list.append({"entity": token, "sentiment": id2tag.get(label)})
+
+     results_dict["entity_list"] = entity_list
+     results_dict["results"] = results_list
+
+     return results_dict
+
+ class Item(BaseModel):
+     text: str = Field(..., example="""Fiber 100mb SuperOnline kullanıcısıyım yaklaşık 2 haftadır @Twitch @Kick_Turkey gibi canlı yayın platformlarında 360p yayın izlerken donmalar yaşıyoruz. Başka hiç bir operatörler bu sorunu yaşamazken ben parasını verip alamadığım hizmeti neden ödeyeyim ? @Turkcell """)
+
+ @app.post("/predict/", response_model=dict)
+ async def predict(item: Item):
+     model = TransformerEncoder()
+     model, start_epoch = load_model_to_cpu(model, "model.pth")
+     tokenizer = Tokenizer.from_file("tokenizer.json")
+
+     predict_list = predict_fonk(model=model, device=device, example=item.text, tokenizer=tokenizer)
+
+     # The model's output is returned here.
+     # The output format will follow the example below.
+     return predict_list
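A minimal client sketch for the endpoint above, assuming the service is reachable on localhost port 7860 (the port exposed in the Dockerfile CMD); the `requests` package and the sample input text are used only for illustration and are not part of this commit or of requirements.txt.

import requests

# Hypothetical local base URL; adjust to wherever the Space/container is served.
url = "http://localhost:7860/predict/"
payload = {"text": "Turkcell internet hızı çok iyi ama müşteri hizmetleri yavaş."}  # illustrative input

response = requests.post(url, json=payload)
response.raise_for_status()
# predict_fonk returns {"entity_list": [...], "results": [{"entity": ..., "sentiment": ...}, ...]}
print(response.json())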
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ torch==2.3.0
+ tokenizers==0.13.3
+ uvicorn
+ fastapi
+ pydantic