Update tasks/text.py
tasks/text.py  (+4, -2)
@@ -107,8 +107,10 @@ async def evaluate_text(request: TextEvaluationRequest):
     test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=batch_size)
 
     predictions = []
+    c=0
     for batch in tqdm(test_dataloader):
-
+        print(c)
+        c+=1
 
         b_input_ids, b_input_mask, b_token_type_ids = batch
         with torch.no_grad():
@@ -116,7 +118,7 @@ async def evaluate_text(request: TextEvaluationRequest):
 
         logits = logits.detach().cpu().numpy()
         predictions.extend(logits.argmax(1))
-
+
 
     true_labels = test_dataset["label"]
     # Make random predictions (placeholder for actual model inference)
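
For reference, the hunks above instrument a standard batched inference loop with a simple progress counter. The sketch below is a minimal, self-contained approximation of that loop with the same counter added; the model call, device handling, and the outputs.logits field are assumptions for illustration and are not taken from tasks/text.py.

# Minimal sketch of a batched inference loop with the debug counter from
# this commit. The model/device/output names are illustrative assumptions,
# not the actual objects defined in tasks/text.py.
import torch
from torch.utils.data import DataLoader, SequentialSampler
from tqdm import tqdm

def predict(model, test_data, batch_size=32, device="cpu"):
    test_sampler = SequentialSampler(test_data)
    test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=batch_size)

    predictions = []
    c = 0  # batch counter, printed each iteration to track progress
    for batch in tqdm(test_dataloader):
        print(c)
        c += 1

        b_input_ids, b_input_mask, b_token_type_ids = (t.to(device) for t in batch)
        with torch.no_grad():
            # Assumed transformers-style call returning an object with .logits
            outputs = model(input_ids=b_input_ids,
                            attention_mask=b_input_mask,
                            token_type_ids=b_token_type_ids)

        logits = outputs.logits.detach().cpu().numpy()
        predictions.extend(logits.argmax(1))
    return predictions

Since tqdm already wraps the dataloader, the same count is also available without a separate variable, e.g. for c, batch in enumerate(tqdm(test_dataloader)):, or directly from tqdm's own progress display.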