Update tasks/text.py
tasks/text.py (+11 −11)
@@ -45,24 +45,14 @@ async def evaluate_text(request: TextEvaluationRequest):
     }
 
     # Load and prepare the dataset
-    dataset = load_dataset(request.dataset_name)
+    dataset = load_dataset(request.dataset_name, token=os.getenv("HF_TOKEN"))
 
     # Convert string labels to integers
     dataset = dataset.map(lambda x: {"label": LABEL_MAPPING[x["label"]]})
 
     # Split dataset
-    train_test = dataset["train"]
     test_dataset = dataset["test"]
-
-    # Start tracking emissions
-    tracker.start()
-    tracker.start_task("inference")
 
-    #--------------------------------------------------------------------------------------------
-    # YOUR MODEL INFERENCE CODE HERE
-    # Update the code below to replace the random baseline by your model inference within the inference pass where the energy consumption and emissions are tracked.
-    #--------------------------------------------------------------------------------------------
-
     # Make random predictions (placeholder for actual model inference)
     true_labels = test_dataset["label"]
     predictions = [random.randint(0, 7) for _ in range(len(true_labels))]
@@ -84,6 +74,16 @@ async def evaluate_text(request: TextEvaluationRequest):
     model = PeftModel.from_pretrained(base_model, path_adapter)
     model.eval()
     tokenizer = AutoTokenizer.from_pretrained(path_model)
+
+    # Start tracking emissions
+    tracker.start()
+    tracker.start_task("inference")
+
+    #--------------------------------------------------------------------------------------------
+    # YOUR MODEL INFERENCE CODE HERE
+    # Update the code below to replace the random baseline by your model inference within the inference pass where the energy consumption and emissions are tracked.
+    #--------------------------------------------------------------------------------------------
+
     tokenizer.pad_token = tokenizer.eos_token # Or any other token depending on your model
     tokenizer.pad_token_id = tokenizer.eos_token_id
 
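Net effect of the two hunks: the dataset is now loaded with an HF_TOKEN so gated or private datasets resolve, the unused train split assignment is dropped, and the emissions-tracking start is moved from the data-preparation section to just after the model and tokenizer are loaded, presumably so that model loading is not billed to the tracked "inference" task. The sketch below illustrates that ordering in isolation; it is not the full tasks/text.py. The dataset name, the EmissionsTracker construction, and the stop_task()/stop() calls are assumptions added to make the example runnable, and the random predictions stand in for real model inference exactly as in the file's baseline.

# Minimal sketch of the ordering this commit arranges: authenticated dataset
# load, heavy setup outside the tracked region, then emissions tracking started
# only around the inference pass. The dataset name, the tracker construction and
# the stop_task()/stop() calls are illustrative assumptions, not part of the diff.
import os
import random

from codecarbon import EmissionsTracker
from datasets import load_dataset

tracker = EmissionsTracker()  # assumed here; tasks/text.py builds its tracker elsewhere

# Authenticated load, as added in this commit (token is None when HF_TOKEN is unset).
dataset = load_dataset("ag_news", token=os.getenv("HF_TOKEN"))  # placeholder dataset name
test_dataset = dataset["test"]
true_labels = test_dataset["label"]

# ... model and tokenizer loading (PeftModel / AutoTokenizer in the real file)
# would happen here, before tracking starts, so their cost is not attributed
# to the "inference" task ...

tracker.start()
tracker.start_task("inference")

# Placeholder for actual model inference, mirroring the file's random baseline.
predictions = [random.randint(0, 7) for _ in range(len(true_labels))]

inference_emissions = tracker.stop_task()  # per-task measurement for "inference"
tracker.stop()
print(f"inference emissions (kg CO2eq): {inference_emissions.emissions}")

Because PeftModel.from_pretrained and AutoTokenizer.from_pretrained can dominate start-up time and energy, starting the tracker only after they return keeps the reported per-task numbers focused on the inference pass itself.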