Spaces:
Sleeping
Sleeping
File size: 6,035 Bytes
e6d07cd 4d6e8c2 ece5856 ebc55a1 1f08781 4d6e8c2 ed458ce 4d6e8c2 c339ecd 1c33274 7eb6153 aaabc84 2c8310a 4d6e8c2 2c8310a e6d07cd 2c8310a e6d07cd 85c5204 e6d07cd 2c8310a aaabc84 6f0e9af 2c8310a 6f0e9af c339ecd 869fc52 275c5df 485bf3f 275c5df c339ecd b10572e a8d5c7d b10572e 275c5df 62bb2f0 869fc52 275c5df 869fc52 62bb2f0 2c8310a b4aa97d 2c8310a e6d07cd ed458ce 275c5df 2c8310a 6f0e9af 2c8310a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 |
from fastapi import APIRouter
from datetime import datetime
from datasets import load_dataset
from sklearn.metrics import accuracy_score
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification, ModernBertConfig
from torch.utils.data import DataLoader
from transformers import DataCollatorWithPadding
from .utils.evaluation import TextEvaluationRequest
from .utils.emissions import tracker, clean_emissions_data, get_space_info
# Route metadata for the text-classification evaluation endpoint.
DESCRIPTION = "Climate Guard Toxic Agent is a ModernBERT for Climate Disinformation Detection"
ROUTE = "/text"

# Router instance registered by the application that mounts this module.
router = APIRouter()
@router.post(ROUTE, tags=["Text Task"],
             description=DESCRIPTION)
async def evaluate_text(request: TextEvaluationRequest):
    """
    Evaluate text classification for climate disinformation detection using ModernBERT.

    Loads the test split of the requested dataset, runs batched inference with a
    ModernBERT sequence classifier, and returns accuracy alongside the energy /
    emissions figures recorded by the tracker during the inference phase.

    Args:
        request: Evaluation request carrying ``dataset_name``, ``test_size``
            and ``test_seed`` (the latter two are echoed back in the results).

    Returns:
        dict: Submission payload with accuracy, emissions data and metadata.

    Raises:
        Re-raises any exception from model loading / inference after logging it
        and stopping the emissions tracking task.
    """
    # Get space info (submitting user and space URL) for the results payload.
    username, space_url = get_space_info()

    # Map the dataset's string labels to integer class ids (8-way classification).
    LABEL_MAPPING = {
        "0_not_relevant": 0,
        "1_not_happening": 1,
        "2_not_human": 2,
        "3_not_bad": 3,
        "4_solutions_harmful_unnecessary": 4,
        "5_science_unreliable": 5,
        "6_proponents_biased": 6,
        "7_fossil_fuels_needed": 7
    }

    # Load the dataset and convert string labels to integer ids.
    dataset = load_dataset(request.dataset_name)
    dataset = dataset.map(lambda x: {"label": LABEL_MAPPING[x["label"]]})

    # Only the test split is evaluated.
    test_dataset = dataset["test"]

    # Start tracking emissions for the inference phase only (dataset prep above
    # is deliberately excluded from the measurement).
    tracker.start()
    tracker.start_task("inference")

    #--------------------------------------------------------------------------------------------
    # MODEL INFERENCE CODE
    #--------------------------------------------------------------------------------------------
    try:
        # Prefer GPU when available.
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Model and tokenizer paths (same repo hosts both).
        model_name = "Tonic/climate-guard-toxic-agent"
        tokenizer_name = "Tonic/climate-guard-toxic-agent"

        # Explicit config so label mapping and head size match the 8 classes,
        # regardless of what the checkpoint's own config says.
        config = ModernBertConfig(
            vocab_size=50368,
            hidden_size=768,
            num_hidden_layers=22,
            num_attention_heads=12,
            intermediate_size=1152,
            max_position_embeddings=8192,
            layer_norm_eps=1e-5,
            position_embedding_type="absolute",
            pad_token_id=50283,
            bos_token_id=50281,
            eos_token_id=50282,
            sep_token_id=50282,
            cls_token_id=50281,
            hidden_activation="gelu",
            classifier_activation="gelu",
            classifier_pooling="mean",
            num_labels=8,
            id2label={str(i): label for i, label in enumerate(LABEL_MAPPING.keys())},
            label2id=LABEL_MAPPING,
            problem_type="single_label_classification",
            architectures=["ModernBertForSequenceClassification"],
            model_type="modernbert"
        )

        # Load tokenizer and model; fp16 on GPU to cut memory/latency,
        # fp32 on CPU where half precision is typically unsupported/slow.
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
        model = AutoModelForSequenceClassification.from_pretrained(
            model_name,
            config=config,
            trust_remote_code=True,
            ignore_mismatched_sizes=True,
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
        ).to(device)

        # Inference only — disable dropout etc.
        model.eval()

        def preprocess_function(examples):
            # Tokenize without padding; the collator pads per-batch instead,
            # which avoids padding every example to max_length.
            return tokenizer(
                examples["quote"],
                padding=False,
                truncation=True,
                max_length=512,
                return_tensors=None
            )

        # Tokenize the test split; drop original columns (including "label")
        # so batches contain only tokenizer outputs for model(**batch).
        tokenized_test = test_dataset.map(
            preprocess_function,
            batched=True,
            remove_columns=test_dataset.column_names
        )
        tokenized_test.set_format("torch")

        # Dynamic per-batch padding; shuffle=False keeps prediction order
        # aligned with test_dataset["label"] for the accuracy computation.
        data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
        test_loader = DataLoader(
            tokenized_test,
            batch_size=16,
            collate_fn=data_collator,
            shuffle=False
        )

        # Collect argmax predictions batch by batch.
        predictions = []
        with torch.no_grad():
            for batch in test_loader:
                batch = {k: v.to(device) for k, v in batch.items()}
                outputs = model(**batch)
                preds = torch.argmax(outputs.logits, dim=-1)
                predictions.extend(preds.cpu().numpy().tolist())

        # Clean up GPU memory
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

    except Exception as e:
        # Stop the emissions task so the tracker is not left running after a
        # failure (best effort — the original error is what we want to surface).
        try:
            tracker.stop_task()
        except Exception:
            pass
        print(f"Error during model inference: {str(e)}")
        raise
    #--------------------------------------------------------------------------------------------
    # MODEL INFERENCE ENDS HERE
    #--------------------------------------------------------------------------------------------

    # Stop tracking emissions (success path).
    emissions_data = tracker.stop_task()

    # Accuracy against the integer-mapped ground-truth labels.
    accuracy = accuracy_score(test_dataset["label"], predictions)

    # Submission payload; energy is converted kWh -> Wh and emissions kg -> g.
    results = {
        "username": username,
        "space_url": space_url,
        "submission_timestamp": datetime.now().isoformat(),
        "model_description": DESCRIPTION,
        "accuracy": float(accuracy),
        "energy_consumed_wh": emissions_data.energy_consumed * 1000,
        "emissions_gco2eq": emissions_data.emissions * 1000,
        "emissions_data": clean_emissions_data(emissions_data),
        "api_route": ROUTE,
        "dataset_config": {
            "dataset_name": request.dataset_name,
            "test_size": request.test_size,
            "test_seed": request.test_seed
        }
    }
    return results