aur-beau committed on
Commit d329a6c · 1 Parent(s): 5ce12f1

fixed errors

Files changed (2)
  1. tasks/models/model_nn.pth +3 -0
  2. tasks/text.py +31 -13
tasks/models/model_nn.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45fab4b08e520f45893c39b8a94c1d444f59268393fd06635d3bb4d36b0b16e2
+ size 439226
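
This is a Git LFS pointer: it records the SHA-256 and byte size of the actual weights blob, which git-lfs fetches on checkout (e.g. with git lfs pull). As a quick sanity check, a minimal sketch (assuming the checkout has been hydrated and using the values from the pointer above) could verify the local file:

import hashlib, os

EXPECTED_OID = "45fab4b08e520f45893c39b8a94c1d444f59268393fd06635d3bb4d36b0b16e2"  # from the pointer above
EXPECTED_SIZE = 439226                                                              # from the pointer above

path = "tasks/models/model_nn.pth"
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch: file may still be an un-fetched LFS pointer"

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
assert sha.hexdigest() == EXPECTED_OID, "content does not match the committed LFS object"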
tasks/text.py CHANGED
@@ -61,32 +61,50 @@ async def evaluate_text(request: TextEvaluationRequest):
  #--------------------------------------------------------------------------------------------
  # YOUR MODEL INFERENCE CODE HERE

- #Load the embedding model
- #model = SentenceTransformer("dunzhang/stella_en_400M_v5",trust_remote_code=True)
+ # Set the device to MPS (if available)
+ device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")
+ print(f"Using device: {device}")
+
  model_name = "sentence-transformers/paraphrase-multilingual-mpnet-base-v2"  # You can use other Sentence Transformers models as needed
  sentence_model = SentenceTransformer(model_name)

  # Convert each sentence into a vector representation (embedding)
- embeddings = sentence_model.encode(test_dataset['quote'])
+ embeddings = sentence_model.encode(test_dataset['quote'], convert_to_tensor=True)

- # Update the code below to replace the random baseline by your model inference within the inference pass where the energy consumption and emissions are tracked.
- #--------------------------------------------------------------------------------------------
-
  # Make random predictions (placeholder for actual model inference)
  true_labels = test_dataset["label"]

-
- #load the xgboost model
- #with open("models/stella_400_xgb_500.pkl",'rb') as f:
+ # load the xgboost model
+ # with open("models/stella_400_xgb_500.pkl",'rb') as f:
  # xgbclassifier = pickle.load(f)
-
- model_nn = torch.load("models/model_nn.pth")
+ from torch import nn, optim
+
+ class SimpleNN2(nn.Module):
+     def __init__(self, input_dim, output_dim):
+         super(SimpleNN2, self).__init__()
+         self.fc1 = nn.Linear(input_dim, 128)   # Reduce hidden units
+         self.fc2 = nn.Linear(128, 64)          # Further reduce units
+         self.fc3 = nn.Linear(64, output_dim)
+         self.relu = nn.ReLU()
+         self.dropout = nn.Dropout(0.3)         # Add dropout
+         self.batch_norm1 = nn.BatchNorm1d(128)
+         self.batch_norm2 = nn.BatchNorm1d(64)
+
+     def forward(self, x):
+         x = self.relu(self.batch_norm1(self.fc1(x)))
+         x = self.dropout(x)  # Apply dropout
+         x = self.relu(self.batch_norm2(self.fc2(x)))
+         x = self.dropout(x)  # Apply dropout
+         x = self.fc3(x)      # Output raw logits
+         return x
+
+ model_nn = torch.load("../tasks/models/model_nn.pth", map_location=device)

  # Set the model to evaluation mode
  model_nn.eval()

- #make inference
- #predictions = xgbclassifier.predict(embeddings)
+ # make inference
+ # predictions = xgbclassifier.predict(embeddings)

  # Make predictions
  with torch.no_grad():
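
The checkpoint appears to have been saved as a whole pickled module (torch.save(model)) rather than a state_dict, which is presumably why the SimpleNN2 class is re-declared before torch.load so the pickle can resolve it. The hunk ends at the torch.no_grad() block, so the actual prediction step is not shown in this commit; a minimal sketch of how it might continue, assuming the illustrative names outputs and predictions and the embeddings/model_nn defined above:

with torch.no_grad():
    outputs = model_nn(embeddings.to(device))            # forward pass over the encoded quotes
    predictions = outputs.argmax(dim=1).cpu().tolist()   # predicted class index per quote, back on CPU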