Update app.py
app.py CHANGED

```diff
@@ -41,23 +41,23 @@ else:
     st.write("Please enter some text to analyze.")
 
 # Compute the performance metrics (adjust these lines to your task)
-
-
-
-
-
+
+inputs = df["text"].tolist()
+true_labels = df["label"].tolist()
+predictions = classifier(inputs, candidate_labels, hypothesis_template=hypothesis_template)
+predicted_labels = [result['labels'][0] for result in predictions]
 
-
-
-
-
-
-
-
-
-
+accuracy = accuracy_score(true_labels, predicted_labels)
+precision = precision_score(true_labels, predicted_labels, average='binary')
+recall = recall_score(true_labels, predicted_labels, average='binary')
+f1 = f1_score(true_labels, predicted_labels, average='binary')
+balanced_accuracy = balanced_accuracy_score(true_labels, predicted_labels)
+
+# Display the metrics as a table
+st.header("Performance Metrics")
+metrics_df = pd.DataFrame({
     "Metric": ["Accuracy", "Precision", "Recall", "F1-score", "Balanced Accuracy"],
     "Value": [accuracy, precision, recall, f1, balanced_accuracy]
 })
-
+st.table(metrics_df)
 
```
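The hunk references several names that are defined elsewhere in app.py and do not appear in this diff: `classifier`, `candidate_labels`, `hypothesis_template`, and the labeled DataFrame `df`. Below is a minimal self-contained sketch of how those pieces could plausibly fit together, assuming a Hugging Face zero-shot-classification pipeline, an illustrative model choice, and a toy two-row evaluation set; everything not shown in the commit is an assumption, not the app's actual code.

```python
# Minimal sketch of the surrounding app.py context. The model choice,
# candidate labels, hypothesis template, and DataFrame contents are all
# illustrative assumptions; only the evaluation/metrics block comes
# from the commit itself.
import pandas as pd
import streamlit as st
from transformers import pipeline
from sklearn.metrics import (
    accuracy_score, precision_score, recall_score,
    f1_score, balanced_accuracy_score,
)

# Assumed: the default English NLI model for zero-shot classification.
classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")

candidate_labels = ["positive", "negative"]   # assumed binary label set
hypothesis_template = "This text is {}."      # assumed template

# Assumed: an evaluation set with one text column and one gold-label column.
df = pd.DataFrame({
    "text": ["Great product, works perfectly.", "Terrible, broke after a day."],
    "label": ["positive", "negative"],
})

inputs = df["text"].tolist()
true_labels = df["label"].tolist()

# The pipeline returns one dict per input, with 'labels' sorted by score
# in descending order, so the first entry is the predicted class.
predictions = classifier(inputs, candidate_labels, hypothesis_template=hypothesis_template)
predicted_labels = [result["labels"][0] for result in predictions]

accuracy = accuracy_score(true_labels, predicted_labels)
precision = precision_score(true_labels, predicted_labels, average="binary", pos_label="positive")
recall = recall_score(true_labels, predicted_labels, average="binary", pos_label="positive")
f1 = f1_score(true_labels, predicted_labels, average="binary", pos_label="positive")
balanced_accuracy = balanced_accuracy_score(true_labels, predicted_labels)

# Display the metrics as a table.
st.header("Performance Metrics")
metrics_df = pd.DataFrame({
    "Metric": ["Accuracy", "Precision", "Recall", "F1-score", "Balanced Accuracy"],
    "Value": [accuracy, precision, recall, f1, balanced_accuracy],
})
st.table(metrics_df)
```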
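One caveat on the metric calls as committed: in scikit-learn, `average='binary'` scores only the class named by `pos_label`, which defaults to the integer 1. If `df["label"]` holds string labels such as `"positive"`/`"negative"`, then `precision_score`, `recall_score`, and `f1_score` raise a ValueError unless `pos_label` is passed explicitly, as in the sketch above; with more than two candidate labels, `average='macro'` or `average='weighted'` is needed instead. `accuracy_score` and `balanced_accuracy_score` are unaffected.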