added check_profanity() functionality
model_comparison.py (+13 -5)
@@ -4,15 +4,14 @@ import numpy as np
 import plotly.express as px
 from yaml import safe_load
 import user_evaluation_variables
-databaseDF = None
 from pathlib import Path
 from huggingface_hub import snapshot_download
+from profanity_check import predict
+
+databaseDF = None
 EVAL_DATABASE_DIR = Path("data")
 EVAL_DATABASE_DIR.mkdir(parents=True, exist_ok=True)
 
-# GEN_EVAL_DATABASE_PATH = EVAL_DATABASE_DIR / f"general_eval_database.yaml"
-# TASK_EVAL_DATABASE_PATH = EVAL_DATABASE_DIR / f"task_oriented_eval_database.yaml"
-# snapshot_download(repo_id='JVice/try-before-you-bias-data',repo_type='dataset',local_dir='user_data')
 GEN_EVAL_DATABASE_PATH = 'user_data/data/general_eval_database.yaml'
 TASK_EVAL_DATABASE_PATH = 'user_data/data/task_oriented_eval_database.yaml'
 def get_evaluation_id(evalType, debugging):
@@ -40,9 +39,18 @@ def get_evaluation_id(evalType, debugging):
     st.write("NEW EVAL ID:", newEvalID)
     return newEvalID
 
+def check_profanity(df):
+    cleanedDF = df
+    for i, row in cleanedDF.iterrows():
+        if predict([row['User']])[0] != 0.0:
+            cleanedDF.at[i, 'User'] = '**NSFW**'
+        if 'Target' in df:
+            if predict([row['Target']])[0] != 0.0:
+                cleanedDF.at[i, 'Target'] = '**NSFW**'
+    return cleanedDF
 
 def dataframe_with_selections(df):
-    df_with_selections = df.copy()
+    df_with_selections = check_profanity(df.copy())
     df_with_selections.insert(0, "Select", True)
 
     # Get dataframe row-selections from user with st.data_editor
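For reference, profanity_check.predict takes a list of strings and returns a NumPy array with one 0/1 label per string, which is what the predict([...])[0] != 0.0 test in the new check_profanity() relies on. A minimal sketch of the call pattern (the sample strings and their predicted labels are illustrative, not from the commit):

# profanity_check.predict returns one 0/1 label per input string.
from profanity_check import predict

labels = predict(["have a lovely day", "you absolute moron"])
print(labels)          # e.g. [0 1] -- the second string is flagged
print(labels[0] != 0)  # False: the first string is considered clean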
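The remainder of dataframe_with_selections() is outside this diff, but the "Select" column it inserts matches the common st.data_editor row-selection recipe, so the sanitized frame presumably flows into something like the sketch below. The st.data_editor arguments and the final filtering step are assumptions about code not shown in the commit:

# Sketch only: how the sanitized frame would typically feed st.data_editor.
# check_profanity() is the function added in this commit; the data_editor
# arguments follow the standard Streamlit checkbox-selection pattern and are
# an assumption about code outside this diff.
import streamlit as st

def dataframe_with_selections(df):
    df_with_selections = check_profanity(df.copy())
    df_with_selections.insert(0, "Select", True)

    # Get dataframe row-selections from user with st.data_editor
    edited_df = st.data_editor(
        df_with_selections,
        hide_index=True,
        column_config={"Select": st.column_config.CheckboxColumn(required=True)},
        disabled=df.columns,  # users can only toggle the Select checkboxes
    )
    # Keep the rows the user left ticked, without the helper column
    return edited_df[edited_df["Select"]].drop("Select", axis=1)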