Spaces:
Build error
Build error
Update app.py
Browse files
app.py
CHANGED
@@ -31,7 +31,6 @@ from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REP
|
|
31 |
from src.populate import get_evaluation_queue_df, get_leaderboard_df
|
32 |
from src.submission.submit import add_new_eval
|
33 |
|
34 |
-
|
35 |
def debug_model_names(df, label="디버그"):
|
36 |
"""
|
37 |
데이터프레임에서 모델 이름 관련 이슈 디버깅하기 위한 함수
|
@@ -52,7 +51,7 @@ def debug_model_names(df, label="디버그"):
|
|
52 |
print("\n")
|
53 |
|
54 |
print("==================\n")
|
55 |
-
|
56 |
def restart_space():
|
57 |
API.restart_space(repo_id=REPO_ID)
|
58 |
|
@@ -84,6 +83,8 @@ except Exception:
|
|
84 |
|
85 |
|
86 |
LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
|
|
|
|
|
87 |
|
88 |
# 변환 매핑 정의
|
89 |
benchmark_mapping = {
|
@@ -101,6 +102,10 @@ if model_type_column in LEADERBOARD_DF.columns:
|
|
101 |
running_eval_queue_df,
|
102 |
pending_eval_queue_df,
|
103 |
) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
|
|
|
|
|
|
|
|
|
104 |
|
105 |
def init_leaderboard(dataframe):
|
106 |
if dataframe is None or dataframe.empty:
|
@@ -256,4 +261,4 @@ with demo:
|
|
256 |
scheduler = BackgroundScheduler()
|
257 |
scheduler.add_job(restart_space, "interval", seconds=1800)
|
258 |
scheduler.start()
|
259 |
-
demo.queue(default_concurrency_limit=40).launch()
|
|
|
31 |
from src.populate import get_evaluation_queue_df, get_leaderboard_df
|
32 |
from src.submission.submit import add_new_eval
|
33 |
|
|
|
34 |
def debug_model_names(df, label="디버그"):
|
35 |
"""
|
36 |
데이터프레임에서 모델 이름 관련 이슈 디버깅하기 위한 함수
|
|
|
51 |
print("\n")
|
52 |
|
53 |
print("==================\n")
|
54 |
+
|
55 |
def restart_space():
|
56 |
API.restart_space(repo_id=REPO_ID)
|
57 |
|
|
|
83 |
|
84 |
|
85 |
LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
|
86 |
+
# 디버깅을 위한 코드 (필요시 주석 해제)
|
87 |
+
# debug_model_names(LEADERBOARD_DF, "Leaderboard 데이터")
|
88 |
|
89 |
# 변환 매핑 정의
|
90 |
benchmark_mapping = {
|
|
|
102 |
running_eval_queue_df,
|
103 |
pending_eval_queue_df,
|
104 |
) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
|
105 |
+
# 디버깅을 위한 코드 (필요시 주석 해제)
|
106 |
+
# debug_model_names(finished_eval_queue_df, "완료된 평가 큐")
|
107 |
+
# debug_model_names(running_eval_queue_df, "실행 중인 평가 큐")
|
108 |
+
# debug_model_names(pending_eval_queue_df, "대기 중인 평가 큐")
|
109 |
|
110 |
def init_leaderboard(dataframe):
|
111 |
if dataframe is None or dataframe.empty:
|
|
|
261 |
scheduler = BackgroundScheduler()
|
262 |
scheduler.add_job(restart_space, "interval", seconds=1800)
|
263 |
scheduler.start()
|
264 |
+
demo.queue(default_concurrency_limit=40).launch()
|