tathagataraha committed
Commit b50c184 · 1 Parent(s): d7b4378

[ADD] Submission of private models

Files changed (4):
  1. app.py +1 -1
  2. src/about.py +1 -1
  3. src/populate.py +2 -2
  4. src/submission/submit.py +17 -12
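Every diff in this commit keys off a PRIVATE_REPO flag imported from src.envs. src/envs.py itself is not among the changed files, so the snippet below is only a hypothetical sketch of how such a flag might be exposed there; the name PRIVATE_REPO comes from the diffs, the environment-variable handling is an assumption.

# Hypothetical sketch, not part of this commit: exposing PRIVATE_REPO in src/envs.py.
import os

# Assumption: the Space treats itself as a private/offline deployment when this env var is truthy.
PRIVATE_REPO = os.environ.get("PRIVATE_REPO", "").lower() in ("1", "true", "yes")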
app.py CHANGED
@@ -50,7 +50,7 @@ from src.display.utils import (
     fields,
     render_generation_templates
 )
-from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
+from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN, PRIVATE_REPO
 from src.populate import get_evaluation_queue_df, get_leaderboard_df
 from src.submission.submit import add_new_eval, PLACEHOLDER_DATASET_WISE_NORMALIZATION_CONFIG
 
src/about.py CHANGED
@@ -101,7 +101,7 @@ NUM_FEWSHOT = 0 # Change with your few shot
 
 
 # Your leaderboard name
-TITLE = """""" #<h1 align="center" id="space-title"> NER Leaderboard</h1>"""
+TITLE = """<h1 align="center" id="space-title" style="color: red;"> [DEV Mode] </h1>"""
 # LOGO = """<img src="https://equalengineers.com/wp-content/uploads/2024/04/dummy-logo-5b.png" alt="Clinical X HF" width="500" height="333">"""
 LOGO = """<img src="https://huggingface.co/spaces/m42-health/MEDIC-Benchmark/resolve/main/assets/logo_medic_4.png" alt="Clinical X HF" width="40%" style="display: block; margin-left: auto; margin-right: auto;">"""
 FIVE_PILLAR_DIAGRAM = """<img src="https://huggingface.co/spaces/m42-health/MEDIC-Benchmark/resolve/main/assets/MEDIC_Diagram.jpg" alt="MEDIC Diagram" width="52%" style="display: block; margin-left: auto; margin-right: auto;">"""
src/populate.py CHANGED
@@ -46,7 +46,7 @@ def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
             file_path = os.path.join(save_path, entry)
             with open(file_path) as fp:
                 data = json.load(fp)
-            data[EvalQueueColumn.model.name] = make_clickable_model(data["model_name"])
+            data[EvalQueueColumn.model.name] = make_clickable_model(data["model_name"]) if not data["private"] else data["model_name"]
             data[EvalQueueColumn.revision.name] = data.get("revision", "main")
             # changes to be made here
             data[EvalQueueColumn.closed_ended_status.name] = data["status"]["closed-ended"]
@@ -63,7 +63,7 @@ def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
                 with open(file_path) as fp:
                     data = json.load(fp)
                 # print(data)
-                data[EvalQueueColumn.model.name] = make_clickable_model(data["model_name"])
+                data[EvalQueueColumn.model.name] = make_clickable_model(data["model_name"]) if not data["private"] else data["model_name"]
                 data[EvalQueueColumn.revision.name] = data.get("revision", "main")
                 data[EvalQueueColumn.closed_ended_status.name] = data["status"]["closed-ended"]
                 data[EvalQueueColumn.open_ended_status.name] = data["status"]["open-ended"]
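The populate.py change above keeps the raw model name for private submissions instead of wrapping it in a Hub link. A minimal illustration of the conditional on a made-up queue entry, assuming make_clickable_model is imported from src.display.formatting as in the stock leaderboard template:

# Illustration only: rendering the "model" cell of one queue entry (values are made up).
from src.display.formatting import make_clickable_model  # assumed location of the existing helper

entry = {
    "model_name": "org/public-model",
    "private": False,
    "revision": "main",
    "status": {"closed-ended": "FINISHED", "open-ended": "PENDING"},
}

# Public entries get a clickable Hub link; private ones fall back to the plain name.
model_cell = make_clickable_model(entry["model_name"]) if not entry["private"] else entry["model_name"]

One caveat: queue files written before this commit have no "private" key, so data["private"] would raise KeyError on them; data.get("private", False) would be the defensive variant.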
src/submission/submit.py CHANGED
@@ -4,7 +4,7 @@ import ast
 from datetime import datetime, timezone
 
 from src.display.formatting import styled_error, styled_message, styled_warning
-from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
+from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO, PRIVATE_REPO
 from src.submission.check_validity import (
     already_submitted_models,
     check_model_card,
@@ -61,9 +61,11 @@ def add_new_eval(
     """
     global REQUESTED_MODELS
     global USERS_TO_SUBMISSION_DATES
-    if not REQUESTED_MODELS:
+    if not REQUESTED_MODELS and not PRIVATE_REPO:
         REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
     if model.startswith("/"):
+        if not PRIVATE_REPO:
+            return styled_error("Private models are not allowed to be submitted to the public queue.")
         user_name = ""
         model_path = model
         private = True
@@ -88,12 +90,12 @@ def add_new_eval(
         revision = "main"
 
     # Is the model on the hub?
-    if weight_type in ["Delta", "Adapter"]:
+    if weight_type in ["Delta", "Adapter"] and not private:
         base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
         if not base_model_on_hub:
             return styled_error(f'Base model "{base_model}" {error}')
 
-    if not weight_type == "Adapter":
+    if not weight_type == "Adapter" and not private:
         model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
         if not model_on_hub:
             return styled_error(f'Model "{model}" {error}')
@@ -101,7 +103,7 @@ def add_new_eval(
 
     # Is the model info correctly filled?
     try:
-        if model.startswith("/"):
+        if not private:
             model_info = API.model_info(repo_id=model, revision=revision)
             model_size = get_model_size(model_info=model_info)
             license = model_info.cardData["license"]
@@ -112,7 +114,7 @@ def add_new_eval(
         else:
             model_size = None
             license = None
-            likes = 0
+            likes = -1
     except Exception:
         return styled_error("Could not get your model information. Please fill it up properly.")
 
@@ -158,16 +160,19 @@ def add_new_eval(
 
     # Check for duplicate submission
 
-    if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
+    if not PRIVATE_REPO and f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
         return styled_warning("This model has been already submitted. Add the revision if the model has been updated.")
 
     print("Creating eval file")
-    OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
+    if not private:
+        OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
+
+    else:
+        OUT_DIR = f"{EVAL_REQUESTS_PATH}/offline"
+        model_path = model_path[1:] if model_path.startswith("/") else model_path
+        model_path = model_path.replace("/", "+-+")
+    out_path = f"{OUT_DIR}/{model_path}_{revision}_{precision}_{weight_type}_eval_request.json"
     os.makedirs(OUT_DIR, exist_ok=True)
-    if model_path.startswith("/"):
-        os.makedirs(f"{OUT_DIR}/{model_path}", exist_ok=True)
-    out_path = f"{OUT_DIR}/{model_path}_{revision}_{precision}_{weight_type}_eval_request.json"
-
     with open(out_path, "w") as f:
         f.write(json.dumps(eval_entry))
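To make the new private branch in submit.py concrete, here is a worked example of the request-file path it produces for a locally stored model; all values are illustrative and EVAL_REQUESTS_PATH is assumed to be "eval-queue" (the real value lives in src/envs.py).

# Worked example of the private-model path handling introduced above (illustrative values).
EVAL_REQUESTS_PATH = "eval-queue"       # assumption; the real value comes from src/envs.py
model_path = "/data/models/my-model"    # a local checkpoint submitted on a PRIVATE_REPO deployment
revision, precision, weight_type = "main", "float16", "Original"

OUT_DIR = f"{EVAL_REQUESTS_PATH}/offline"
model_path = model_path[1:] if model_path.startswith("/") else model_path
model_path = model_path.replace("/", "+-+")  # flatten the local path into a single filename
out_path = f"{OUT_DIR}/{model_path}_{revision}_{precision}_{weight_type}_eval_request.json"

print(out_path)
# eval-queue/offline/data+-+models+-+my-model_main_float16_Original_eval_request.json

Flattening the path this way also removes the need for the old os.makedirs(f"{OUT_DIR}/{model_path}", exist_ok=True) call that created nested directories for local model paths.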