friendshipkim committed on
Commit
83247fa
·
1 Parent(s): 73af373

auth token

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -17,7 +17,7 @@ def preprocess_text(s):
17
 
18
  @st.cache
19
  def get_pairwise_distances(model):
20
- dataset = load_dataset(f"{ORG_ID}/{model}_distance")["train"]
21
  df = pd.DataFrame(dataset).set_index('index')
22
  return df
23
 
@@ -32,7 +32,7 @@ def get_pairwise_distances_chunked(model, chunk):
32
  @st.cache
33
  def get_query_strings():
34
  # df = pd.read_json(hf_hub_download(repo_id=repo_id, filename="IUR_Reddit_test_queries_english.jsonl"), lines = True)
35
- dataset = load_dataset(f"{ORG_ID}/IUR_Reddit_test_queries_english")["train"]
36
  df = pd.DataFrame(dataset)
37
  df['index'] = df.reset_index().index
38
  return df
@@ -44,7 +44,7 @@ def get_query_strings():
44
  @st.cache
45
  def get_candidate_strings():
46
  # df = pd.read_json(f"{ASSETS_PATH}/IUR_Reddit_test_candidates_english.jsonl", lines = True)
47
- dataset = load_dataset(f"{ORG_ID}/IUR_Reddit_test_candidates_english")["train"]
48
  df = pd.DataFrame(dataset)
49
  df['index'] = df.reset_index().index
50
  return df
@@ -55,7 +55,7 @@ def get_candidate_strings():
55
  @st.cache
56
  def get_embedding_dataset(model):
57
  # data = load_from_disk(f"{ASSETS_PATH}/{model}/embedding")
58
- data = load_dataset(f"{ORG_ID}/{model}_embedding")
59
  return data
60
 
61
  @st.cache
 
17
 
18
  @st.cache
19
  def get_pairwise_distances(model):
20
+ dataset = load_dataset(f"{ORG_ID}/{model}_distance", use_auth_token=True)["train"]
21
  df = pd.DataFrame(dataset).set_index('index')
22
  return df
23
 
 
32
  @st.cache
33
  def get_query_strings():
34
  # df = pd.read_json(hf_hub_download(repo_id=repo_id, filename="IUR_Reddit_test_queries_english.jsonl"), lines = True)
35
+ dataset = load_dataset(f"{ORG_ID}/IUR_Reddit_test_queries_english", use_auth_token=True)["train"]
36
  df = pd.DataFrame(dataset)
37
  df['index'] = df.reset_index().index
38
  return df
 
44
  @st.cache
45
  def get_candidate_strings():
46
  # df = pd.read_json(f"{ASSETS_PATH}/IUR_Reddit_test_candidates_english.jsonl", lines = True)
47
+ dataset = load_dataset(f"{ORG_ID}/IUR_Reddit_test_candidates_english", use_auth_token=True)["train"]
48
  df = pd.DataFrame(dataset)
49
  df['index'] = df.reset_index().index
50
  return df
 
55
  @st.cache
56
  def get_embedding_dataset(model):
57
  # data = load_from_disk(f"{ASSETS_PATH}/{model}/embedding")
58
+ data = load_dataset(f"{ORG_ID}/{model}_embedding", use_auth_token=True)
59
  return data
60
 
61
  @st.cache