maom committed
Commit f356ef8 · 1 Parent(s): c297299

update upload assemble and upload scripts

src/02.2_assemble_K50_dG_dataset.R CHANGED
@@ -11,38 +11,10 @@ ThermoMPNN_splits <- arrow::read_parquet("intermediate/ThermoMPNN_splits.parquet
 # Dataset1 consists of all cDNA proteolysis measurements of stability
 dataset1 <- readr::read_csv(
   file = "data/Processed_K50_dG_datasets/Tsuboyama2023_Dataset1_20230416.csv",
-  col_types = readr::cols(
-    name = readr::col_character(),
-    dna_seq = readr::col_character(),
-    log10_K50_t = readr::col_double(),
-    log10_K50_t_95CI_high = readr::col_double(),
-    log10_K50_t_95CI_low = readr::col_double(),
-    log10_K50_t_95CI = readr::col_double(),
-    fitting_error_t = readr::col_double(),
-    log10_K50unfolded_t = readr::col_double(),
-    deltaG_t = readr::col_double(),
-    deltaG_t_95CI_high = readr::col_double(),
-    deltaG_t_95CI_low = readr::col_double(),
-    deltaG_t_95CI = readr::col_double(),
-    log10_K50_c = readr::col_double(),
-    log10_K50_c_95CI_high = readr::col_double(),
-    log10_K50_c_95CI_low = readr::col_double(),
-    log10_K50_c_95CI = readr::col_double(),
-    fitting_error_c = readr::col_double(),
-    log10_K50unfolded_c = readr::col_double(),
-    deltaG_c = readr::col_double(),
-    deltaG_c_95CI_high = readr::col_double(),
-    deltaG_c_95CI_low = readr::col_double(),
-    deltaG_c_95CI = readr::col_double(),
-    deltaG = readr::col_double(),
-    deltaG_95CI_high = readr::col_double(),
-    deltaG_95CI_low = readr::col_double(),
-    deltaG_95CI = readr::col_double(),
-    log10_K50_trypsin_ML = readr::col_double(),
-    log10_K50_chymotrypsin_ML = readr::col_double()))
+  show_col_types = FALSE)

 # note that some of the log10_K50_trypsin_ML and log10_K50_chmotrypsin_ML values are "-" and ">2.5".
-# These are parsed as NA values"
+# to maintain these non-standard values, we keep them as strings for the full dataset

 dataset1 |>
   arrow::write_parquet(
@@ -59,15 +31,21 @@ dataset1 |>

 dataset2 <- readr::read_csv(
   file = "data/Processed_K50_dG_datasets/Tsuboyama2023_Dataset2_Dataset3_20230416.csv",
-  show_col_types = FALSE)
+  show_col_types = FALSE) |>
+  dplyr::mutate(
+    log10_K50_trypsin_ML = as.numeric(log10_K50_trypsin_ML),
+    log10_K50_chymotrypsin_ML = as.numeric(log10_K50_chymotrypsin_ML),
+    dG_ML = as.numeric(dG_ML),
+    ddG_ML = as.numeric(ddG_ML))
 # 776,298 rows

 dataset2 |>
   arrow::write_parquet(
     "intermediate/dataset2.parquet")
+

 dataset3 <- dataset2 |>
-  dplyr::filter(ddG_ML != "-")
+  dplyr::filter(!is.na(ddG_ML))

 dataset3 |>
   arrow::write_parquet(
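
For context on the change above: without the explicit col_types specification, readr guesses the columns that mix numbers with "-" and ">2.5" placeholders as character, and the as.numeric() calls coerce those placeholders to NA. A minimal sketch of that coercion, using made-up values:

# made-up values standing in for a column that mixes numbers with "-" and ">2.5"
x <- c("1.23", "-", ">2.5")
as.numeric(x)
# [1] 1.23   NA   NA    (with a "NAs introduced by coercion" warning)

This is also why the dataset3 filter changes from ddG_ML != "-" to !is.na(ddG_ML): after coercion the placeholder rows are NA rather than the literal string "-".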
src/02.2_check_assembled_datasets.R CHANGED
@@ -1,7 +1,38 @@


 # consistency between models and function predictions
-source("product/MPI/src/summarize_map.R")
+source("src/summarize_map.R")
+
+
+dataset1_name <- arrow::read_parquet(
+  "intermediate/dataset1.parquet",
+  col_select = "name") |>
+  dplyr::mutate(
+    WT_name = name |> stringr::str_replace("pdb_[A-Z][0-9]+[A-Z]", "pdb")) |>
+  dplyr::filter(
+    !(WT_name |> stringr::str_detect("[0-9][A-Z0-9a-z]{3}([.]pdb)?")),
+    !(WT_name |> stringr::str_detect("ruler")))
+
+names_joined |> dplyr::filter(is.na(name_models)) |> dplyr::select(-name_models) |> dplyr::filter(!(WT_name |> stringr::str_detect("ruler")), !(WT_name |> stringr::str_detect("set")), !(WT_name |> stringr::str_detect("_del")), !(WT_name |> stringr::str_detect("_ins")), !(WT_name |> stringr::str_detect("_wt[a-z]")), !(WT_name |> stringr::str_detect("scramble")), !(WT_name |> stringr::str_detect("(PP5|ZF5)[.]3")), !(WT_name |> stringr::str_detect("(UAH|SAH)-p53-8R"))) |> dplyr::filter(WT_name |> stringr::str_detect("pdb"))
+
+models_name <- arrow::read_parquet(
+  "intermediate/AlphaFold_model_PDBs.parquet",
+  col_select = "name") |>
+  dplyr::mutate(
+    name = name |> stringr::str_replace(":", "[|]"))
+
+names_joined <- dplyr::full_join(
+  dataset1_name |> dplyr::mutate(name_dataset1 = WT_name),
+  models_name |> dplyr::mutate(name_models = name),
+  by = c("WT_name" = "name"))
+
+
+names_joined_summary <- names_joined |>
+  summarize_map(
+    x_cols = name_dataset1,
+    y_cols = name_models,
+    verbose = TRUE)
+



@@ -13,7 +44,7 @@ check_id_consistency <- function(
   if (verbose) {
     cat("Loading model ids...\n")
   }
-  ids <- arrow::read_parquet(
+  dataset1 <- arrow::read_parquet(
     paste0("intermediate/", dataset_tag, "_", split, ".parquet"),
     col_select = "id")

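For reference, the WT_name normalization above collapses a point-mutation suffix in the dataset1 names down to the wild-type name before joining against the AlphaFold model names, and then drops names that look like bare PDB codes or ruler constructs. A small sketch with a hypothetical name (the actual naming scheme may differ):

# hypothetical dataset1 name carrying a "pdb_<mutation>" suffix
"1XYZ.pdb_A23G" |> stringr::str_replace("pdb_[A-Z][0-9]+[A-Z]", "pdb")
# [1] "1XYZ.pdb"

# wild-type names that look like bare PDB codes are then filtered out
"1XYZ.pdb" |> stringr::str_detect("[0-9][A-Z0-9a-z]{3}([.]pdb)?")
# [1] TRUE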
 
src/02.3_assemble_structure_datasets.R ADDED
@@ -0,0 +1,49 @@
+#' Assemble PDBs
+#'
+#' @param data_path character directory .pdb.gz files are located
+#' @param output_path character output .parquet path
+#'
+#' Write output_path .parquet file with columns
+#'   <id> <pdb>
+assemble_models <- function(
+  data_path,
+  output_path) {
+
+  cat(
+    "data path: ", data_path, "\n",
+    "output path: ", output_path, "\n",
+    sep = "")
+
+  file_index <- 1
+  models <- list.files(
+    path = data_path,
+    full.names = TRUE,
+    pattern = "*.pdb",
+    recursive = TRUE) |>
+    purrr::map_dfr(.f = function(path) {
+      file_handle <- path |>
+        file(open = "rb") |>
+        gzcon()
+
+      if( file_index %% 20 == 0) {
+        cat("Reading '", path, "' ", file_index, "\n", sep = "")
+      }
+      file_index <<- file_index + 1
+
+      lines <- file_handle |> readLines()
+      file_handle |> close()
+
+      data.frame(
+        name = path |>
+          basename() |>
+          stringr::str_replace("[:]", "|"),
+        pdb = lines |> paste0(collapse = "\n"))
+    })
+  models |> arrow::write_parquet(output_path)
+  models
+}
+
+
+assemble_models(
+  data_path = "data/AlphaFold_model_PDBs",
+  output_path = "intermediate/AlphaFold_model_PDBs.parquet")
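
A quick way to sanity-check the assembled table is to read it back and write one record out as a plain-text PDB again; a sketch assuming the parquet written above exists (the output file name is made up):

models <- arrow::read_parquet("intermediate/AlphaFold_model_PDBs.parquet")
nrow(models)      # one row per model file found under data/AlphaFold_model_PDBs
models$name[1]    # file basename, with ":" rewritten to "|"

# dump the first structure as an uncompressed .pdb for inspection (hypothetical path)
writeLines(models$pdb[1], "check_first_model.pdb")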
src/03.1_upload_data.py CHANGED
@@ -20,7 +20,7 @@ import datasets
 # dataset2
 # dataset3
 # dataset3_single
-# dataset3_single_CV
+# dataset3_single_cv



@@ -94,32 +94,32 @@ dataset.push_to_hub(
     commit_message = "Upload dataset3_single")


-##### dataset3_single_CV #######
+##### dataset3_single_cv #######
 dataset = datasets.load_dataset(
     "parquet",
-    name = "dataset3_single_CV",
+    name = "dataset3_single_cv",
     data_dir = "./intermediate",
     data_files = {
-        "train_0" : "dataset3_single_CV_train_0.parquet",
-        "train_1" : "dataset3_single_CV_train_1.parquet",
-        "train_2" : "dataset3_single_CV_train_2.parquet",
-        "train_3" : "dataset3_single_CV_train_3.parquet",
-        "train_4" : "dataset3_single_CV_train_4.parquet",
-        "val_0" : "dataset3_single_CV_val_0.parquet",
-        "val_1" : "dataset3_single_CV_val_1.parquet",
-        "val_2" : "dataset3_single_CV_val_2.parquet",
-        "val_3" : "dataset3_single_CV_val_3.parquet",
-        "val_4" : "dataset3_single_CV_val_4.parquet",
-        "test_0" : "dataset3_single_CV_test_0.parquet",
-        "test_1" : "dataset3_single_CV_test_1.parquet",
-        "test_2" : "dataset3_single_CV_test_2.parquet",
-        "test_3" : "dataset3_single_CV_test_3.parquet",
-        "test_4" : "dataset3_single_CV_test_4.parquet"},
+        "train_0" : "dataset3_single_cv_train_0.parquet",
+        "train_1" : "dataset3_single_cv_train_1.parquet",
+        "train_2" : "dataset3_single_cv_train_2.parquet",
+        "train_3" : "dataset3_single_cv_train_3.parquet",
+        "train_4" : "dataset3_single_cv_train_4.parquet",
+        "val_0" : "dataset3_single_cv_val_0.parquet",
+        "val_1" : "dataset3_single_cv_val_1.parquet",
+        "val_2" : "dataset3_single_cv_val_2.parquet",
+        "val_3" : "dataset3_single_cv_val_3.parquet",
+        "val_4" : "dataset3_single_cv_val_4.parquet",
+        "test_0" : "dataset3_single_cv_test_0.parquet",
+        "test_1" : "dataset3_single_cv_test_1.parquet",
+        "test_2" : "dataset3_single_cv_test_2.parquet",
+        "test_3" : "dataset3_single_cv_test_3.parquet",
+        "test_4" : "dataset3_single_cv_test_4.parquet"},
     cache_dir = "/scratch/maom_root/maom0/maom",
     keep_in_memory = True)

 dataset.push_to_hub(
-    repo_id = "MaomLab/MegaScale",
-    config_name = "dataset3_single_CV",
-    data_dir = "datase3_single_CV/data",
-    commit_message = "Upload dataset3_single_CV")
+    repo_id = "maom/MegaScale",
+    config_name = "dataset3_single_cv",
+    data_dir = "datase3_single_cv/data",
+    commit_message = "Upload dataset3_single_cv")
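
Since this change only lowercases the config and file names, a quick local check before pushing is to confirm the renamed split files are present and count their rows; an R sketch, assuming the intermediate/ paths above are relative to the repository root:

# list the renamed cross-validation splits and count rows in each
cv_files <- list.files(
  path = "intermediate",
  pattern = "^dataset3_single_cv_(train|val|test)_[0-4][.]parquet$",
  full.names = TRUE)

cv_files |>
  purrr::map_int(~ nrow(arrow::read_parquet(.x))) |>
  setNames(basename(cv_files))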
src/03.2_check_uploaded_data.py CHANGED
@@ -27,20 +27,20 @@ test_local_hf_match("dataset3_single", "train")
 test_local_hf_match("dataset3_single", "val")
 test_local_hf_match("dataset3_single", "test")

-test_local_hf_match("dataset3_single_CV", "train_0")
-test_local_hf_match("dataset3_single_CV", "train_1")
-test_local_hf_match("dataset3_single_CV", "train_2")
-test_local_hf_match("dataset3_single_CV", "train_3")
-test_local_hf_match("dataset3_single_CV", "train_4")
-
-test_local_hf_match("dataset3_single_CV", "val_0")
-test_local_hf_match("dataset3_single_CV", "val_1")
-test_local_hf_match("dataset3_single_CV", "val_2")
-test_local_hf_match("dataset3_single_CV", "val_3")
-test_local_hf_match("dataset3_single_CV", "val_4")
-
-test_local_hf_match("dataset3_single_CV", "test_0")
-test_local_hf_match("dataset3_single_CV", "test_1")
-test_local_hf_match("dataset3_single_CV", "test_2")
-test_local_hf_match("dataset3_single_CV", "test_3")
-test_local_hf_match("dataset3_single_CV", "test_4")
+test_local_hf_match("dataset3_single_cv", "train_0")
+test_local_hf_match("dataset3_single_cv", "train_1")
+test_local_hf_match("dataset3_single_cv", "train_2")
+test_local_hf_match("dataset3_single_cv", "train_3")
+test_local_hf_match("dataset3_single_cv", "train_4")
+
+test_local_hf_match("dataset3_single_cv", "val_0")
+test_local_hf_match("dataset3_single_cv", "val_1")
+test_local_hf_match("dataset3_single_cv", "val_2")
+test_local_hf_match("dataset3_single_cv", "val_3")
+test_local_hf_match("dataset3_single_cv", "val_4")
+
+test_local_hf_match("dataset3_single_cv", "test_0")
+test_local_hf_match("dataset3_single_cv", "test_1")
+test_local_hf_match("dataset3_single_cv", "test_2")
+test_local_hf_match("dataset3_single_cv", "test_3")
+test_local_hf_match("dataset3_single_cv", "test_4")