File size: 4,762 Bytes
1593542
 
44c820a
 
 
1593542
 
 
44c820a
1593542
 
44c820a
 
f356ef8
44c820a
 
f356ef8
44c820a
 
 
 
 
 
 
1593542
 
 
 
 
 
 
b111af8
1593542
f356ef8
 
 
 
 
 
1593542
 
b111af8
1593542
b111af8
f356ef8
1593542
b111af8
f356ef8
1593542
b111af8
 
 
 
 
 
 
1593542
 
 
 
 
 
 
b111af8
1593542
 
 
 
 
 
b111af8
1593542
 
44c820a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169


system("cd data; unzip Processed_K50_dG_datasets.zip")


# Domain → split assignment table used by ThermoMPNN; presumably one row per
# domain with `id` and `split_name` columns (both are used below) — confirm
# against the file that generated it.
ThermoMPNN_splits <- "intermediate/ThermoMPNN_splits.parquet" |>
    arrow::read_parquet()


### Dataset1 ###

# Dataset1: every cDNA proteolysis measurement of stability.
dataset1 <- "data/Processed_K50_dG_datasets/Tsuboyama2023_Dataset1_20230416.csv" |>
    readr::read_csv(show_col_types = FALSE)

# Some log10_K50_trypsin_ML and log10_K50_chymotrypsin_ML entries are
# non-standard sentinels ("-", ">2.5"); keeping the columns as strings
# preserves them in the full dataset.

arrow::write_parquet(dataset1, "intermediate/dataset1.parquet")



### Dataset2 and Dataset3 ###

# Dataset2 (for dG ML): cDNA proteolysis stability measurements of class G0 + G1
# Dataset3 (for ddG ML): cDNA proteolysis stability measurements of class G0
#    G0: Good (wild-type ΔG values below 4.75 kcal mol^−1), 325,132 ΔG measurements at 17,093 sites in 365 domains
#    G1: Good but WT outside dynamic range

# Coerce the ML columns to numeric; the sentinel strings ("-", ">2.5") become NA.
dataset2 <- readr::read_csv(
    file = "data/Processed_K50_dG_datasets/Tsuboyama2023_Dataset2_Dataset3_20230416.csv",
    show_col_types = FALSE) |>
    dplyr::mutate(
        dplyr::across(
            c(log10_K50_trypsin_ML, log10_K50_chymotrypsin_ML, dG_ML, ddG_ML),
            as.numeric))
# 776,298 rows

arrow::write_parquet(dataset2, "intermediate/dataset2.parquet")


# Dataset3 = rows where ddG_ML is defined (class G0).
dataset3 <- dplyr::filter(dataset2, !is.na(ddG_ML))

arrow::write_parquet(dataset3, "intermediate/dataset3.parquet")



# Single-substitution mutants only: drop rows whose mut_type mentions an
# insertion ("ins"), a deletion ("del"), or multiple mutations (":").
dataset3_single <- dataset3 |>
    dplyr::filter(!stringr::str_detect(mut_type, "(ins|del|[:])"))
        

# Write one parquet file of single-mutant measurements per ThermoMPNN split.
# Uses group_split() + purrr::walk() instead of the superseded dplyr::do()
# idiom, which also required returning a dummy data.frame() from each group.
ThermoMPNN_splits |>
    dplyr::group_split(split_name) |>
    purrr::walk(function(split) {
        split_name <- split$split_name[1]
        # Keep true mutants whose wild-type domain belongs to this split.
        mutant_set <- dataset3_single |>
            dplyr::filter(mut_type != "wt") |>
            dplyr::semi_join(split, by = c("WT_name" = "id"))
        cat("Writing out split ", split_name, ", nrow: ", nrow(mutant_set), "\n", sep = "")

        arrow::write_parquet(
            x = mutant_set,
            sink = paste0("intermediate/dataset3_single_", split_name, ".parquet"))
    })


####

# Unpack the AlphaFold model PDB files; `&&` stops if the cd fails.
"cd data && unzip AlphaFold_model_PDBs.zip" |> system()


# Collect a directory tree of (possibly gzip-compressed) model files into a
# single parquet file with one row per model.
#
# @param data_path    root directory searched recursively for model files
# @param dataset_tag  label written into the `dataset_tag` column of every row
# @param pattern      regular expression matched against file names by list.files()
# @param output_path  path of the parquet file to write
assemble_models <- function(
    data_path,
    dataset_tag,
    pattern,
    output_path) {

    cat(
        "data path: ", data_path, "\n",
        "dataset_tag: ", dataset_tag, "\n",
        "pattern: ", pattern, "\n",
        "output path: ", output_path, "\n",
        sep = "")

    models <- list.files(
        path = data_path,
        full.names = TRUE,
        pattern = pattern,
        recursive = TRUE) |>
        # imap supplies the 1-based position, replacing the previous
        # `file_index <<- file_index + 1` global counter.
        purrr::imap_dfr(.f = function(path, file_index) {
            # Progress heartbeat on every 10th file.
            if (file_index %% 10 == 0) {
                cat("Reading  '", path, "' ", file_index, "\n", sep = "")
            }

            # gzcon() reads gzip'd content transparently and passes plain
            # files through unchanged.
            file_handle <- path |>
                file(open = "rb") |>
                gzcon()
            # Close the connection even if readLines() errors.
            on.exit(close(file_handle), add = TRUE)
            lines <- readLines(file_handle)

            data.frame(
                dataset_tag = dataset_tag,
                # Fix: strip a literal ".pdb" suffix. The old
                # str_replace(".pdb", "") treated "." as a regex wildcard
                # and replaced the first match anywhere in the name.
                id = path |> basename() |> stringr::str_remove("\\.pdb$"),
                pdb = paste0(lines, collapse = "\n"))
        })
    models |> arrow::write_parquet(output_path)
}


# Fix: list.files() interprets `pattern` as a regular expression, not a shell
# glob, so "*.pdb" was wrong (a leading "*" has nothing to repeat); match a
# literal ".pdb" suffix instead.
assemble_models(
    data_path = "data/AlphaFold_model_PDBs",
    dataset_tag = "all",
    pattern = "\\.pdb$",
    output_path = "intermediate/all_pdbs.parquet")

# 
# assemble_models(
#     data_path = "data/AlphaFold_model_PDBs",
#     dataset_tag = "EA",
#     pattern = "EA[:]run.*pdb",
#     output_path = "intermediate/EA_pdbs.parquet")
# 
# 
# assemble_models(
#     data_path = "data/AlphaFold_model_PDBs",
#     dataset_tag = "EEHEE",
#     pattern = "EEHEE.*pdb",
#     output_path = "intermediate/EEHEE_pdbs.parquet")
# 
# 
# assemble_models(
#     data_path = "data/AlphaFold_model_PDBs",
#     dataset_tag = "EHEE",
#     pattern = "EHEE.*pdb",
#     output_path = "intermediate/EHEE_pdbs.parquet")
# 
# 
# assemble_models(
#     data_path = "data/AlphaFold_model_PDBs",
#     dataset_tag = "GG",
#     pattern = "GG[:]run.*pdb",
#     output_path = "intermediate/GG_pdbs.parquet")
# 
# 
# assemble_models(
#     data_path = "data/AlphaFold_model_PDBs",
#     dataset_tag = "HEEH_KT",
#     pattern = "HEEH_KT_rd.*pdb",
#     output_path = "intermediate/HEEH_KT_pdbs.parquet")
# 
# assemble_models(
#     data_path = "data/AlphaFold_model_PDBs",
#     dataset_tag = "HEEH",
#     pattern = "HEEH_rd.*pdb",
#     output_path = "intermediate/HEEH_pdbs.parquet")