Commit dc9f328 · Parent: 28dff12 · Upload app.py

app.py CHANGED
@@ -21,22 +21,41 @@ from datasets import load_dataset

#Read data training data.

-x1 = load_dataset("mertkarabacak/NCDB-GBM", data_files="
+x1 = load_dataset("mertkarabacak/NCDB-GBM", data_files="6m_data_train.csv", use_auth_token = HF_TOKEN)
x1 = pd.DataFrame(x1['train'])
x1 = x1.iloc[:, 1:]

-x2 = load_dataset("mertkarabacak/NCDB-GBM", data_files="
+x2 = load_dataset("mertkarabacak/NCDB-GBM", data_files="12m_data_train.csv", use_auth_token = HF_TOKEN)
x2 = pd.DataFrame(x2['train'])
x2 = x2.iloc[:, 1:]

-x3 = load_dataset("mertkarabacak/NCDB-GBM", data_files="
+x3 = load_dataset("mertkarabacak/NCDB-GBM", data_files="18m_data_train.csv", use_auth_token = HF_TOKEN)
x3 = pd.DataFrame(x3['train'])
x3 = x3.iloc[:, 1:]

-x4 = load_dataset("mertkarabacak/NCDB-GBM", data_files="
+x4 = load_dataset("mertkarabacak/NCDB-GBM", data_files="24m_data_train.csv", use_auth_token = HF_TOKEN)
x4 = pd.DataFrame(x4['train'])
x4 = x4.iloc[:, 1:]

+#Read validation data.
+
+x1_valid = load_dataset("mertkarabacak/NCDB-GBM", data_files="6m_data_valid.csv", use_auth_token = HF_TOKEN)
+x1_valid = pd.DataFrame(x1_valid['train'])
+x1_valid = x1_valid.iloc[:, 1:]
+
+x2_valid = load_dataset("mertkarabacak/NCDB-GBM", data_files="12m_data_valid.csv", use_auth_token = HF_TOKEN)
+x2_valid = pd.DataFrame(x2_valid['train'])
+x2_valid = x2_valid.iloc[:, 1:]
+
+x3_valid = load_dataset("mertkarabacak/NCDB-GBM", data_files="18m_data_valid.csv", use_auth_token = HF_TOKEN)
+x3_valid = pd.DataFrame(x3_valid['train'])
+x3_valid = x3_valid.iloc[:, 1:]
+
+x4_valid = load_dataset("mertkarabacak/NCDB-GBM", data_files="24m_data_valid.csv", use_auth_token = HF_TOKEN)
+x4_valid = pd.DataFrame(x4_valid['train'])
+x4_valid = x4_valid.iloc[:, 1:]
+
+
#Define feature names.
f1_names = list(x1.columns)
f1_names = [f1.replace('__', ' - ') for f1 in f1_names]
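Note: the hunk above loads each outcome-specific CSV from the mertkarabacak/NCDB-GBM dataset repo and converts the resulting 'train' split to a pandas DataFrame, dropping the first (index) column. A minimal sketch of that pattern for a single file follows; the repo id and file name come from the diff, while reading HF_TOKEN from the environment is an assumption about how the Space supplies its secret (recent datasets releases also accept token= in place of use_auth_token=).

import os
import pandas as pd
from datasets import load_dataset

HF_TOKEN = os.environ.get("HF_TOKEN")  # assumed: token stored as a Space secret

# A single CSV passed via data_files is exposed under the default 'train' split.
ds = load_dataset("mertkarabacak/NCDB-GBM", data_files="6m_data_train.csv", use_auth_token=HF_TOKEN)
x1 = pd.DataFrame(ds["train"])
x1 = x1.iloc[:, 1:]  # drop the leading index column, as the app code does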
@@ -56,17 +75,28 @@ f4_names = [f4.replace('_', ' ') for f4 in f4_names]


#Prepare training data for the outcome 1.
-
+y1_valid = x1_valid.pop('OUTCOME')

#Prepare training data for the outcome 2.
-
+y2_valid = x2_valid.pop('OUTCOME')

#Prepare training data for the outcome 3.
-
+y3_valid = x3_valid.pop('OUTCOME')

-#Prepare training data for the outcome
-
+#Prepare training data for the outcome 4.
+y4_valid = x4_valid.pop('OUTCOME')
+
+#Prepare validation data for the outcome 1.
+y1_valid = x1_valid.pop('OUTCOME')
+
+#Prepare validation data for the outcome 2.
+y2_valid = x2_valid.pop('OUTCOME')

+#Prepare validation data for the outcome 3.
+y3_valid = x3_valid.pop('OUTCOME')
+
+#Prepare validation data for the outcome 4.
+y4_valid = x4_valid.pop('OUTCOME')

#Training models.

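Note: the hunk above separates the 'OUTCOME' column from each validation feature frame with DataFrame.pop, which removes the column in place and returns it as a Series. A minimal, self-contained sketch of that split; the column name OUTCOME comes from the diff, and the small frame with made-up column names is illustrative only.

import pandas as pd

# Illustrative data only; the real frames come from the NCDB-GBM CSVs.
x_valid = pd.DataFrame({
    "AGE": [61, 72, 55],
    "KPS": [80, 60, 90],
    "OUTCOME": [0, 1, 0],
})

# pop() drops OUTCOME from the features and returns it as the label vector.
y_valid = x_valid.pop("OUTCOME")
print(x_valid.columns.tolist())  # ['AGE', 'KPS']
print(y_valid.tolist())          # [0, 1, 0]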
@@ -77,7 +107,7 @@ y1_model = tabpfn
y1_model = y1_model.fit(x1, y1, overwrite_warning=True)

y1_calib_model = CalibratedClassifierCV(y1_model, method='isotonic', cv='prefit')
-y1_calib_model = y1_calib_model.fit(
+y1_calib_model = y1_calib_model.fit(x1_valid, y1_valid)

y1_explainer = shap.Explainer(y1_model.predict, x1)

@@ -89,7 +119,7 @@ y2_model = tabpfn
y2_model = y2_model.fit(x2, y2, overwrite_warning=True)

y2_calib_model = CalibratedClassifierCV(y2_model, method='isotonic', cv='prefit')
-y2_calib_model = y2_calib_model.fit(
+y2_calib_model = y2_calib_model.fit(x2_valid, y2_valid)

y2_explainer = shap.Explainer(y2_model.predict, x2)

@@ -101,7 +131,7 @@ y3_model = tabpfn
y3_model = y3_model.fit(x3, y3, overwrite_warning=True)

y3_calib_model = CalibratedClassifierCV(y3_model, method='isotonic', cv='prefit')
-y3_calib_model = y3_calib_model.fit(
+y3_calib_model = y3_calib_model.fit(x3_valid, y3_valid)

y3_explainer = shap.Explainer(y3_model.predict, x3)

@@ -113,7 +143,7 @@ y4_model = tabpfn
y4_model = y4_model.fit(x4, y4, overwrite_warning=True)

y4_calib_model = CalibratedClassifierCV(y4_model, method='isotonic', cv='prefit')
-y4_calib_model = y4_calib_model.fit(
+y4_calib_model = y4_calib_model.fit(x4_valid, y4_valid)

y4_explainer = shap.Explainer(y4_model.predict, x4)

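Note: in the four hunks above, each fitted model is wrapped in CalibratedClassifierCV with cv='prefit', so the isotonic calibrator is fitted on the held-out validation split rather than on the training data, and a shap.Explainer is built around the model's predict function with the training features as background. A minimal sketch of that workflow follows; it uses a scikit-learn RandomForestClassifier purely as a stand-in for the TabPFN model in the app, and the data is synthetic.

import numpy as np
import shap
from sklearn.calibration import CalibratedClassifierCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

# Synthetic binary-classification data as a stand-in for the NCDB features.
rng = np.random.default_rng(0)
X = rng.normal(size=(400, 8))
y = (X[:, 0] + X[:, 1] > 0).astype(int)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)

model = RandomForestClassifier(random_state=0).fit(X_train, y_train)

# cv='prefit': calibrate an already-fitted classifier on held-out data.
calib_model = CalibratedClassifierCV(model, method='isotonic', cv='prefit')
calib_model = calib_model.fit(X_valid, y_valid)

# SHAP explanations around the uncalibrated model's predictions,
# using a subset of the training features as the background/masker.
explainer = shap.Explainer(model.predict, X_train[:100])
shap_values = explainer(X_valid[:5])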
@@ -307,7 +337,7 @@ with gr.Blocks(title = "NCDB-Meningioma") as demo:
</tr>
<tr>
<td>6-Month Mortality</td>
-<td>
+<td>TabPFN</td>
<td>0.755 (0.733 - 0.777)</td>
<td>0.767 (0.745 - 0.789)</td>
<td>0.764 (0.742 - 0.786)</td>
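Note: the last hunk edits a row of a static HTML performance table rendered inside the Gradio Blocks app named in the hunk header. A minimal sketch of embedding such a table in a Blocks layout follows; the Blocks title, the row label, the model name, and the first metric value are taken from the diff, while the use of gr.HTML and the column headers are assumptions for illustration.

import gradio as gr

# Illustrative table; the real app renders more rows, models, and metrics.
table_html = """
<table>
  <tr><th>Outcome</th><th>Model</th><th>Metric (95% CI)</th></tr>
  <tr><td>6-Month Mortality</td><td>TabPFN</td><td>0.755 (0.733 - 0.777)</td></tr>
</table>
"""

with gr.Blocks(title = "NCDB-Meningioma") as demo:
    gr.HTML(table_html)  # assumed embedding; a Markdown component would also work

if __name__ == "__main__":
    demo.launch()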