Spaces: Runtime error

Commit 0eba69d · Parent(s): f41ffa8
Update app.py

app.py CHANGED
@@ -24,11 +24,14 @@ basedir = os.path.dirname(__file__)
 #dataOMS = pd.read_csv(basedir + "\\" + f'SnomedOMS.csv')
 #dataICD10 = pd.read_csv(basedir + "\\" + f'ICD10Diagnosis.csv')
 
+filepath = '/Users/konyeach/src/Biomed_NLP_AI/'
+
 dataLOINC = pd.read_csv(f'LoincTableCore.csv')
 dataPanels = pd.read_csv(f'PanelsAndForms-ACW1208Labeled.csv')
 dataSNOMED = pd.read_csv(f'sct2_TextDefinition_Full-en_US1000124_20220901.txt',sep='\t')
 dataOMS = pd.read_csv(f'SnomedOMS.csv')
-dataICD10 = pd.read_csv(f'ICD10Diagnosis.csv')
+dataICD10 = pd.read_csv(f'ICD10Diagnosis.csv')
+dataASMT = pd.read_csv(f'TemplateInfoMessage.csv')
 
 dir_path = os.path.dirname(os.path.realpath(__file__))
 EXAMPLES = {}
@@ -76,6 +79,14 @@ def MatchICD10(name):
     swith=data.loc[data['Description'].str.contains(name, case=False, na=False)]
     return swith
 
+def MatchASMT(name):
+    #basedir = os.path.dirname(__file__)
+    pd.set_option("display.max_rows", None)
+    #data = pd.read_csv(basedir + "\\" + f'LoincTableCore.csv')
+    data = dataLOINC
+    swith=data.loc[data['AssessmentInfo'].str.contains(name, case=False, na=False)]
+    return swith
+
 def SaveResult(text, outputfileName):
     #try:
     basedir = os.path.dirname(__file__)
@@ -86,12 +97,12 @@ def SaveResult(text, outputfileName):
     if file_exists:
         with open(outputfileName, "a") as f: #append
             #for line in text:
-            f.write(str(text.replace("\n","
+            f.write(str(text.replace("\n"," ")))
             f.write('\n')
     else:
         with open(outputfileName, "w") as f: #write
             #for line in text:
-            f.write(str(text.replace("\n","
+            f.write(str(text.replace("\n"," ")))
             f.write('\n')
     #except ValueError as err:
     # raise ValueError("File Save Error in SaveResult \n" + format_tb(err.__traceback__)[0] + err.args[0] + "\nEnd of error message.") from None
@@ -121,7 +132,8 @@ def loadFile(filename):
 
 def get_today_filename():
     from datetime import datetime
-    date = datetime.now().strftime("%Y_%m_%d-%I.%M.%S.%p")
+    # date = datetime.now().strftime("%Y_%m_%d-%I.%M.%S.%p")
+    date = datetime.now().strftime("%Y_%m_%d-%I")
     #print(f"filename_{date}") 'filename_2023_01_12-03-29-22_AM'
     return f"MedNER_{date}.csv"
 
@@ -151,6 +163,7 @@ def group_by_entity(raw):
         g3=MatchSNOMED(eterm)
         g4=MatchOMS(eterm)
         g5=MatchICD10(eterm)
+        g6=MatchASMT(eterm)
         sAll = ""
 
         print("Saving to output file " + outputFile)
@@ -195,6 +208,13 @@ def group_by_entity(raw):
         s5 = ("ICD10," + myEntityGroup + "," + eterm + ",descriptions of ," + g52 + "," + g51 + ", Label,Value, Label,Value, Label,Value ")
         if g51 != 'Series([] )': SaveResult(s5, outputFile)
 
+        #ASMT
+        g61 = g6['ID'].to_string().replace(","," ").replace("\n"," ")
+        g62 = g6['AssessmentName'].to_string().replace(","," ").replace("\n"," ")
+        g63 = g6['AssessmentInfo'].to_string().replace(","," ").replace("\n"," ")
+        s6 = ("ASMT," + myEntityGroup + "," + eterm + ",assessment info ," + g62 + "," + g63 + ", Label,Value, Label,Value, Label,Value ")
+        if g63 != 'Series([] )': SaveResult(s6, outputFile)
+
     except ValueError as err:
         raise ValueError("Error in group by entity \n" + format_tb(err.__traceback__)[0] + err.args[0] + "\nEnd of error message.") from None
 
@@ -227,6 +247,7 @@ def ner(text):
     }
 
     outputFile = group_by_entity(raw)
+    # print("print" + " " + outputFile)
     label = EXAMPLES.get(text, "Unknown")
     outputDataframe = pd.read_csv(outputFile)
     return (ner_content, outputDataframe, outputFile)
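
For context, the new MatchASMT helper uses the same pandas str.contains pattern as the existing Match* functions, and group_by_entity reads the ID, AssessmentName and AssessmentInfo columns from its result (g6). The sketch below is illustrative only: it assumes TemplateInfoMessage.csv carries those columns and, unlike the committed MatchASMT (which points data at dataLOINC), it filters the newly loaded dataASMT frame.

import pandas as pd

# Assumption: TemplateInfoMessage.csv provides ID, AssessmentName and AssessmentInfo
# columns, matching what group_by_entity reads from the match result.
dataASMT = pd.read_csv('TemplateInfoMessage.csv')

def match_asmt(name):
    # Case-insensitive substring match over AssessmentInfo, skipping NaN cells,
    # mirroring the other Match* helpers in app.py.
    return dataASMT.loc[dataASMT['AssessmentInfo'].str.contains(name, case=False, na=False)]

# Example use: build the same comma/newline-free fields that group_by_entity writes out.
g6 = match_asmt('pain')  # 'pain' is just an example search term
g62 = g6['AssessmentName'].to_string().replace(",", " ").replace("\n", " ")
g63 = g6['AssessmentInfo'].to_string().replace(",", " ").replace("\n", " ")
print(g62, g63)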
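
The get_today_filename change only coarsens the timestamp in the MedNER_*.csv name: dropping minutes, seconds and the AM/PM suffix means runs within the same 12-hour-clock hour resolve to the same filename, so SaveResult presumably takes its append branch instead of creating a new file. A quick check of what the two format strings produce (AM/PM rendering depends on locale):

from datetime import datetime

ts = datetime(2023, 1, 12, 15, 29, 22)              # fixed example timestamp
print(ts.strftime("%Y_%m_%d-%I.%M.%S.%p"))          # old format: 2023_01_12-03.29.22.PM
print(ts.strftime("%Y_%m_%d-%I"))                   # new format: 2023_01_12-03
print(f"MedNER_{ts.strftime('%Y_%m_%d-%I')}.csv")   # resulting name: MedNER_2023_01_12-03.csv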