import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1,6"  # GPUs to use
os.environ["HF_HUB_CACHE"] = "/eos/jeodpp/home/users/consose/cache/huggingface/hub"
os.environ["HUGGINGFACE_HUB_CACHE"] = "/eos/jeodpp/home/users/consose/cache/huggingface/hub"
os.environ["HF_HOME"] = "/eos/jeodpp/home/users/consose/cache/huggingface/hub"
from transformers import file_utils
print(file_utils.default_cache_path)
import sys, re
import time
import pandas as pd
from tqdm import tqdm
from concurrent.futures import ThreadPoolExecutor, as_completed
from collections import Counter
from gliner import GLiNER, GLiNERConfig, data_processing
#os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512"
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'
import torch
torch.cuda.empty_cache()  # Clear the torch CUDA cache
import logging
import tiktoken
from langchain.text_splitter import TokenTextSplitter
import requests
from common import strtobool, split_camel_case, chunk_tokens, update_nested_dict, cleanInputText, token_counter, encoding_getter, extract_words, all_words_in_list, row_to_dict_string, strip_quotes, rescale_exponential_to_linear, rescale_exponential_to_logarithmic
from accelerate import Accelerator
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# print(f"Device: {device}...")
# if device.type == "cuda":
#     print("GPU number:", torch.cuda.current_device())
accelerator = Accelerator()
#accelerator = Accelerator(mixed_precision='fp16', device_placement=False)  # fp16 or bf16
device = accelerator.device
print("Using accelerator device = " + str(device))
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import pipeline
from transformers.pipelines.pt_utils import KeyDataset
from virtuosoQueryRest import sparqlQuery
from llmqueryNer import call_model, call_model_with_caching, process_list, setup_gptjrc, api_call_gptjrc, model_list_gptjrc  #api_call_HFonPremises
import string
import datasets
import argparse
import json
import random
import numpy as np
#from retrieverRAG_testing import RAG_retrieval_Base, RAG_retrieval_Z_scores, RAG_retrieval_Percentile, RAG_retrieval_TopK, retrievePassageSimilarities
from retrieverRAG_SF import RAG_retrieval_Base
from joblib import Memory
cachedir = 'cached'
mem = Memory(cachedir, verbose=False)
# this is to completely delete the cache:
# mem.clear(warn=False)
# aa = data_processing.tokenizer.WordsSplitter('whitespace')
# bb = aa("this is what i wanted to say")
# count = 0
# for item in bb:
#     count = count + 1
#     print(item)
# print(count)
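# A minimal sketch (not from the original pipeline) of how the joblib Memory object above can be used to
# memoize an expensive call on disk; the function and values below are purely illustrative assumptions:
#
#   @mem.cache
#   def expensive_square(x):
#       time.sleep(2)  # stand-in for a slow computation
#       return x * x
#
#   expensive_square(3)  # computed and stored under './cached'
#   expensive_square(3)  # served from the cache on subsequent calls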
POSSIBLE_KGchoices_List = ["AI", "AIO", "AEO", "BFO", "BIM", "BCGO", "CL", "CHIRO", "CHEBI", "DCM", "FMA", "GO", "GENO",
                           "GeoSPARQL", "HL7", "DOID", "HP", "HP_O", "IDO", "IAO", "ICD10", "LOINC", "MESH",
                           "MONDO", "NCIT", "NCBITAXON", "NCBITaxon_", "NIFCELL", "NIFSTD", "GML", "OBCS", "OCHV", "OHPI",
                           "OPB", "TRANS", "PLOSTHES", "RADLEX", "RO", "STY", "SO", "SNOMED", "STATO",
                           "SYMP", "FoodOn", "UBERON", "ORDO", "HOOM", "VO", "OGMS", "EuroSciVoc"]
ONLY_Ontologies_OnBIOPORTAL = ["AI", "AIO", "AEO", "BCGO", "BFO", "BIM", "CHEBI", "CHIRO", "CL", "DCM", "DOID", "FMA", "FOODON", "GENO", "GML", "GO", "GEOSPARQL", "HL7", "HP", "HP_O", "IAO", "ICD10", "IDO", "LOINC", "MESH", "MONDO", "NCBITAXON", "NCIT", "NIFCELL", "NIFSTD", "OBCS", "OCHV", "OHPI", "OPB", "PLOSTHES", "RADLEX", "OBOREL", "SNOMEDCT", "SO", "STATO", "STY", "SYMP", "PTRANS", "UBERON", "ORDO", "HOOM", "VO", "OGMS"]
# query = 'A man is eating a piece of bread'
#
# # Define the passages list
# passages = [
#     "A man is eating food.",
#     "A man is eating pasta.",
#     "The girl is carrying a baby.",
#     "A man is riding a horse.",
# ]
#
# df_retr = RAG_retrieval_Base(query, passages, min_threshold=0.5, max_num_passages=3)
# print(df_retr)
# Define a handler for non-serializable objects
def default_serializer(obj):
    if isinstance(obj, np.floating):
        return float(obj)
    raise TypeError(f"Object of type {obj.__class__.__name__} is not JSON serializable")


def is_json(myjson):
    try:
        # json.loads(myjson, default=default_serializer)
        json.dumps(myjson, default=default_serializer)
    except (TypeError, ValueError):
        return False
    return True


def get_filtered_entities(txt_ents):
    filtered_ent_list = []
    for ent_dct in txt_ents:
        if ent_dct["score"] > 0.0:  # 0.5
            if ent_dct["entity_group"]:  # in ["LOC", "MISC"]:
                filtered_ent_list.append(ent_dct)
    return filtered_ent_list
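# Illustrative note: the entity dictionaries consumed by get_filtered_entities follow the transformers
# token-classification output format, e.g. (values are made up):
#
#   sample_ents = [
#       {'entity_group': 'LOC', 'score': 0.99, 'word': 'Milan', 'start': 25, 'end': 30},
#       {'entity_group': '', 'score': 0.80, 'word': 'in', 'start': 22, 'end': 24},  # dropped: empty entity_group
#   ]
#   print(get_filtered_entities(sample_ents))  # keeps only the first entry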
def process_row_Gliner(args, tokenizerGliner, modelGlinerBio, modelGliner, glinerlabels, row):
    context_to_annotate = row[args.source_column]
    tokens = tokenizerGliner.tokenize(context_to_annotate)
    entities = []
    offset = 0
    if "gliner_large_bio" in args.model_id:
        max_chunk_length = modelGlinerBio.config.max_len
    else:
        max_chunk_length = modelGliner.config.max_len
    for chunk in chunk_tokens(tokens, (max_chunk_length - 1)):
        chunk_text = tokenizerGliner.convert_tokens_to_string(chunk)
        if "gliner_large_bio" in args.model_id:
            chunk_entities = modelGlinerBio.predict_entities(chunk_text, glinerlabels,
                                                             threshold=args.entities_filter_threshold)
        else:
            chunk_entities = modelGliner.predict_entities(chunk_text, glinerlabels,
                                                          threshold=args.entities_filter_threshold)
        adjusted_entities = []
        for entity in chunk_entities:
            adjusted_entity = {
                'text': entity['text'],
                'score': entity['score'],
                'start': entity['start'] + offset,
                'end': entity['end'] + offset,
                'label': entity['label']
            }
            adjusted_entities.append(adjusted_entity)
        entities.extend(adjusted_entities)
        offset += len(chunk_text)
    if entities and isinstance(entities, list):
        for d in entities:
            d['entity_group'] = d.pop('label')
            d['word'] = d.pop('text')
            d['entity_group'] = d['entity_group'].upper()
    return row.name, entities
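# Hypothetical usage sketch for process_row_Gliner (argument values are assumptions, not taken from the pipeline):
#
#   row = pd.Series({"context": "Legionellosis cases were reported in Milan in June 2019."}, name=0)
#   # 'args' is expected to expose source_column="context", model_id, and entities_filter_threshold (e.g. 0.5);
#   # tokenizerGliner / modelGliner / modelGlinerBio are the already-loaded GLiNER tokenizer and models.
#   idx, ents = process_row_Gliner(args, tokenizerGliner, modelGlinerBio, modelGliner,
#                                  ["location", "disease", "date"], row)
#   # 'ents' is a list of dicts with keys 'word', 'entity_group', 'score', 'start', 'end', where start/end are
#   # offsets in the re-joined text thanks to the per-chunk offset handling above.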
def process_row_BioPortal_api(args, key_bioportal, row): | |
#context_to_annotate = row[args.source_column] | |
if isinstance(row, list) or isinstance(row, pd.Series): | |
context_to_annotate = row[args.source_column] | |
elif isinstance(row, str): | |
context_to_annotate = row | |
else: | |
raise ValueError("Unsupported type for row. Expected list or string.") | |
url="" | |
if getattr(args, 'KG_restriction', None): | |
# api call | |
if strtobool(args.debug): | |
print("--- BIOPORTAL: " + context_to_annotate) | |
# args.KG_restriction exists and is not empty | |
if strtobool(args.debug): | |
print("KG_restriction is provided and not empty:", args.KG_restriction) | |
onto_clauses = "" | |
for choice in args.KG_restriction: | |
if choice == "SNOMED": | |
choice="SNOMEDCT" | |
elif choice == "RO": | |
choice = "OBOREL" | |
elif choice == "TRANS": | |
choice = "PTRANS" | |
elif choice == "FoodOn": | |
choice = "FOODON" | |
elif choice == "GeoSPARQL": | |
choice = "GEOSPARQL" | |
# elif choice == "NCBITAXON": | |
# choice = "NCBITAXON,NCBITaxon_" | |
elif choice == "NCBITaxon_": | |
choice = "NCBITAXON" | |
if choice in ONLY_Ontologies_OnBIOPORTAL: | |
onto_clauses=onto_clauses+choice+"," | |
if onto_clauses and onto_clauses[-1] == ",": | |
onto_clauses=onto_clauses[:-1] | |
url = f"https://services.data.bioontology.org/annotatorplus/?text={context_to_annotate}&ontologies={onto_clauses}&longest_only=true&exclude_numbers=true&whole_word_only=true&exclude_synonyms=false&negation=false&experiencer=false&temporality=false&score_threshold=0&confidence_threshold=0&display_links=false&display_context=false&score=cvalue&apikey={key_bioportal}" | |
else: | |
kg_restriction = getattr(args, 'KG_restriction', None) | |
if kg_restriction is not None and len(kg_restriction) == 0: | |
print("KG_restriction is provided but empty") | |
return pd.DataFrame() | |
# args.KG_restriction does not exist or is empty | |
if strtobool(args.debug): | |
print("--- BIOPORTAL: " + context_to_annotate) | |
print("KG_restriction is not provided or empty - Consider all the KGs") | |
url = f"https://services.data.bioontology.org/annotatorplus/?text={context_to_annotate}&ontologies=AEO,BFO,BIM,BCGO,CL,CHIRO,CHEBI,DCM,FMA,GO,GENO,GEOSPARQL,HL7,DOID,HP,HP_O,IDO,IAO,ICD10,LOINC,MESH,MONDO,NCIT,NCBITAXON,NIFCELL,NIFSTD,GML,OBCS,OCHV,OHPI,OPB,PTRANS,PLOSTHES,RADLEX,OBOREL,STY,SO,SNOMEDCT,STATO,SYMP,FOODON,UBERON,VO&longest_only=true&exclude_numbers=true&whole_word_only=true&exclude_synonyms=false&negation=false&experiencer=false&temporality=false&score_threshold=0&confidence_threshold=0&display_links=false&display_context=false&score=cvalue&apikey={key_bioportal}" | |
response = requests.get(url) | |
try: | |
data = response.json() | |
if not data: | |
# nothing found from Bioportal | |
return pd.DataFrame() | |
dff = pd.DataFrame(data) | |
dff = dff.drop(columns=['hierarchy', 'mappings']) | |
# If the columns are dictionary-like, use pd.json_normalize: | |
expanded_annotated_class = pd.json_normalize(dff['annotatedClass']) | |
expanded_annotations = pd.DataFrame(dff['annotations'].tolist(), index=dff.index) | |
expanded_annotations = pd.json_normalize(expanded_annotations[0]) | |
# Join the expanded columns back to the original DataFrame | |
df_expanded = dff.drop(columns=['annotatedClass', 'annotations']).join(expanded_annotated_class).join( | |
expanded_annotations) | |
        # Replace the SNOMED base URI because our internal knowledge base uses http://snomed.info/id/ as base URI
df_expanded['@id'] = df_expanded['@id'].str.replace( | |
"http://purl.bioontology.org/ontology/SNOMEDCT/", | |
"http://snomed.info/id/" | |
) | |
return df_expanded | |
    except Exception as err:
        logging.error(
            f'ERROR ON BioPortal Annotator API Call\n\tError: {err}\n TextToAnnotate: {context_to_annotate}\n Please check...')
        return pd.DataFrame()  # empty dataframe
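# Hypothetical usage sketch for process_row_BioPortal_api: a plain string is accepted as 'row'
# (see the isinstance check above), and key_bioportal is the NCBO API key loaded elsewhere:
#
#   df_ncbo = process_row_BioPortal_api(args, key_bioportal, "Legionellosis is a form of atypical pneumonia")
#   # After json_normalize, df_ncbo exposes columns such as 'from', 'to', 'text', 'score', 'prefLabel' and '@id'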
def parallel_process_df_Gliner(args, df, tokenizerGliner, modelGlinerBio, modelGliner, glinerlabels): | |
results = [] | |
if args.num_cores_Gliner > 0: | |
with ThreadPoolExecutor(max_workers=args.num_cores_Gliner) as executor: | |
futures = [ | |
executor.submit( | |
process_row_Gliner, args, tokenizerGliner, modelGlinerBio, modelGliner, glinerlabels, row | |
) | |
for _, row in df.iterrows() | |
] | |
for future in tqdm(futures): | |
drm_idx, entities = future.result() | |
df.at[drm_idx, 'annotation'] = entities | |
else: | |
# Apply the function to each row and extract only the entities part | |
df['annotation'] = df.apply( | |
lambda row: process_row_Gliner(args, tokenizerGliner, modelGlinerBio, modelGliner, glinerlabels, row)[1], | |
axis=1 | |
) | |
return df | |
def annotate(df, args, pipeInner, tokenizerGliner, modelGliner, modelGlinerBio, device="cpu"): | |
if strtobool(args.debug): | |
print("\nAnnotate using " + args.model_id) | |
print("device=" + str(device)) | |
startAnnotate = time.time() | |
if "gliner" in args.model_id: | |
df['model'] = args.model_id | |
df['annotation'] = None | |
glinerlabels = ["location", "disease", "date", "numerical value", "number"] | |
# Parallel CPU computation for Gliner: | |
df = parallel_process_df_Gliner(args, df, tokenizerGliner, modelGlinerBio, modelGliner, glinerlabels) | |
# for drm_idx, row in tqdm(df.iterrows()): | |
# context_to_annotate = row[args.source_column] | |
# | |
# # Tokenize the text | |
# tokens = tokenizerGliner.tokenize(context_to_annotate) | |
# | |
# # Process each chunk and predict entities | |
# entities = [] | |
# offset = 0 # Initialize the offset | |
# | |
# if "gliner_large_bio" in args.model_id: | |
# maxchunckslen=modelGlinerBio.config.max_len | |
# else: | |
# maxchunckslen = modelGliner.config.max_len | |
# for chunk in chunk_tokens(tokens, (maxchunckslen - 1)): | |
# # Convert tokens back to text for the chunk | |
# chunk_text = tokenizerGliner.convert_tokens_to_string(chunk) | |
# # Predict entities for the chunk | |
# if "gliner_large_bio" in args.model_id: | |
# chunk_entities = modelGlinerBio.predict_entities(chunk_text, glinerlabels, threshold=args.entities_filter_threshold) | |
# else: | |
# chunk_entities = modelGliner.predict_entities(chunk_text, glinerlabels, threshold=args.entities_filter_threshold) | |
# | |
# # Adjust the start and end positions of entities to reflect their positions in the original text | |
# adjusted_entities = [] | |
# for entity in chunk_entities: | |
# adjusted_entity = { | |
# 'text': entity['text'], | |
# 'score': entity['score'], | |
# 'start': entity['start'] + offset, | |
# 'end': entity['end'] + offset, | |
# 'label': entity['label'] | |
# } | |
# adjusted_entities.append(adjusted_entity) | |
# | |
# # Append adjusted entities to all_entities | |
# entities.extend(adjusted_entities) | |
# | |
# # Update the offset for the next chunk by adding the length of the current chunk | |
# offset += len(chunk_text) | |
# | |
# # Now `all entities` contains all entities with adjusted positions | |
# if entities and isinstance(entities, list): | |
# | |
# # if strtobool(args.debug): | |
# # for entity in entities: | |
# # print(entity["text"], "=>", entity["label"]) | |
# | |
# for d in entities: | |
# d['entity_group'] = d.pop('label') # Change 'label' to 'entity_group' | |
# d['word'] = d.pop('text') # Change 'text' to 'word' | |
# d['entity_group'] = d['entity_group'].upper() | |
# | |
# df.at[drm_idx, 'annotation'] = entities | |
df_annot = df.explode('annotation').dropna(subset=['annotation']).reset_index(drop=True) | |
elif "NCBO" in args.model_id: #NCBO/BioPortal" annotator | |
#https://data.bioontology.org/documentation#nav_annotator | |
#https://bioportal.bioontology.org/annotatorplus | |
key_bioportal = "" | |
if args.bioportalkey_filename and os.path.exists(args.bioportalkey_filename): | |
fkeyname = args.bioportalkey_filename | |
with open(fkeyname) as f: | |
key_bioportal = f.read() | |
else: | |
key_bioportal = os.environ['key_bioportal'] | |
df_annot = pd.DataFrame() | |
for drm_idx, row in tqdm(df.iterrows()): | |
df_BioPortalAnnotation=process_row_BioPortal_api(args, key_bioportal, row) | |
if not df_BioPortalAnnotation.empty: | |
df_BioPortalAnnotation = df_BioPortalAnnotation.sort_values( | |
by=['from', 'text', 'score', 'matchType'], ascending=[True, True, False, False]) | |
df_biop_minimised = df_BioPortalAnnotation.copy() | |
# Group the dataframe by "from" and "to" columns | |
grouped_biop = df_biop_minimised.groupby(['from', 'to']) | |
# Get the index of the row with the maximum score in each group | |
idx_biop = grouped_biop['score'].idxmax() | |
# Filter the dataframe using these indices | |
df_max_score_biop = df_biop_minimised.loc[idx_biop] | |
# Optional: Reset index if you want a clean index | |
df_max_score_biop = df_max_score_biop.reset_index(drop=True) | |
                # Collect all "@id" values per ('from', 'to') group into a list column
                pippo_lists = grouped_biop['@id'].apply(list).reset_index()
                # Merge the lists back into df_max_score_biop by 'from' and 'to'
                df_max_score_biop = df_max_score_biop.merge(pippo_lists, on=['from', 'to'])
                # Keep the original '@id' column and expose the collected list as 'ALLURIScontextFromNCBO'
                df_max_score_biop = df_max_score_biop.rename(columns={'@id_x': '@id'})
                df_max_score_biop = df_max_score_biop.rename(columns={'@id_y': 'ALLURIScontextFromNCBO'})
                # df_max_score_biop now contains the 'ALLURIScontextFromNCBO' column with lists of "@id" values
                # Keep only the rows where the score is greater than 3.0
                df_max_score_biop = df_max_score_biop[df_max_score_biop['score'] > 3.0]
if "semantic_groups" not in df_max_score_biop.columns: | |
# Drop the '@id' column | |
df_max_score_biop["semantic_groups"] = None | |
# Specify the columns you want to keep | |
columns_to_keep = ["score", "from", "to", "prefLabel", "text", "semantic_groups", "@id", "ALLURIScontextFromNCBO"] | |
# Subset the dataframe to keep only the specified columns | |
df_max_score_biop = df_max_score_biop[columns_to_keep] | |
# Rename the specified columns | |
df_max_score_biop = df_max_score_biop.rename(columns={"from": "start", "to": "end", "text": "word", "semantic_groups": "entity_group"}) | |
# Optional: Reset index if you want a clean index after filtering | |
df_max_score_biop = df_max_score_biop.reset_index(drop=True) | |
df_max_score_biop['score'] = df_max_score_biop['score'].round(2) | |
                # Each 'semantic_groups' / 'entity_group' value is a list of strings: keep only its first element, otherwise leave NaN
                # df_max_score_biop['entity_group'] = df_max_score_biop['entity_group'].apply(lambda x: x[0] if pd.notna(x).all() else x)
                df_max_score_biop['entity_group'] = df_max_score_biop['entity_group'].apply(lambda x: x[0] if isinstance(x, list) and len(x) > 0 else (np.nan if x is None or (isinstance(x, float) and pd.isna(x)) else x))
#df_max_score_biop = df_max_score_biop.dropna(subset=['entity_group']) | |
all_empty_or_nan_or_empty_string = df_max_score_biop['entity_group'].replace('', pd.NA).isna().all() | |
if not all_empty_or_nan_or_empty_string: | |
#print("Is the 'entity_group' column completely NaN, None, or empty strings?", all_empty_or_nan_or_empty_string) | |
# Identify the minimum score | |
min_score_biop = df_max_score_biop['score'].min() | |
# Apply filter to find rows where entity_group is None and score is the minimum one | |
conditionBiop = (df_max_score_biop['entity_group'].isna()) & (df_max_score_biop['score'] == min_score_biop) | |
df_max_score_biop = df_max_score_biop[~conditionBiop] | |
# Replace all NaN values in 'entity_group' with 'BIOP' | |
df_max_score_biop['entity_group'] = df_max_score_biop['entity_group'].fillna('BIOP') | |
# To delete the columns "start" and "end" | |
#df_max_score_biop = df_max_score_biop.drop(columns=['start', 'end', 'word', 'entity_group', 'score', 'prefLabel']) | |
if not df_max_score_biop.empty: | |
row_df = pd.DataFrame([row] * len(df_max_score_biop), columns=row.index) | |
row_df['model'] = args.model_id #'NCBO/BioPortal' | |
df_max_score_biop = pd.concat([row_df.reset_index(drop=True), df_max_score_biop.reset_index(drop=True)], | |
axis=1) | |
df_annot = pd.concat([df_annot, df_max_score_biop], ignore_index=True) | |
        # end of the per-row loop over the input dataframe
if not df_annot.empty: | |
mmax_score = df_annot['score'].max() | |
mmin_score = df_annot['score'].min() | |
if mmax_score == mmin_score: | |
df_annot['score'] = 0.3 | |
df_annot.loc[df_annot['score'].notnull(), 'score'] = 0.7 | |
else: | |
# Apply the transformation function | |
#df_annot = rescale_exponential_to_linear(df_annot, 'score', new_min=0.5, new_max=1.0) | |
df_annot = rescale_exponential_to_logarithmic(df_annot, 'score', new_min=0.7, new_max=1.0) | |
columnsDict = ['start', 'end', 'word', 'entity_group', 'score', 'prefLabel'] | |
df_annot['annotation'] = df_annot.apply(row_to_dict_string, axis=1, columnsDict=columnsDict) | |
# Convert JSON string to a dictionary, if needed | |
df_annot['annotation'] = df_annot['annotation'].apply( | |
lambda x: json.loads(x) if isinstance(x, str) else x | |
) | |
df_annot = df_annot.drop(columns=columnsDict) | |
else: | |
HF_dataset = datasets.Dataset.from_pandas(pd.DataFrame(data=df)) | |
# tsk = "token-classification" | |
# | |
# pipe = pipeline( | |
# tsk, | |
# model=args.model_id, | |
# aggregation_strategy="simple", | |
# device=device, | |
# ) | |
if strtobool(args.debug): | |
print('Annotating texts...') | |
annotated_texts = [ | |
# out for out in tqdm(pipe(KeyDataset(HF_dataset, args.source_column), batch_size=args.batch_size)) | |
# out for out in tqdm(ner_pipeline(KeyDataset(HF_dataset, args.source_column), batch_size=args.batch_size)) | |
out for out in tqdm(pipeInner(KeyDataset(HF_dataset, args.source_column), batch_size=args.batch_size)) | |
] | |
if strtobool(args.debug): | |
print('looping annotations...') | |
df['model'] = args.model_id | |
df['annotation'] = annotated_texts | |
df_annot = df.explode('annotation').dropna(subset=['annotation']).reset_index(drop=True) | |
# # Initialize an empty dataframe with the same columns as df | |
# df_annot = pd.DataFrame(columns=df.columns.tolist()) #+ ['model', 'annotation']) | |
# | |
# for drm_idx, txt_ents in enumerate(tqdm(annotated_texts)): | |
# | |
# filtered_entities = get_filtered_entities(txt_ents) | |
# | |
# for x_ent in filtered_entities: | |
# # Create a new row with the annotation | |
# new_row = df.iloc[drm_idx].copy() | |
# new_row['model'] = args.model_id | |
# new_row['annotation'] = x_ent | |
# df_annot = pd.concat([df_annot, new_row.to_frame().T], ignore_index=True) # Append the new row to the dataframe | |
if strtobool(args.debug): | |
endAnnotate = time.time() | |
hours, rem = divmod(endAnnotate - startAnnotate, 3600) | |
minutes, seconds = divmod(rem, 60) | |
print("...end annotation - Time... {:0>2}:{:0>2}:{:05.2f}\n".format(int(hours), int(minutes), seconds)) | |
print('\n') | |
return df_annot | |
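# Hypothetical call sketch for annotate(); column names and the model choice are illustrative assumptions:
#
#   df_in = pd.DataFrame({"SentenceRef": [0], "context": ["Legionellosis was reported in Milan."], "ToLink": [None]})
#   # For a HF token-classification model, pipeInner is a ready pipeline(...) object and the GLiNER arguments
#   # may be None; for GLiNER or NCBO model_ids, pipeInner is ignored by the branches above.
#   df_out = annotate(df_in, args, pipeInner, tokenizerGliner, modelGliner, modelGlinerBio, device=device)
#   # df_out contains one row per detected entity, with 'model' and 'annotation' columns added.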
### | |
def is_cross_inside(df_sorted, args, valuecutCross=0.75): | |
df_sorted['IsCrossInside'] = 0 | |
    df_sorted = df_sorted.reset_index(drop=True)  # reset the index so that positional indexing in the inner loop below works
# this works but it is absolutely too heavy!!! Got: mask1 = (SentenceRef_array[:, None] == SentenceRef_array) & \ | |
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |
# numpy.core._exceptions._ArrayMemoryError: Unable to allocate 73.4 TiB for an array with shape (8984940, 8984940) and data type bool | |
# | |
# # Assuming df_sorted is your DataFrame | |
# df_sorted['IsCrossInside'] = 0 | |
# | |
# # Convert necessary columns to numpy arrays | |
# SentenceRef_array = df_sorted['SentenceRef'].to_numpy() | |
# start_array = df_sorted['start'].to_numpy() | |
# end_array = df_sorted['end'].to_numpy() | |
# score_array = df_sorted['score'].to_numpy() | |
# ToLink_array = df_sorted['ToLink'].to_numpy() | |
# | |
# # Identify NaN or empty strings in ToLink array | |
# toLink_nan_or_empty = pd.isna(ToLink_array) | (ToLink_array == '') | |
# | |
# # Create the mask for the first set of conditions | |
# mask1 = (SentenceRef_array[:, None] == SentenceRef_array) & \ | |
# (start_array[:, None] <= start_array) & \ | |
# (end_array[:, None] > start_array) & \ | |
# (end_array[:, None] <= end_array) & \ | |
# (score_array[:, None] < score_array) & \ | |
# toLink_nan_or_empty[:, None] | |
# | |
# # Create the mask for the second set of conditions | |
# mask2 = (SentenceRef_array[:, None] == SentenceRef_array) & \ | |
# (end_array[:, None] >= end_array) & \ | |
# (start_array[:, None] < end_array) & \ | |
# (start_array[:, None] >= start_array) & \ | |
# (score_array[:, None] < score_array) & \ | |
# toLink_nan_or_empty[:, None] | |
# | |
# # Combine the masks | |
# mask = mask1 | mask2 | |
# | |
# # Aggregate the masks and set 'IsCrossInside' where the condition is True | |
# df_sorted.loc[mask.any(axis=1), 'IsCrossInside'] = 1 | |
# df_sorted['IsCrossInside'] = 0 | |
# | |
    # # this for loop is highly inefficient... look for more optimized alternatives!!!
# for index, row in df_sorted.iterrows(): | |
# | |
# print(row) | |
# | |
# mask = (df_sorted['SentenceRef'] == row['SentenceRef']) & (df_sorted['start'] <= row['start']) & (df_sorted['end'] > row['start']) & ( | |
# df_sorted['end'] <= row['end']) & (df_sorted['score'] < row['score']) & (df_sorted['ToLink'].isnull() | (df_sorted['ToLink'] == '')) | |
# mask |= (df_sorted['SentenceRef'] == row['SentenceRef']) & (df_sorted['end'] >= row['end']) & (df_sorted['start'] < row['end']) & ( | |
# df_sorted['start'] >= row['start']) & (df_sorted['score'] < row['score']) & (df_sorted['ToLink'].isnull() | (df_sorted['ToLink'] == '')) | |
# | |
# df_sorted.loc[mask, 'IsCrossInside'] = 1 | |
# Iterate over the DataFrame with an outer loop - I know that df_sorted is ordered by 'SentenceRef' and 'start' | |
indexes_list = [] | |
scores_list = [] | |
IsToLinkContained = [] | |
for i, row_outer in tqdm(df_sorted.iterrows()): | |
#print(row_outer) | |
        # if (i > 0) and (df_sorted.iloc[i-1]['SentenceRef'] == row_outer['SentenceRef']):  # if the previous row has the same SentenceRef, it has already been computed, so skip it
        #     continue
if len(indexes_list)==0: | |
scores_list.append(row_outer['score']) | |
indexes_list.append(i) | |
            if (pd.isnull(row_outer['ToLink']) == False) and (len(row_outer['ToLink']) > 0):
                IsToLinkContained.append(True)
            else:
                IsToLinkContained.append(False)
# if (pd.isnull(row_outer['ToLink']) or row_outer['ToLink'] == ''): | |
# IsToLinkContained.append(False) | |
# elif len(row_outer['ToLink'])>0: | |
# IsToLinkContained.append(True) | |
else: | |
if i in indexes_list: | |
if (i == indexes_list[-1]): | |
# indexes_list = [] | |
# scores_list = [] | |
# IsToLinkContained = [] | |
indexes_list = [indexes_list[-1]] | |
scores_list = [scores_list[-1]] | |
IsToLinkContained = [IsToLinkContained[-1]] | |
else: | |
continue | |
# Inner loop only needs to consider rows starting from the current position | |
for j in range(i + 1, len(df_sorted)): | |
#for j in range(0, len(df_sorted)): | |
# if i==j: continue | |
#print(j) | |
row_inner = df_sorted.iloc[j] | |
# Break the inner loop if SentenceRef changes (due to sorting) | |
if row_inner['SentenceRef'] != row_outer['SentenceRef']: | |
break | |
elif row_inner['start'] >= row_outer['end']: | |
break | |
else: | |
scores_list.append(row_inner['score']) | |
indexes_list.append(j) | |
                if (pd.isnull(row_inner['ToLink']) == False) and (len(row_inner['ToLink']) > 0):
                    IsToLinkContained.append(True)
                else:
                    IsToLinkContained.append(False)
if len(indexes_list)>1: | |
first_true_index = -1 | |
try: | |
first_true_index = IsToLinkContained.index(True) | |
#print(f"The first index with value True is: {first_true_index}") | |
except ValueError: | |
first_true_index = -1 | |
#print("No True value found in the list") | |
topinlist=-1 | |
if first_true_index >=0: | |
topinlist = first_true_index | |
else: | |
topinlist = scores_list.index(max(scores_list)) | |
#print(f"The position with the maximum score is: {topinlist}") | |
if topinlist >= 0: | |
for xx in range(0, len(indexes_list)): | |
if xx == topinlist: | |
continue | |
df_sorted.at[indexes_list[xx], 'IsCrossInside'] = 1 | |
else: | |
indexes_list = [] | |
scores_list = [] | |
IsToLinkContained = [] | |
    # Now drop all the identified words that are strictly contained within another one. For example, drop "osis" from "Legionellosis"
if not df_sorted.empty: | |
# punctuation_and_space = set(string.punctuation + ' ') | |
# condition = df_sorted.apply(lambda row: | |
# (row['IsCrossInside'] == 0) or | |
# ((row['IsCrossInside'] == 1) and | |
# ((row[args.source_column][row['start'] - 1] in punctuation_and_space) if row[ | |
# 'start'] - 1 >= 0 else True) and | |
# ((row[args.source_column][row['end']] in punctuation_and_space) if row['end'] <= len( | |
# row[args.source_column]) else True)), | |
# axis=1) | |
condition = df_sorted.apply(lambda row: | |
(row['IsCrossInside'] == 0) or | |
( (row['IsCrossInside'] == 1) and (row['score'] >=valuecutCross )), | |
axis=1) | |
# Filter the DataFrame to keep only rows where the condition is False | |
df_sorted = df_sorted[condition] | |
return df_sorted | |
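# Worked example of the overlap rule above (scores are illustrative): if the same sentence yields both
#   "Legionellosis"  start=0, end=13, score=0.95
#   "osis"           start=9, end=13, score=0.40
# the lower-scoring span overlapping the kept one gets IsCrossInside = 1 and is removed by the final filter,
# unless its score reaches valuecutCross (default 0.75); a span with a non-empty 'ToLink' is preferred as the keeper.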
def entitiesFusion(df_annotated, args): | |
if strtobool(args.debug): | |
print("\nStart entities fusion and filtering ...") | |
areJson = df_annotated["annotation"].apply(is_json) | |
if False in areJson.unique(): | |
for idxr, rr in df_annotated.iterrows(): | |
# for idxr in range(df_annotated["annotation"].shape[0]): | |
if areJson[idxr] == False: | |
print("PROBLEM WITH JSON AT INDEX " + str(idxr) + ":\n" + df_annotated["annotation"][idxr]) | |
replacement_empty_myjson = '{\"entity_group\": \"\", \"score\": \"\", "word": \"\", \"start\": \"\", \"end\": \"\"}' # {'entity_group': 'ORG', 'score': 0.9999951, 'word': 'Health Ministry', 'start': 4, 'end': 19} | |
df_annotated.at[idxr, "annotation"] = replacement_empty_myjson | |
print(" ...... Then replacing it with empty JSON --> " + df_annotated["annotation"][idxr]) | |
try: | |
# df_extract = df_annotated.apply(lambda x: pd.Series( | |
# json.loads(x['annotation'], default=default_serializer).values(), | |
# index=json.loads(x['annotation'], default=default_serializer).keys()), axis=1) | |
df_extract = df_annotated.apply(lambda x: pd.Series(x['annotation'].values(), | |
index=x['annotation'].keys()), axis=1) | |
# Check if '@id' column exists in df_Extract | |
if '@id' in df_extract.columns: | |
# Drop the '@id' column | |
df_extract = df_extract.drop(columns='@id') | |
df_annotated = pd.merge(df_annotated, df_extract, left_index=True, right_index=True) | |
    except Exception as err:
        logging.error(
            f'FAILED to extract json results\n\tError: {err}\nLeaving the annotation as a single column and not expanding it. Please check...')
# | |
# Delete all the rows where EXACT MATCHING NOT MET: | |
# Apply the conditions | |
condition_to_delete = ( | |
df_annotated[args.source_column].str.startswith('"') & | |
df_annotated[args.source_column].str.endswith('"') & | |
(df_annotated[args.source_column].apply(strip_quotes).str.lower() != df_annotated['word'].str.lower()) | |
) | |
# Now Filter out the rows where condition_to_delete is True | |
df_annotated = df_annotated[~condition_to_delete].copy() | |
# | |
#delete all the rows with score smaller than entities_filter_threshold: | |
if args.entities_filter_threshold > 0: | |
#df_annotated = df_annotated[df_annotated['score'] >= args.entities_filter_threshold] | |
df_annotated = df_annotated[df_annotated['score'] > args.entities_filter_threshold] | |
if df_annotated.empty: | |
return df_annotated | |
# #delete all the rows where the concept that was asked to link, in the column "ToLink", is present and it is different from the column "word", that was recognised | |
# df_annotated = df_annotated[(df_annotated['ToLink'] == df_annotated['word']) | df_annotated['ToLink'].isna()] | |
# in all the rows having a value not null for the column "ToLink", compare this value to that of the column "word". If they are different, set the value in "ToLink" to None | |
# df_annotated.loc[ | |
# (~df_annotated['ToLink'].isnull()) & (df_annotated['ToLink'] != df_annotated['word']), 'ToLink'] = None | |
df_annotated.loc[ | |
(~df_annotated['ToLink'].isnull()) & ( | |
df_annotated['ToLink'].str.casefold() != df_annotated['word'].str.casefold()), 'ToLink'] = None | |
# now fill all the values of the column "toLink" that are empty with the values of the row "word": | |
# df_annotated['ToLink'] = df_annotated['ToLink'].fillna(df_annotated['word']) | |
if "IsGeo" not in df_annotated.columns: | |
#df_annotated["IsGeo"] = None # 0 | |
df_annotated.loc[:, "IsGeo"] = None | |
if "IsBio" not in df_annotated.columns: | |
#df_annotated["IsBio"] = None # 0 | |
df_annotated.loc[:, "IsBio"] = None | |
df_annotated.loc[df_annotated['entity_group'] == 'LOCATION', 'entity_group'] = "LOC" | |
df_annotated.loc[df_annotated['entity_group'] == 'LOC', 'IsGeo'] = 1 | |
#df_annotated.loc[df_annotated['entity_group'] == 'DISEASE', 'IsBio'] = 1 | |
df_annotated.loc[df_annotated['entity_group'].str.lower().str.contains('disease'), 'IsBio'] = 1 | |
df_annotated.loc[(df_annotated['model'].str.contains('Medical-NER')) & ( | |
df_annotated['entity_group'].isin(['LOC', 'DATE', 'PER', 'ORG', 'DOSAGE', 'LAB_VALUE', 'DURATION']) == False), 'IsBio'] = 1 | |
df_annotated.loc[(df_annotated['model'].str.contains('NCBO')) & ( | |
df_annotated['entity_group'].isin(['CONC']) == False), 'IsBio'] = 1 | |
    # NOTE: NCBO BioPortal reports 'start' with a +1 offset, so subtract one here to align offsets before dropping duplicates and taking max scores
    df_annotated.loc[df_annotated['model'].str.lower().str.contains('ncbo'), 'start'] -= 1
    # NOTE: Medical-NER reports a 'start' value one position before the real one when the identified entity starts with a space, so add one to align offsets before dropping duplicates and taking max scores
    df_annotated.loc[(df_annotated['model'] == 'blaze999/Medical-NER') &
                     df_annotated.apply(lambda row: row[args.source_column][row['start']] == ' ',
                                        axis=1), 'start'] += 1
    # NOTE: GLiNER models report 'start' and 'end' one position before the real ones when the identified entity starts with a space, so add one to both to align offsets before dropping duplicates and taking max scores
    df_annotated.loc[df_annotated['model'].str.lower().str.contains('gliner') &
                     df_annotated.apply(lambda row: row[args.source_column][row['start']] == ' ',
                                        axis=1), 'end'] += 1
    df_annotated.loc[df_annotated['model'].str.lower().str.contains('gliner') &
                     df_annotated.apply(lambda row: row[args.source_column][row['start']] == ' ',
                                        axis=1), 'start'] += 1
#### Here I drop all the identified NER words which are strictly contained in other words | |
# Apply first the function to the "args.source_column" to create lists of words | |
df_annotated['extracted_words'] = df_annotated[args.source_column].apply(extract_words,putInLower=True) | |
# Filter the DataFrame | |
df_annotated = df_annotated[df_annotated.apply(lambda row: all_words_in_list(row['word'], row['extracted_words'], putInLower=True), axis=1)] | |
# drop the 'extracted_words' column afterwards: | |
df_annotated = df_annotated.drop(columns=['extracted_words']) | |
##### | |
# This operation sorts the DataFrame by the "ToLink" column in descending order, with null values at the end, and then uses the drop_duplicates method to drop all duplicate rows, | |
# except the first one, based on all columns except "ToLink". | |
# This way, it will keep the rows with the non-null value in "ToLink" if there are multiple rows with the same values in all columns except "ToLink". | |
df_annotated = df_annotated.sort_values(by='ToLink', ascending=False, na_position='last') | |
for col in df_annotated.columns: | |
if df_annotated[col].apply(lambda x: isinstance(x, dict)).any(): | |
if strtobool(args.debug): | |
print( | |
f"Column '{col}' contains dictionaries...converting it to strings otherwise it will not work the concat etc..") | |
df_annotated[col] = df_annotated[col].apply(lambda x: str(x)) | |
df_annotated = df_annotated.drop_duplicates(subset=[col for col in df_annotated.columns if | |
col != 'ToLink' and col != 'ALLURIScontextFromNCBO' and not df_annotated[col].apply( | |
lambda x: isinstance(x, dict)).any()], keep='first') | |
# df_annotated = df_annotated.loc[df_annotated.groupby(['SentenceRef', 'ToLink', args.source_column, 'end', 'start', df_annotated['word'].str.lower()])['score'].idxmax()] | |
# df_annotated = df_annotated.loc[df_annotated.groupby(['SentenceRef', args.source_column, 'end', 'start', df_annotated['word'].str.lower()])['score'].idxmax()] | |
df_annotated_Geo = df_annotated.loc[df_annotated.groupby( | |
['SentenceRef', args.source_column, 'end', 'start', df_annotated['word'].str.lower(), 'IsGeo'])[ | |
'score'].idxmax()] | |
df_annotated_Bio = df_annotated.loc[df_annotated.groupby( | |
['SentenceRef', args.source_column, 'end', 'start', df_annotated['word'].str.lower(), 'IsBio'])[ | |
'score'].idxmax()] | |
df_annotated_all = df_annotated.loc[ | |
df_annotated.groupby(['SentenceRef', args.source_column, 'end', 'start', df_annotated['word'].str.lower()])[ | |
'score'].idxmax()] | |
# now you can concat | |
df_annotated_combined = pd.concat([df_annotated_Geo, df_annotated_Bio, df_annotated_all]) | |
df_annotated_combined = df_annotated_combined.drop_duplicates(subset=[col for col in df_annotated_combined.columns if | |
col != 'ToLink' and col != 'ALLURIScontextFromNCBO' and not df_annotated_combined[col].apply( | |
lambda x: isinstance(x, dict)).any()], keep='first') | |
# df_annotated_combined['IsBioGeo'] = df_annotated_combined['IsGeo'].fillna(0) + df_annotated_combined['IsBio'].fillna(0) | |
# df_annotated_combined['IsBioGeo'] = pd.to_numeric(df_annotated_combined['IsGeo'], errors='coerce').fillna(0) + pd.to_numeric(df_annotated_combined['IsBio'], errors='coerce').fillna(0) | |
# df_annotated_combined['IsBioGeo'] = df_annotated_combined['IsGeo'].infer_objects(copy=False).fillna(0) + df_annotated_combined['IsBio'].infer_objects(copy=False).fillna(0) | |
df_annotated_combined.loc[:, "IsBioGeo"] = df_annotated_combined.loc[:, 'IsGeo'].infer_objects(copy=False).fillna(0) + df_annotated_combined.loc[:, 'IsBio'].infer_objects(copy=False).fillna(0) | |
df_annotated_combined = df_annotated_combined.loc[df_annotated_combined.groupby( | |
['SentenceRef', args.source_column, 'end', 'start', df_annotated['word'].str.lower()])['IsBioGeo'].idxmax()] | |
df_annotated_combined = df_annotated_combined.loc[ | |
df_annotated_combined.groupby( | |
['SentenceRef', args.source_column, 'end', 'start', df_annotated['word'].str.lower(), 'IsBioGeo'])[ | |
'score'].idxmax()] | |
df_annotated_combined = df_annotated_combined.drop('IsBioGeo', axis=1) | |
df_annotated_combined.loc[df_annotated_combined['IsBio'] == 0, 'IsBio'] = None | |
df_annotated_combined.loc[df_annotated_combined['IsGeo'] == 0, 'IsGeo'] = None | |
df_annotated_combined = df_annotated_combined.sort_values(by=['SentenceRef', 'start', 'ToLink', 'word', 'score'], ascending=[True, True, True, True, False]) | |
#df_annotated_combined = df_annotated_combined.reindex(range(len(df_annotated_combined))) | |
return df_annotated_combined | |
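# Sketch of the fusion preference above on a single ('SentenceRef', text, start, end, word) span
# annotated by two models (numbers are illustrative):
#   model A -> entity_group='DISEASE', IsBio=1,    score=0.80
#   model B -> entity_group='MISC',    IsBio=None, score=0.90
# The groupby(...)['IsBioGeo'].idxmax() step keeps the Bio/Geo-flagged annotation (model A); only among
# equally flagged candidates does the subsequent groupby(...)['score'].idxmax() keep the highest score.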
def geonames_api_call(word,args, key_geonames,cache_map_geonames): | |
context = "" | |
singleContext = None | |
globalContext = None | |
singleTriples = None | |
globalTriples = None | |
if cache_map_geonames is not None: | |
if word in cache_map_geonames: | |
if context in cache_map_geonames[word]: | |
url_text = cache_map_geonames[word][context] | |
if strtobool(args.debug): | |
print("RETRIEVED CACHED RESULT FOR:\n", word, " => ", url_text, "\n") | |
return url_text, singleContext, globalContext, singleTriples, globalTriples, cache_map_geonames | |
#url = f"http://api.geonames.org/search?name_equals={word}&maxRows=1&featureClass=A&featureCode=ADM1&featureCode=ADM2&featureCode=ADM3&featureCode=ADM4&featureCode=ADM5&type=json&username={key_geonames}" | |
url = f"http://api.geonames.org/search?name_equals={word}&maxRows=1&type=json&username={key_geonames}" | |
response = requests.get(url) | |
try: | |
data = response.json() | |
if data['geonames']: | |
# geoname = data['geonames'][0]['name'] | |
geonameId = data['geonames'][0]['geonameId'] | |
geonameUrl = "https://sws.geonames.org/" + str(geonameId) + "/" | |
if cache_map_geonames is not None: | |
if not word in cache_map_geonames: | |
cache_map_geonames[word] = {} | |
cache_map_geonames[word][context] = geonameUrl | |
return geonameUrl, singleContext, globalContext, singleTriples, globalTriples, cache_map_geonames | |
else: | |
if cache_map_geonames is not None: | |
if not word in cache_map_geonames: | |
cache_map_geonames[word] = {} | |
cache_map_geonames[word][context] = None | |
return None, singleContext, globalContext, singleTriples, globalTriples, cache_map_geonames | |
except Exception as err: | |
# if cache_map_geonames is not None: | |
# if not word in cache_map_geonames: | |
# cache_map_geonames[word] = {} | |
# cache_map_geonames[word][context] = None | |
return None, singleContext, globalContext, singleTriples, globalTriples, cache_map_geonames | |
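# Hypothetical usage of geonames_api_call: the GeoNames JSON payload exposes data['geonames'][0]['geonameId'],
# which the function maps to a https://sws.geonames.org/<id>/ URI (see above), or returns None when nothing matches:
#
#   url_geo, _, _, _, _, cache_map_geonames = geonames_api_call("Milan", args, key_geonames, cache_map_geonames)
#   # e.g. url_geo -> "https://sws.geonames.org/<geonameId>/"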
def getLinearTextualContextFromTriples(word,labelTriplesLIST, text_splitter, args, map_query_input_output, cleanInput=True, questionText=""): | |
# trial | |
#return None, map_query_input_output | |
word = word.lower() | |
word = word.capitalize() | |
labelTriples="" | |
if labelTriplesLIST and getattr(args, 'maxTriplesContextComputation', None): # it means it exists | |
if args.maxTriplesContextComputation > 0: | |
if len(labelTriplesLIST) > args.maxTriplesContextComputation: | |
labelTriplesLIST = labelTriplesLIST[:args.maxTriplesContextComputation] | |
if (strtobool(args.UseRetrieverForContextCreation) == True): | |
# if strtobool(args.debug): | |
# print("Start reranking - num passages : ", len(labelTriplesLIST), "\n") | |
# startRerank = time.time() | |
# | |
# labelTriples = "" | |
# passages = [] | |
# nn = 200 | |
# | |
# OverallListRAGtriples = [] | |
# labelTriplesLIST_RAGGED = [] | |
# | |
# if len(labelTriplesLIST) <= nn: | |
# passages = [] | |
# for i, triple in enumerate(labelTriplesLIST, start=1): | |
# # for triple in labelTriplesLIST: | |
# TriplesString = (" ".join(str(element).capitalize() for element in triple)) | |
# passages.append(TriplesString) | |
# | |
# df_retrieved = RAG_retrieval_TopK(questionText, passages, top_fraction=0.1, max_num_passages=20, | |
# min_threshold=0.7) | |
# | |
# if not df_retrieved.empty: | |
# # labelTriplesLIST_RAGGED = df_retrieved.to_records(index=False).tolist() | |
# labelTriplesLIST_RAGGED = df_retrieved['Passage'].apply(lambda x: (x,)).tolist() | |
# labelTriplesAPP = ". ".join( | |
# " ".join(str(element).capitalize() for element in triple) for triple in labelTriplesLIST_RAGGED) | |
# | |
# if not labelTriples: | |
# labelTriples = labelTriplesAPP | |
# else: | |
# labelTriples = labelTriples + ". " + labelTriplesAPP | |
# | |
# else: | |
# | |
# OverallListRAGtriples = labelTriplesLIST.copy() | |
# | |
# while len(OverallListRAGtriples) > nn: | |
# Oinnerlistiterative = [] | |
# for i, triple in enumerate(OverallListRAGtriples, start=1): | |
# # for triple in labelTriplesLIST: | |
# TriplesString = (" ".join(str(element).capitalize() for element in triple)) | |
# passages.append(TriplesString) | |
# # Check if the current index is a multiple of nn | |
# if i % nn == 0: | |
# # print("elaborate RAG triples") | |
# | |
# # df_retrieved_Base = RAG_retrieval_Base(questionText, passages, min_threshold=0.7, max_num_passages=20) | |
# # df_retrievedZscore = RAG_retrieval_Z_scores(questionText, passages, z_threshold=1.0, max_num_passages=20, min_threshold=0.7) | |
# # df_retrievedPercentile = RAG_retrieval_Percentile(questionText, passages, percentile=90, max_num_passages=20, min_threshold=0.7) | |
# df_retrievedtopk = RAG_retrieval_TopK(questionText, passages, top_fraction=0.1, | |
# max_num_passages=20, | |
# min_threshold=0.7) | |
# | |
# passages = [] | |
# | |
# df_retrieved = df_retrievedtopk.copy() | |
# if not df_retrieved.empty: | |
# # labelTriplesLIST_RAGGED = df_retrieved.to_records(index=False).tolist() | |
# labelTriplesLIST_RAGGED = df_retrieved['Passage'].apply(lambda x: (x,)).tolist() | |
# if not Oinnerlistiterative: | |
# Oinnerlistiterative = labelTriplesLIST_RAGGED | |
# else: | |
# Oinnerlistiterative.extend(labelTriplesLIST_RAGGED) | |
# | |
# if passages: | |
# df_retrievedtopk = RAG_retrieval_TopK(questionText, passages, top_fraction=0.1, max_num_passages=20, | |
# min_threshold=0.7) | |
# | |
# df_retrieved = df_retrievedtopk.copy() | |
# if not df_retrieved.empty: | |
# # labelTriplesLIST_RAGGED = df_retrieved.to_records(index=False).tolist() | |
# labelTriplesLIST_RAGGED = df_retrieved['Passage'].apply(lambda x: (x,)).tolist() | |
# if not Oinnerlistiterative: | |
# Oinnerlistiterative = labelTriplesLIST_RAGGED | |
# else: | |
# Oinnerlistiterative.extend(labelTriplesLIST_RAGGED) | |
# | |
# OverallListRAGtriples = Oinnerlistiterative.copy() | |
# | |
# if OverallListRAGtriples: | |
# labelTriplesAPP = ". ".join( | |
# " ".join(str(element).capitalize() for element in triple) for triple in OverallListRAGtriples) | |
# | |
# if not labelTriples: | |
# labelTriples = labelTriplesAPP | |
# else: | |
# labelTriples = labelTriples + ". " + labelTriplesAPP | |
# | |
# labelTriples = labelTriples.strip().replace("..", ".").strip() | |
# | |
# # labelTriples = "" | |
# # passages = [] | |
# # nn=200 | |
# # for i, triple in enumerate(labelTriplesLIST, start=1): | |
# # #for triple in labelTriplesLIST: | |
# # TriplesString = (" ".join(str(element).capitalize() for element in triple)) | |
# # passages.append(TriplesString) | |
# # # Check if the current index is a multiple of nn | |
# # if i % nn == 0: | |
# # #print("elaborate RAG triples") | |
# # | |
# # #df_retrieved_Base = RAG_retrieval_Base(questionText, passages, min_threshold=0.7, max_num_passages=20) | |
# # #df_retrievedZscore = RAG_retrieval_Z_scores(questionText, passages, z_threshold=1.0, max_num_passages=20, min_threshold=0.7) | |
# # #df_retrievedPercentile = RAG_retrieval_Percentile(questionText, passages, percentile=90, max_num_passages=20, min_threshold=0.7) | |
# # df_retrievedtopk = RAG_retrieval_TopK(questionText, passages, top_fraction=0.1, max_num_passages=20, min_threshold=0.7) | |
# # | |
# # passages = [] | |
# # | |
# # df_retrieved = df_retrievedtopk.copy() | |
# # if not df_retrieved.empty: | |
# # #labelTriplesLIST_RAGGED = df_retrieved.to_records(index=False).tolist() | |
# # labelTriplesLIST_RAGGED = df_retrieved['Passage'].apply(lambda x: (x,)).tolist() | |
# # labelTriplesAPP = ". ".join(" ".join(str(element).capitalize() for element in triple) for triple in labelTriplesLIST_RAGGED) | |
# # | |
# # if not labelTriples: | |
# # labelTriples =labelTriplesAPP | |
# # else: | |
# # labelTriples = labelTriples + ". " + labelTriplesAPP | |
# # | |
# # if passages: | |
# # df_retrievedtopk = RAG_retrieval_TopK(questionText, passages, top_fraction=0.1, max_num_passages=20, min_threshold=0.7) | |
# # | |
# # df_retrieved = df_retrievedtopk.copy() | |
# # if not df_retrieved.empty: | |
# # #labelTriplesLIST_RAGGED = df_retrieved.to_records(index=False).tolist() | |
# # labelTriplesLIST_RAGGED = df_retrieved['Passage'].apply(lambda x: (x,)).tolist() | |
# # labelTriplesAPP = ". ".join(" ".join(str(element).capitalize() for element in triple) for triple in labelTriplesLIST_RAGGED) | |
# # if not labelTriples: | |
# # labelTriples = labelTriplesAPP | |
# # else: | |
# # labelTriples = labelTriples + ". " + labelTriplesAPP | |
# # | |
# # if labelTriples: | |
# # labelTriples.strip().replace("..",".").strip() | |
# | |
# if strtobool(args.debug): | |
# numfinal = 0 | |
# if OverallListRAGtriples: | |
# numfinal = len(OverallListRAGtriples) | |
# elif labelTriplesLIST_RAGGED: | |
# numfinal = len(labelTriplesLIST_RAGGED) | |
# print("End reranking - found final passages : ", numfinal, "\n") | |
# # | |
# endRerank = time.time() | |
# hours, rem = divmod(endRerank - startRerank, 3600) | |
# minutes, seconds = divmod(rem, 60) | |
# print("Rerank Time... {:0>2}:{:0>2}:{:05.2f}\n".format(int(hours), int(minutes), seconds)) | |
# # | |
# if len(labelTriplesLIST) > 10000: | |
# print("debug") | |
if strtobool(args.debug): | |
print("Start reranking2 - num passages : ", len(labelTriplesLIST), "\n") | |
startRerank2 = time.time() | |
labelTriples = "" | |
try: | |
passages = [] | |
for i, triple in enumerate(labelTriplesLIST, start=1): | |
# for triple in labelTriplesLIST: | |
TriplesString = (" ".join(str(element).capitalize() for element in triple)) | |
passages.append(TriplesString) | |
nback = 1 | |
if len(passages) <= 10: | |
nback = len(passages) | |
elif len(passages) <= 1000: | |
nback = 10+int(0.1 * len(passages)) # 10% of the number of passages | |
elif len(passages) <= 5000: | |
nback = 200 | |
elif len(passages) <= 10000: | |
nback = 300 | |
else: | |
nback = 400 | |
df_retrieved = RAG_retrieval_Base(questionText, passages, min_threshold=0, max_num_passages=nback) | |
if not df_retrieved.empty: | |
countRetr = 0 | |
min_threshold = 0.80 | |
countRetr = (df_retrieved['score'] > min_threshold).sum() | |
countRetrThreshold = int(nback / 2) | |
if nback > 10: | |
countRetrThreshold = 10 | |
else: | |
countRetrThreshold = int(nback/2) | |
if countRetrThreshold <=0: | |
countRetrThreshold = 1 | |
while countRetr <= countRetrThreshold: | |
min_threshold = min_threshold - 0.05 | |
countRetr = (df_retrieved['score'] >= min_threshold).sum() | |
if min_threshold < 0.2: | |
break | |
# countRetrThreshold = int(0.1 + nback) | |
# if countRetrThreshold > 5: | |
# countRetrThreshold = 5 | |
# | |
# countRetr=0 | |
# min_threshold = 0.90 | |
# countRetr = (df_retrieved['score'] > min_threshold).sum() | |
# while countRetr<=countRetrThreshold: | |
# min_threshold = min_threshold - 0.05 | |
# if min_threshold<0.7: | |
# countRetrThreshold=0 | |
# if min_threshold == 0: | |
# min_threshold = 0.01 | |
# countRetr = (df_retrieved['score'] > min_threshold).sum() | |
# if min_threshold <= 0.01: | |
# break | |
if countRetr > 0: | |
df_retrieved = df_retrieved[df_retrieved['score'] > min_threshold] | |
# labelTriplesLIST_RAGGED = df_retrieved.to_records(index=False).tolist() | |
labelTriplesLIST_RAGGED = df_retrieved['Passage'].apply(lambda x: (x,)).tolist() | |
labelTriplesAPP = ". ".join( | |
" ".join(str(element).capitalize() for element in triple) for triple in labelTriplesLIST_RAGGED) | |
if not labelTriples: | |
labelTriples = labelTriplesAPP | |
else: | |
labelTriples = labelTriples + ". " + labelTriplesAPP | |
else: | |
labelTriplesLIST_RAGGED = [] | |
labelTriples = "" | |
if strtobool(args.debug): | |
numfinal = 0 | |
if labelTriplesLIST_RAGGED: | |
numfinal = len(labelTriplesLIST_RAGGED) | |
print("End reranking2 - found final passages : ", numfinal, "\n") | |
endRerank2 = time.time() | |
hours, rem = divmod(endRerank2 - startRerank2, 3600) | |
minutes, seconds = divmod(rem, 60) | |
print("Rerank2 Time... {:0>2}:{:0>2}:{:05.2f}\n".format(int(hours), int(minutes), seconds)) | |
# | |
except Exception as err: | |
print("SOMETHING HAPPENED on PASSAGE RERANKING for Question :"+questionText+"\n") | |
print(err) | |
#status_code: 422, body: type='validation_error' url='https://www.mixedbread.ai/api-reference' message='Your request is invalid. Please check your input and try again.' details=[[{'type': 'too_long', 'loc': ['body', 'input', 'list[str]'], 'msg': 'List should have at most 1000 items after validation, not 4249', | |
else: # NO RAG on triples | |
labelTriples = ". ".join(" ".join(str(element).capitalize() for element in triple) for triple in labelTriplesLIST) | |
    if not labelTriples or labelTriples.strip() == "":
        logging.warning("getLinearTextualContextFromTriples - No text or prompt supplied! No relevant contextual triples retrieved... Skipping it! Word: " + str(word))
        return "", map_query_input_output
    if token_counter(labelTriples, args.model_name) > args.tokens_max:  # the context is bigger than tokens_max, so it needs to be split
        texts = text_splitter.create_documents([labelTriples])
        labelTriples = texts[0].page_content
        if not labelTriples or labelTriples.strip() == "":
            logging.warning("after splitting ... No text or prompt supplied! Skipping it! Word: " + str(word))
            return "", map_query_input_output
contextText = "" | |
if (strtobool(args.UseRetrieverForContextCreation) == True): | |
contextText = labelTriples | |
else: #USE the LLM for summarise the triples | |
# Can you elaborate and express better the following notes, delimited by triple backticks, about "{word}"? | |
# Don't add explanations for your answer. Do not invent. Don't use a structure or indenting. Be concise. Don't discard relevant information. | |
# made of RDF-like statements, | |
# myPromt = f""" | |
# Can you elaborate and express better the given notes below, delimited by triple backticks, about "{word}"? | |
# Don't add explanations for your answer. | |
# Do not invent. | |
# Don't use a structure or indenting. | |
# Be concise but exhaustive. Don't discard information reported in the notes. | |
# """ | |
myPromt = f""" | |
Can you reformulate the following notes, provided between triple backticks, into clear and complete sentences about "{word}"? | |
Ensure the rewriting is human-readable and easily interpretable. Maintain conciseness and exhaustiveness, including all information from the notes. | |
Avoid using note formats or lists, and refrain from inventing additional information. | |
""" | |
myDelimiter = "```" | |
if cleanInput==True: | |
labelTriples = cleanInputText(labelTriples) | |
# try to read cache | |
if map_query_input_output is not None: | |
key = args.model_name + "__" + str(args.temperature) + "__" + myPromt | |
if key in map_query_input_output: | |
if labelTriples in map_query_input_output[key]: | |
output = map_query_input_output[key][labelTriples] | |
# if input_text.strip() == "": | |
# print("here") | |
# if handler == api_call_dglc: | |
# output = clean_gpt_out(output) #clean output | |
if strtobool(args.debug): | |
print("RETRIEVED CACHED RESULT FOR:\n", myPromt, "\n", myDelimiter, word, myDelimiter, "\n=>\n", output, "\n") | |
return output, map_query_input_output | |
# call | |
try: | |
contextText = "" | |
if args.service_provider == "gptjrc": | |
contextText = call_model(input_text=labelTriples, prompt=myPromt, model=args.model_name, | |
temperature=args.temperature, delimiter=myDelimiter, | |
InContextExamples=[], | |
handler=api_call_gptjrc, | |
verbose=True, args=args) | |
# elif args.service_provider == "HFonPremises": | |
# contextText = call_model(input_text=labelTriples, prompt=myPromt, model=args.model_name, | |
# temperature=args.temperature, delimiter=myDelimiter, | |
# InContextExamples=[], | |
# handler=api_call_HFonPremises, | |
# verbose=True, args=args) | |
if contextText: | |
if not isinstance(contextText, str): | |
contextText = contextText['choices'][0]['message']['content'] | |
if map_query_input_output is not None: | |
if not key in map_query_input_output: | |
map_query_input_output[key] = {} | |
if contextText: | |
if contextText != "": | |
map_query_input_output[key][labelTriples] = contextText | |
except Exception as err: | |
return None, map_query_input_output | |
return contextText, map_query_input_output | |
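# Hypothetical call sketch for getLinearTextualContextFromTriples; triples arrive as lists/tuples of strings
# and are flattened into sentences before optional retrieval-based filtering or LLM summarisation
# (the splitter configuration below is an assumption):
#
#   triples = [("Legionellosis", "is a", "pneumonia"), ("Legionellosis", "caused by", "Legionella bacteria")]
#   text_splitter = TokenTextSplitter(chunk_size=args.tokens_max, chunk_overlap=0)
#   ctx, map_query_input_output = getLinearTextualContextFromTriples(
#       "legionellosis", triples, text_splitter, args, map_query_input_output,
#       cleanInput=True, questionText="What is legionellosis?")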
def getUrlBioAndAllOtherBioConcepts(word, args, key_virtuoso, cache_map_virtuoso, endpoint, VirtuosoUsername, contextWordVirtuoso, UseBioportalForLinking=False, questionText="" ): | |
#UseBioportalForLinking = False #trial to del | |
if strtobool(args.debug): | |
print("--- start getUrlBioAndAllOtherBioConcepts for " + word.lower()) | |
entityBioeUrl = None | |
ALLURIScontext = [] | |
key_bioportal = "" | |
if args.bioportalkey_filename and os.path.exists(args.bioportalkey_filename): | |
fkeyname = args.bioportalkey_filename | |
with open(fkeyname) as f: | |
key_bioportal = f.read() | |
else: | |
key_bioportal = os.environ['key_bioportal'] | |
# Check if args.KG_restriction exists and is not empty | |
if getattr(args, 'KG_restriction', None): | |
# api call | |
if strtobool(args.debug): | |
print("--- " + word.lower()) | |
# args.KG_restriction exists and is not empty | |
if strtobool(args.debug): | |
print("KG_restriction is provided and not empty:", args.KG_restriction) | |
from_clauses = ' '.join([f"FROM <{choice}>" for choice in args.KG_restriction]) | |
# Construct the full SPARQL query | |
query = f""" | |
prefix skosxl: <http://www.w3.org/2008/05/skos-xl#> | |
SELECT ?concept ?label (COUNT(?edge) AS ?score) | |
{from_clauses} | |
WHERE {{ | |
?concept skos:prefLabel|rdfs:label|skos:altLabel|skosxl:literalForm|obo:hasRelatedSynonym ?label . | |
FILTER (LCASE(STR(?label)) = "{word.lower()}") | |
?concept ?edge ?o . | |
}} | |
GROUP BY ?concept ?label | |
ORDER BY DESC(?score) | |
""" | |
### this is for Bioportal url api: | |
onto_clauses = "" | |
# for choice in args.KG_restriction: | |
# if choice == "SNOMEDCT": | |
# choice = "SNOMED" | |
# elif choice == "OBOREL": | |
# choice = "RO" | |
# elif choice == "PTRANS": | |
# choice = "TRANS" | |
# elif choice == "FOODON": | |
# choice = "FoodOn" | |
# elif choice == "GEOSPARQL": | |
# choice = "GeoSPARQL" | |
# elif choice == "NCBITAXON": | |
# choice = "NCBITAXON,NCBITaxon_" | |
# onto_clauses = onto_clauses + choice + "," | |
for choice in args.KG_restriction: | |
if choice == "SNOMED": | |
choice="SNOMEDCT" | |
elif choice == "RO": | |
choice = "OBOREL" | |
elif choice == "TRANS": | |
choice = "PTRANS" | |
elif choice == "FoodOn": | |
choice = "FOODON" | |
elif choice == "GeoSPARQL": | |
choice = "GEOSPARQL" | |
# elif choice == "NCBITAXON": | |
# choice = "NCBITAXON,NCBITaxon_" | |
elif choice == "NCBITaxon_": | |
choice = "NCBITAXON" | |
if choice in ONLY_Ontologies_OnBIOPORTAL: | |
onto_clauses=onto_clauses+choice+"," | |
if onto_clauses and onto_clauses[-1] == ",": | |
onto_clauses = onto_clauses[:-1] | |
url = f"https://services.data.bioontology.org/annotatorplus/?text={word.lower()}&ontologies={onto_clauses}&longest_only=true&exclude_numbers=true&whole_word_only=true&exclude_synonyms=false&negation=false&experiencer=false&temporality=false&score_threshold=0&confidence_threshold=0&display_links=false&display_context=false&score=cvalue&apikey={key_bioportal}" | |
else: | |
# args.KG_restriction does not exist or is empty | |
kg_restriction = getattr(args, 'KG_restriction', None) | |
if kg_restriction is not None and len(kg_restriction) == 0: | |
print("KG_restriction is provided but empty") | |
return None, None, cache_map_virtuoso | |
if strtobool(args.debug): | |
print("--- " + word.lower()) | |
print("KG_restriction is not provided or empty - Consider all the KGs in the virtuoso endpoint") | |
query = f""" | |
prefix skosxl: <http://www.w3.org/2008/05/skos-xl#> | |
SELECT ?concept ?label (COUNT(?edge) AS ?score) | |
WHERE {{ | |
?concept skos:prefLabel|rdfs:label|skos:altLabel|skosxl:literalForm|obo:hasRelatedSynonym ?label . | |
FILTER (LCASE(STR(?label)) = "{word.lower()}") | |
?concept ?edge ?o . | |
}} | |
GROUP BY ?concept ?label | |
ORDER BY DESC(?score) | |
""" | |
### this is for Bioportal url api: | |
url = f"https://services.data.bioontology.org/annotatorplus/?text={word.lower()}&ontologies=AEO,BFO,BIM,BCGO,CL,CHIRO,CHEBI,DCM,FMA,GO,GENO,GEOSPARQL,HL7,DOID,HP,HP_O,IDO,IAO,ICD10,LOINC,MESH,MONDO,NCIT,NCBITAXON,NIFCELL,NIFSTD,GML,OBCS,OCHV,OHPI,OPB,PTRANS,PLOSTHES,RADLEX,OBOREL,STY,SO,SNOMEDCT,STATO,SYMP,FOODON,UBERON,VO&longest_only=true&exclude_numbers=true&whole_word_only=true&exclude_synonyms=false&negation=false&experiencer=false&temporality=false&score_threshold=0&confidence_threshold=0&display_links=false&display_context=false&score=cvalue&apikey={key_bioportal}" | |
try: | |
if not UseBioportalForLinking: | |
if strtobool(args.debug): | |
print("Use Virtuoso Sparql endpoint for linking ... " + word.lower()) | |
responseText = sparqlQuery(endpoint, query, VirtuosoUsername, key_virtuoso, strtobool(args.USE_CACHE))  # pass the SPARQL query built above, not the raw question text | |
# Parse the response as JSON | |
results = json.loads(responseText) | |
if len(results) > 0 and results['results']['bindings']: | |
entityBioeUrl = str(results['results']['bindings'][0]['concept']['value']) | |
if cache_map_virtuoso is not None: | |
if not word in cache_map_virtuoso: | |
cache_map_virtuoso[word] = {} | |
cache_map_virtuoso[word][contextWordVirtuoso] = entityBioeUrl | |
# # loop the results | |
for result in results['results']['bindings']: | |
# print(result) | |
contextConcept = result['concept']['value'] | |
if contextConcept not in ALLURIScontext: | |
ALLURIScontext.append(contextConcept) | |
if cache_map_virtuoso is not None: | |
if not word in cache_map_virtuoso: | |
cache_map_virtuoso[word] = {} | |
cache_map_virtuoso[word][contextConcept] = None | |
if ALLURIScontext and isinstance(ALLURIScontext, list): | |
ALLURIScontext = list(set(ALLURIScontext)) | |
if cache_map_virtuoso is not None: | |
if not word in cache_map_virtuoso: | |
cache_map_virtuoso[word] = {} | |
cache_map_virtuoso[word]['ALLURIScontext'] = ALLURIScontext | |
else: | |
if cache_map_virtuoso is not None: | |
if not word in cache_map_virtuoso: | |
cache_map_virtuoso[word] = {} | |
cache_map_virtuoso[word][contextWordVirtuoso] = None | |
cache_map_virtuoso[word]['ALLURIScontext'] = [] | |
else: #this is instead using Bioportal API for linking | |
if strtobool(args.debug): | |
print("Use Bioportal for linking ... " + word.lower()) | |
response = requests.get(url) | |
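# Illustrative shape of the Annotator+ response consumed below (simplified sketch, not the full schema): | |
# a JSON list of objects such as | |
#   {"annotatedClass": {"@id": "...", "prefLabel": "...", ...}, | |
#    "annotations": [{"from": 1, "to": 5, "text": "FEVER", "matchType": "PREF", ...}], | |
#    "hierarchy": [...], "mappings": [...], "score": 5.4} | |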
try: | |
data = response.json() | |
if not data: | |
# nothing found from Bioportal | |
#return None, None, None, None, None, cache_map_virtuoso | |
return None, None, cache_map_virtuoso | |
dff = pd.DataFrame(data) | |
dff = dff.drop(columns=['hierarchy', 'mappings']) | |
# If the columns are dictionary-like, use pd.json_normalize: | |
expanded_annotated_class = pd.json_normalize(dff['annotatedClass']) | |
expanded_annotations = pd.DataFrame(dff['annotations'].tolist(), index=dff.index) | |
expanded_annotations = pd.json_normalize(expanded_annotations[0]) | |
# Join the expanded columns back to the original DataFrame | |
df_expanded = dff.drop(columns=['annotatedClass', 'annotations']).join(expanded_annotated_class).join( | |
expanded_annotations) | |
# Replace the SNOMED CT base URI, because our internal knowledge base uses http://snomed.info/id/ instead | |
df_expanded['@id'] = df_expanded['@id'].str.replace( | |
"http://purl.bioontology.org/ontology/SNOMEDCT/", | |
"http://snomed.info/id/" | |
) | |
if not df_expanded.empty: | |
df_expanded = df_expanded.sort_values( | |
by=['from', 'text', 'score', 'matchType'], ascending=[True, True, False, False]) | |
df_expanded = df_expanded.drop_duplicates(subset=['@id']) | |
# Filter rows where 'prefLabel' is exactly equal to 'word.lower()' or 'word.lower()' is in 'synonym' | |
# filtered_df = df_expanded[ | |
# df_expanded['prefLabel'].str.lower() == word.lower() | | |
# df_expanded['synonym'].apply( | |
# lambda x: True if isinstance(x, list) and len(x) > 0 and word.lower() in [item.lower() for | |
# item in x] else ( | |
# np.nan if x is None or (isinstance(x, float) and pd.isna(x)) else x)) | |
# ] | |
if 'prefLabel' in df_expanded.columns: | |
if 'synonym' in df_expanded.columns: | |
df_expanded = df_expanded[ | |
df_expanded['prefLabel'].apply( | |
lambda x: isinstance(x, str) and x.lower() == word.lower() | |
) | | |
df_expanded['synonym'].apply( | |
lambda x: isinstance(x, list) and any(item.lower() == word.lower() for item in x) | |
) | |
] | |
else: | |
df_expanded = df_expanded[ | |
df_expanded['prefLabel'].apply( | |
lambda x: isinstance(x, str) and x.lower() == word.lower() | |
) | |
] | |
if df_expanded.empty: | |
# nothing found from Bioportal | |
#return None, None, None, None, None, cache_map_virtuoso | |
return None, None, cache_map_virtuoso | |
# Specify the columns you want to keep | |
columns_to_keep = ["score", "from", "to", "prefLabel", "text", "@id"] | |
# Subset the dataframe to keep only the specified columns | |
df_expanded = df_expanded[columns_to_keep] | |
# Rename the specified columns | |
df_expanded = df_expanded.rename( | |
columns={"from": "start", "to": "end", "text": "word"}) | |
# Optional: Reset index if you want a clean index after filtering | |
df_expanded = df_expanded.reset_index(drop=True) | |
df_expanded['score'] = df_expanded['score'].round(2) | |
# Find the index of the row with the maximum 'score' | |
max_score_index = df_expanded['score'].idxmax() | |
max_score_row = df_expanded.loc[max_score_index] | |
entityBioeUrl = str(max_score_row['@id']) | |
if cache_map_virtuoso is not None: | |
if not word in cache_map_virtuoso: | |
cache_map_virtuoso[word] = {} | |
cache_map_virtuoso[word][contextWordVirtuoso] = entityBioeUrl | |
# Drop the row with the maximum 'score' | |
#df_expanded = df_expanded.drop(max_score_index) | |
# Reset the index if desired (optional) | |
df_expanded.reset_index(drop=True, inplace=True) | |
# Iterate over each row in the DataFrame | |
for index, row in df_expanded.iterrows(): | |
# Append the '@id' value to the list | |
if row['@id'] is not None and pd.notna(row['@id']): | |
contextConcept=row['@id'] | |
ALLURIScontext.append(contextConcept) | |
if cache_map_virtuoso is not None: | |
if not word in cache_map_virtuoso: | |
cache_map_virtuoso[word] = {} | |
cache_map_virtuoso[word][contextConcept] = None | |
if ALLURIScontext and isinstance(ALLURIScontext, list): | |
ALLURIScontext = list(set(ALLURIScontext)) | |
if cache_map_virtuoso is not None: | |
if not word in cache_map_virtuoso: | |
cache_map_virtuoso[word] = {} | |
cache_map_virtuoso[word]['ALLURIScontext'] = ALLURIScontext | |
return entityBioeUrl, ALLURIScontext, cache_map_virtuoso | |
else: | |
#nothing found from Bioportal | |
#return None, None, None, None, None, cache_map_virtuoso | |
return None, None, cache_map_virtuoso | |
except Exception as err: | |
logging.error( | |
f'ERROR ON BioPortal Annotator API Call\n\tError: {err}\n TextToAnnotate: {word.lower()}\n Have a check...') | |
#return None, None, None, None, None, cache_map_virtuoso | |
return None, None, cache_map_virtuoso | |
except Exception as err: | |
# if cache_map_virtuoso is not None: | |
# if not word in cache_map_virtuoso: | |
# cache_map_virtuoso[word] = {} | |
# cache_map_virtuoso[word][contextWordVirtuoso] = None | |
#return None, None, None, None, None, cache_map_virtuoso | |
return None, None, cache_map_virtuoso | |
return entityBioeUrl, ALLURIScontext, cache_map_virtuoso | |
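# Illustrative usage sketch (commented out, not executed; the word and question text are placeholders): | |
#   best_url, all_uris, cache_map_virtuoso = getUrlBioAndAllOtherBioConcepts( | |
#       "fever", args, key_virtuoso, cache_map_virtuoso, endpoint, VirtuosoUsername, | |
#       contextWordVirtuoso="", UseBioportalForLinking=True, questionText="The patient has fever.") | |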
#@mem.cache | |
def virtuoso_api_call(word, text_splitter, args, key_virtuoso, cache_map_virtuoso, load_map_query_input_output, id=None, iALLURIScontextFromNCBO=None,UseBioportalForLinking=True,questionText=""): | |
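"""Entity-linking entry point for a single word: resolves the word to a concept URI (via | |
getUrlBioAndAllOtherBioConcepts) and, depending on args.computeEntityContext / | |
args.computeEntityGlobalContext, builds a readable context from the triples of the concept or of | |
all candidate concepts. | |
Returns (entityBioeUrl, ALLURIScontext, singleContext, globalContext, singleTriples, globalTriples, | |
cache_map_virtuoso, load_map_query_input_output).""" | |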
if strtobool(args.debug): | |
print(f"\n----- Starting virtuoso_api_call for {word}") | |
word = word.lower() | |
word = strip_quotes(word) | |
endpoint = 'https://api-vast.jrc.service.ec.europa.eu/sparql' | |
VirtuosoUsername = 'dba' | |
if getattr(args, 'KG_restriction', None): | |
contextWordVirtuoso = ', '.join(sorted(args.KG_restriction)) | |
else: | |
contextWordVirtuoso = "" | |
singleContext = None | |
globalContext = None | |
sssingleTriples = None | |
ggglobalTriples = None | |
unique_listLabelTriples = [] | |
unique_listGlobalTriples = [] | |
ALLURIScontext = [] | |
url_text = None | |
if id: | |
url_text = id | |
if iALLURIScontextFromNCBO and isinstance(iALLURIScontextFromNCBO, list): | |
ALLURIScontext=iALLURIScontextFromNCBO | |
ALLURIScontext = list(set(ALLURIScontext)) | |
if (cache_map_virtuoso is not None) and (not url_text): | |
if word in cache_map_virtuoso: | |
if contextWordVirtuoso in cache_map_virtuoso[word]: | |
url_text = cache_map_virtuoso[word][contextWordVirtuoso] | |
if strtobool(args.debug): | |
print("RETRIEVED CACHED RESULT FOR:\n", word, " => ", url_text, "\n") | |
if not url_text: | |
return None, None, None, None, None, None, cache_map_virtuoso, load_map_query_input_output | |
if url_text and not ALLURIScontext: | |
if cache_map_virtuoso is not None: | |
if word in cache_map_virtuoso: | |
if 'ALLURIScontext' in cache_map_virtuoso[word]: | |
ALLURIScontext = cache_map_virtuoso[word]['ALLURIScontext'] | |
entityBioeUrl = None | |
if url_text and ALLURIScontext: | |
entityBioeUrl = url_text | |
else: | |
try: | |
entityBioeUrl, ALLURIScontext, cache_map_virtuoso = getUrlBioAndAllOtherBioConcepts(word, args, key_virtuoso, cache_map_virtuoso, endpoint, VirtuosoUsername, contextWordVirtuoso, UseBioportalForLinking=UseBioportalForLinking, questionText=questionText ) | |
if ALLURIScontext and isinstance(ALLURIScontext, list): | |
ALLURIScontext = list(set(ALLURIScontext)) | |
except Exception as err: | |
# if cache_map_virtuoso is not None: | |
# if not word in cache_map_virtuoso: | |
# cache_map_virtuoso[word] = {} | |
# cache_map_virtuoso[word][contextWordVirtuoso] = None | |
return None, None, None, None, None, None, cache_map_virtuoso, load_map_query_input_output | |
if entityBioeUrl: | |
if strtobool(args.computeEntityContext) and (strtobool(args.computeEntityGlobalContext)==False): | |
if strtobool(args.debug): | |
print("START computeEntityContext") | |
unique_listLabelTriples = [] | |
singleContext = None | |
if cache_map_virtuoso is not None: | |
if entityBioeUrl in cache_map_virtuoso: | |
if "LabelTriples" in cache_map_virtuoso[entityBioeUrl]: | |
unique_listLabelTriples = cache_map_virtuoso[entityBioeUrl]["LabelTriples"] | |
if strtobool(args.debug): | |
print("RETRIEVED CACHED RESULT FOR:\n", entityBioeUrl, " => ", "LabelTriples", "\n") | |
if ("SingleContext" in cache_map_virtuoso[entityBioeUrl]) and (strtobool(args.UseRetrieverForContextCreation)==False): | |
singleContext = cache_map_virtuoso[entityBioeUrl]["SingleContext"] | |
if strtobool(args.debug): | |
print("RETRIEVED CACHED RESULT FOR:\n", entityBioeUrl, " => ", "SingleContext", "\n") | |
if not singleContext: | |
if unique_listLabelTriples: | |
singleContext, load_map_query_input_output = getLinearTextualContextFromTriples(word, unique_listLabelTriples, | |
text_splitter, args, | |
load_map_query_input_output,cleanInput=True,questionText=questionText) | |
else: | |
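# The query below collects human-readable (subject, predicate, object) label triples for the concept: | |
# the first branch follows links to labelled resources, the second keeps literal objects directly, | |
# and the third unfolds owl:someValuesFrom restrictions into labelled triples. | |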
query = f""" | |
prefix skosxl: <http://www.w3.org/2008/05/skos-xl#> | |
SELECT DISTINCT ?labelS ?labelP ?labelO | |
WHERE {{ | |
{{ | |
<{entityBioeUrl}> ?p ?o. | |
<{entityBioeUrl}> skos:prefLabel|rdfs:label|skos:altLabel|skosxl:literalForm|obo:hasRelatedSynonym ?labelS . | |
?p skos:prefLabel|rdfs:label|skos:altLabel|skosxl:literalForm|obo:hasRelatedSynonym ?labelP . | |
?o skos:prefLabel|rdfs:label|skos:altLabel|skosxl:literalForm|obo:hasRelatedSynonym ?labelO . | |
}} | |
UNION | |
{{ | |
SELECT ?labelS ?labelP ?labelO | |
WHERE {{ | |
<{entityBioeUrl}> ?p ?labelO . | |
<{entityBioeUrl}> skos:prefLabel|rdfs:label|skos:altLabel|skosxl:literalForm|obo:hasRelatedSynonym ?labelS . | |
?p skos:prefLabel|rdfs:label|skos:altLabel|skosxl:literalForm|obo:hasRelatedSynonym ?labelP . | |
FILTER (isLiteral(?labelO)) | |
}} | |
}} | |
UNION | |
{{ | |
SELECT DISTINCT ?labelS ?labelP ?labelO | |
WHERE {{ | |
<{entityBioeUrl}> ?ppp ?ooo . | |
?ooo rdf:type owl:Restriction . | |
?ooo owl:onProperty ?p . | |
?ooo owl:someValuesFrom ?o . | |
<{entityBioeUrl}> skos:prefLabel|rdfs:label|skos:altLabel|skosxl:literalForm|obo:hasRelatedSynonym ?labelS . | |
?p skos:prefLabel|rdfs:label|skos:altLabel|skosxl:literalForm|obo:hasRelatedSynonym ?labelP . | |
?o skos:prefLabel|rdfs:label|skos:altLabel|skosxl:literalForm|obo:hasRelatedSynonym ?labelO . | |
}} | |
}} | |
}} | |
""" | |
try: | |
responseText = sparqlQuery(endpoint, query, VirtuosoUsername, key_virtuoso, strtobool(args.USE_CACHE)) | |
# Parse the response as JSON | |
results = json.loads(responseText) | |
if len(results) > 0 and results['results']['bindings']: | |
# word = "subClassOf" | |
# split_word = split_camel_case(word) | |
# # loop the results | |
labelTriples="" | |
listLabelTriples = [] | |
pattern = r'\^\^<http:.*?>' | |
for result in results['results']['bindings']: | |
#print(result) | |
ss = str(result['labelS']['value']).strip().replace("..",".").replace("@en","") | |
ss = re.sub(pattern, '', ss) | |
pp = split_camel_case(str(result['labelP']['value'])).replace("_"," ").strip().replace("..",".").replace("@en","") | |
pp = re.sub(pattern, '', pp) | |
oo = str(result['labelO']['value']).replace("_"," ").strip().replace("..",".").replace("@en","") | |
oo = re.sub(pattern, '', oo) | |
listLabelTriples.append([ss, pp, oo]) | |
# Remove duplicates while preserving order | |
unique_listLabelTriples = list(dict.fromkeys(tuple(triple) for triple in listLabelTriples)) | |
# # If you need the result back in list of lists format | |
# unique_listLabelTriples = [list(triple) for triple in unique_listLabelTriples] | |
if unique_listLabelTriples: | |
if cache_map_virtuoso is not None: | |
if not entityBioeUrl in cache_map_virtuoso: | |
cache_map_virtuoso[entityBioeUrl] = {} | |
cache_map_virtuoso[entityBioeUrl]["LabelTriples"] = unique_listLabelTriples | |
singleContext, load_map_query_input_output = getLinearTextualContextFromTriples(word, unique_listLabelTriples, text_splitter, args, load_map_query_input_output,cleanInput=True,questionText=questionText) | |
except Exception as err: | |
singleContext = None | |
if singleContext and (strtobool(args.UseRetrieverForContextCreation)==False): | |
if cache_map_virtuoso is not None: | |
if not entityBioeUrl in cache_map_virtuoso: | |
cache_map_virtuoso[entityBioeUrl] = {} | |
cache_map_virtuoso[entityBioeUrl]["SingleContext"] = singleContext | |
if strtobool(args.computeEntityGlobalContext): | |
if strtobool(args.debug): | |
print("START computeEntityGlobalContext") | |
unique_listGlobalTriples = [] | |
globalContext = None | |
if cache_map_virtuoso is not None: | |
if word in cache_map_virtuoso: | |
if ("GlobalTriples"+" "+contextWordVirtuoso).strip() in cache_map_virtuoso[word]: | |
unique_listGlobalTriples = cache_map_virtuoso[word][("GlobalTriples"+" "+contextWordVirtuoso).strip()] | |
if strtobool(args.debug): | |
print("RETRIEVED CACHED RESULT FOR:\n", word, " => ", ("GlobalTriples"+" "+contextWordVirtuoso).strip(), "\n") | |
if (("GlobalContext"+" "+contextWordVirtuoso).strip() in cache_map_virtuoso[word]) and (strtobool(args.UseRetrieverForContextCreation)==False): | |
globalContext = cache_map_virtuoso[word][("GlobalContext"+" "+contextWordVirtuoso).strip()] | |
if strtobool(args.debug): | |
print("RETRIEVED CACHED RESULT FOR:\n", word, " => ", ("GlobalContext"+" "+contextWordVirtuoso).strip(), "\n") | |
if not globalContext: | |
BreakenBeforeAll = False | |
if unique_listGlobalTriples: | |
globalContext, load_map_query_input_output = getLinearTextualContextFromTriples(word, unique_listGlobalTriples, | |
text_splitter, args, | |
load_map_query_input_output,cleanInput=True,questionText=questionText) | |
else: | |
if not ALLURIScontext: | |
if cache_map_virtuoso is not None: | |
if word in cache_map_virtuoso: | |
ALLURIScontext = list(cache_map_virtuoso[word].keys()) | |
ALLURIScontext = [element for element in ALLURIScontext if element and ("GlobalTriples" not in element) and ("GlobalContext" not in element) and ("http" in element)] | |
if not ALLURIScontext: | |
# THIS CASE SHOULD RARELY HAPPEN: it can occur when the entry comes from an initial BioPortal annotation, which already recognised the first URL | |
# # Print the error message to stderr | |
# print("THIS CASE SHOULD NEVER HAPPEN!!!! Check what's happening...exiting now...") | |
# # Exit the program with a non-zero status code (commonly used to indicate an error) | |
# sys.exit(1) | |
try: | |
entityBioeUrl, ALLURIScontext, cache_map_virtuoso = getUrlBioAndAllOtherBioConcepts(word, | |
args, | |
key_virtuoso, | |
cache_map_virtuoso, | |
endpoint, | |
VirtuosoUsername, | |
contextWordVirtuoso, | |
UseBioportalForLinking=UseBioportalForLinking, | |
questionText=questionText) | |
if ALLURIScontext and isinstance(ALLURIScontext, list): | |
ALLURIScontext = list(set(ALLURIScontext)) | |
except Exception as err: | |
# if cache_map_virtuoso is not None: | |
# if not word in cache_map_virtuoso: | |
# cache_map_virtuoso[word] = {} | |
# cache_map_virtuoso[word][contextWordVirtuoso] = None | |
return None, None, None, None, None, None, cache_map_virtuoso, load_map_query_input_output | |
if not ALLURIScontext: | |
# Print the error message to stderr | |
print("THIS CASE SHOULD NEVER HAPPEN NOW!!!! Check what's happening...exiting now...") | |
# Exit the program with a non-zero status code (commonly used to indicate an error) | |
sys.exit(1) | |
else: | |
for xxUrl in ALLURIScontext: | |
unique_listLabelTriples = [] | |
# singleContext = None | |
if cache_map_virtuoso is not None: | |
if xxUrl in cache_map_virtuoso: | |
if "LabelTriples" in cache_map_virtuoso[xxUrl]: | |
unique_listLabelTriples = cache_map_virtuoso[xxUrl]["LabelTriples"] | |
if strtobool(args.debug): | |
print("RETRIEVED CACHED RESULT FOR:\n", xxUrl, " => ", | |
"LabelTriples", "\n") | |
# if "SingleContext" in cache_map_virtuoso[xxUrl] and (strtobool(args.UseRetrieverForContextCreation)==False): | |
# singleContext = cache_map_virtuoso[xxUrl]["SingleContext"] | |
# if strtobool(args.debug): | |
# print("RETRIEVED CACHED RESULT FOR:\n", xxUrl, " => ", | |
# "SingleContext", "\n") | |
# if not singleContext: | |
# if unique_listLabelTriples: | |
# singleContext, load_map_query_input_output = getLinearTextualContextFromTriples( | |
# word, unique_listLabelTriples, | |
# text_splitter, args, | |
# load_map_query_input_output, cleanInput=True, questionText=questionText) | |
# else: | |
if not unique_listLabelTriples: | |
query = f""" | |
prefix skosxl: <http://www.w3.org/2008/05/skos-xl#> | |
SELECT DISTINCT ?labelS ?labelP ?labelO | |
WHERE {{ | |
{{ | |
<{xxUrl}> ?p ?o. | |
<{xxUrl}> skos:prefLabel|rdfs:label|skos:altLabel|skosxl:literalForm|obo:hasRelatedSynonym ?labelS . | |
?p skos:prefLabel|rdfs:label|skos:altLabel|skosxl:literalForm|obo:hasRelatedSynonym ?labelP . | |
?o skos:prefLabel|rdfs:label|skos:altLabel|skosxl:literalForm|obo:hasRelatedSynonym ?labelO . | |
}} | |
UNION | |
{{ | |
SELECT ?labelS ?labelP ?labelO | |
WHERE {{ | |
<{xxUrl}> ?p ?labelO . | |
<{xxUrl}> skos:prefLabel|rdfs:label|skos:altLabel|skosxl:literalForm|obo:hasRelatedSynonym ?labelS . | |
?p skos:prefLabel|rdfs:label|skos:altLabel|skosxl:literalForm|obo:hasRelatedSynonym ?labelP . | |
FILTER (isLiteral(?labelO)) | |
}} | |
}} | |
UNION | |
{{ | |
SELECT DISTINCT ?labelS ?labelP ?labelO | |
WHERE {{ | |
<{xxUrl}> ?ppp ?ooo . | |
?ooo rdf:type owl:Restriction . | |
?ooo owl:onProperty ?p . | |
?ooo owl:someValuesFrom ?o . | |
<{xxUrl}> skos:prefLabel|rdfs:label|skos:altLabel|skosxl:literalForm|obo:hasRelatedSynonym ?labelS . | |
?p skos:prefLabel|rdfs:label|skos:altLabel|skosxl:literalForm|obo:hasRelatedSynonym ?labelP . | |
?o skos:prefLabel|rdfs:label|skos:altLabel|skosxl:literalForm|obo:hasRelatedSynonym ?labelO . | |
}} | |
}} | |
}} | |
""" | |
try: | |
responseText = sparqlQuery(endpoint, query, VirtuosoUsername, key_virtuoso, strtobool(args.USE_CACHE)) | |
# Parse the response as JSON | |
results = json.loads(responseText) | |
if len(results) > 0 and results['results']['bindings']: | |
# word = "subClassOf" | |
# split_word = split_camel_case(word) | |
# # loop the results | |
labelTriples = "" | |
listLabelTriples = [] | |
pattern = r'\^\^<http:.*?>' | |
for result in results['results']['bindings']: | |
# print(result) | |
ss = str(result['labelS']['value']).strip().replace("..", ".").replace("@en","") | |
ss = re.sub(pattern, '', ss) | |
pp = split_camel_case(str(result['labelP']['value'])).replace("_"," ").strip().replace("..", ".").replace("@en","") | |
pp = re.sub(pattern, '', pp) | |
oo = str(result['labelO']['value']).replace("_"," ").strip().replace("..", ".").replace("@en","") | |
oo = re.sub(pattern, '', oo) | |
listLabelTriples.append([ss, pp, oo]) | |
# Remove duplicates while preserving order | |
unique_listLabelTriples = list( | |
dict.fromkeys(tuple(triple) for triple in listLabelTriples)) | |
# # If you need the result back in list of lists format | |
# unique_listLabelTriples = [list(triple) for triple in unique_listLabelTriples] | |
if unique_listLabelTriples: | |
if cache_map_virtuoso is not None: | |
if not xxUrl in cache_map_virtuoso: | |
cache_map_virtuoso[xxUrl] = {} | |
cache_map_virtuoso[xxUrl][ | |
"LabelTriples"] = unique_listLabelTriples | |
# singleContext, load_map_query_input_output = getLinearTextualContextFromTriples( | |
# word, unique_listLabelTriples, text_splitter, args, load_map_query_input_output, cleanInput=True, questionText=questionText) | |
# | |
# if singleContext and (strtobool(args.UseRetrieverForContextCreation)==False): | |
# if cache_map_virtuoso is not None: | |
# if not xxUrl in cache_map_virtuoso: | |
# cache_map_virtuoso[xxUrl] = {} | |
# cache_map_virtuoso[xxUrl][ | |
# "SingleContext"] = singleContext | |
except Exception as err: | |
unique_listLabelTriples = [] | |
if unique_listLabelTriples: | |
unique_listGlobalTriples.extend(unique_listLabelTriples) | |
# To speed things up, break the global computation here; in this case the triples of the remaining URIs are not stored in the cache, which may be useful in the future | |
# #if token_counter(str(unique_listGlobalTriples),args.model_name) > args.tokens_max: | |
if getattr(args, 'maxTriplesContextComputation', None): #it means it exists | |
if args.maxTriplesContextComputation > 0: | |
if len(unique_listGlobalTriples) > args.maxTriplesContextComputation: | |
unique_listGlobalTriples = unique_listGlobalTriples[:args.maxTriplesContextComputation] | |
BreakenBeforeAll = True | |
break # BREAK THE FOR LOOP IF THE GLOBAL CONTEXT IS ALREADY TOO BIG, BIGGER THAN tokens_max | |
if unique_listGlobalTriples: | |
# Remove duplicates while preserving order | |
unique_listGlobalTriples = list( | |
dict.fromkeys(tuple(triple) for triple in unique_listGlobalTriples)) | |
if cache_map_virtuoso is not None: | |
if not word in cache_map_virtuoso: | |
cache_map_virtuoso[word] = {} | |
if not BreakenBeforeAll: | |
cache_map_virtuoso[word][("GlobalTriples"+" "+contextWordVirtuoso).strip()] = unique_listGlobalTriples | |
globalContext, load_map_query_input_output = getLinearTextualContextFromTriples(word, | |
unique_listGlobalTriples, | |
text_splitter, args, | |
load_map_query_input_output, cleanInput=True, questionText=questionText) | |
if globalContext and (strtobool(args.UseRetrieverForContextCreation)==False): | |
if cache_map_virtuoso is not None: | |
if not word in cache_map_virtuoso: | |
cache_map_virtuoso[word] = {} | |
if not BreakenBeforeAll: | |
cache_map_virtuoso[word][("GlobalContext"+" "+contextWordVirtuoso).strip()] = globalContext | |
if unique_listLabelTriples: | |
sssingleTriples = " ,., ".join( | |
" ,,, ".join(str(element).capitalize() for element in triple) for triple in unique_listLabelTriples) | |
while "\\n" in sssingleTriples: | |
sssingleTriples = sssingleTriples.replace("\\n", " ") | |
sssingleTriples = sssingleTriples.strip() | |
while "\t" in sssingleTriples: | |
sssingleTriples = sssingleTriples.replace("\t", " ") | |
sssingleTriples = sssingleTriples.strip() | |
if unique_listGlobalTriples: | |
ggglobalTriples = " ,., ".join( | |
" ,,, ".join(str(element).capitalize() for element in triple) for triple in unique_listGlobalTriples) | |
while "\\n" in ggglobalTriples: | |
ggglobalTriples = ggglobalTriples.replace("\\n", " ") | |
ggglobalTriples = ggglobalTriples.strip() | |
while "\t" in ggglobalTriples: | |
ggglobalTriples = ggglobalTriples.replace("\t", " ") | |
ggglobalTriples = ggglobalTriples.strip() | |
return entityBioeUrl, ALLURIScontext, singleContext, globalContext, sssingleTriples, ggglobalTriples, cache_map_virtuoso, load_map_query_input_output | |
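# Illustrative usage sketch (commented out, not executed; arguments are placeholders): | |
#   url, uris, ctx, gctx, triples, gtriples, cache_map_virtuoso, load_map_query_input_output = \ | |
#       virtuoso_api_call("chikungunya", text_splitter, args, key_virtuoso, cache_map_virtuoso, | |
#                         load_map_query_input_output, questionText="suspected chikungunya cases") | |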
# def process_geo_row(row, key_geonames, cache_map_geonames): | |
# if row['IsGeo'] == 1: | |
# result, cache_map_geonames = geonames_api_call(row['word'], key_geonames, cache_map_geonames) | |
# return result, cache_map_geonames | |
# else: | |
# return None, cache_map_geonames | |
def process_row4Linking(row, text_splitter, args, key_geonames, cache_map_geonames, key_virtuoso, cache_map_virtuoso, load_map_query_input_output): | |
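"""Link a single annotated row: geographic entities (IsGeo == 1) go through geonames_api_call, | |
biomedical or forced entities go through virtuoso_api_call (with a BioPortal-free retry for forced rows). | |
Returns the linked URI, the candidate URIs, the single/global contexts and triples, the three cache | |
maps and the row index, so the caller can write the results back into the dataframe.""" | |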
result = "" #None | |
singleContext = "" #None | |
globalContext = "" #None | |
singleTriples = "" #None | |
globalTriples = "" #None | |
ALLURIScontext = [] | |
try: | |
if row.empty: | |
return result, ALLURIScontext, singleContext, globalContext, singleTriples, globalTriples, cache_map_geonames, cache_map_virtuoso, load_map_query_input_output, row.name | |
InRagMode = False | |
if hasattr(args, 'useBioKgRAG') and strtobool(args.useBioKgRAG): | |
InRagMode = True | |
if not InRagMode: | |
if row['IsGeo'] == 1: | |
if strtobool(args.debug): | |
print(f"\n----- IsGeo ... COMPUTING {row['word']} IN THE TEXT:") | |
print(row[args.source_column]) | |
result, singleContext, globalContext, singleTriples, globalTriples, cache_map_geonames = geonames_api_call(row['word'], args, key_geonames, cache_map_geonames) | |
elif row['IsBio'] == 1: | |
# Check if '@id' column exists in df_Extract | |
iiid = None | |
# Check if the '@id' exists in the Series | |
if '@id' in row: | |
# Check if the value is not None or NaN | |
if row['@id'] is not None and not pd.isna(row['@id']): | |
# Assign the value to the variable iiid | |
iiid = row['@id'] | |
iiiALLURIScontextFromNCBO = None | |
if 'ALLURIScontextFromNCBO' in row: | |
if row['ALLURIScontextFromNCBO'] is not None and isinstance(row['ALLURIScontextFromNCBO'], list): #and not pd.isna(row['ALLURIScontextFromNCBO']): | |
iiiALLURIScontextFromNCBO=row['ALLURIScontextFromNCBO'] | |
iiiALLURIScontextFromNCBO = list(set(iiiALLURIScontextFromNCBO)) | |
if strtobool(args.debug): | |
print(f"\n----- isBio COMPUTING ... {row['word']} IN THE TEXT:") | |
print(row[args.source_column]) | |
result, ALLURIScontext, singleContext, globalContext, singleTriples, globalTriples, cache_map_virtuoso, load_map_query_input_output = virtuoso_api_call(row['word'], text_splitter, args, key_virtuoso, cache_map_virtuoso, load_map_query_input_output, id=iiid, iALLURIScontextFromNCBO=iiiALLURIScontextFromNCBO, UseBioportalForLinking=True, questionText=row[args.source_column]) | |
else: | |
if row['model'] == "Forced": | |
# Check if '@id' column exists in df_Extract | |
iiid = None | |
# Check if the '@id' exists in the Series | |
if '@id' in row: | |
# Check if the value is not None or NaN | |
if row['@id'] is not None and not pd.isna(row['@id']): | |
# Assign the value to the variable iiid | |
iiid = row['@id'] | |
iiiALLURIScontextFromNCBO = None | |
if 'ALLURIScontextFromNCBO' in row: | |
if row['ALLURIScontextFromNCBO'] is not None and isinstance(row['ALLURIScontextFromNCBO'], | |
list): # and not pd.isna(row['ALLURIScontextFromNCBO']): | |
iiiALLURIScontextFromNCBO = row['ALLURIScontextFromNCBO'] | |
iiiALLURIScontextFromNCBO = list(set(iiiALLURIScontextFromNCBO)) | |
if strtobool(args.debug): | |
print(f"\n----- isForced COMPUTING ... {row['word']} IN THE TEXT:") | |
print(row[args.source_column]) | |
result, ALLURIScontext, singleContext, globalContext, singleTriples, globalTriples, cache_map_virtuoso, load_map_query_input_output = virtuoso_api_call( | |
row['word'], text_splitter, args, key_virtuoso, cache_map_virtuoso, load_map_query_input_output, | |
id=iiid, iALLURIScontextFromNCBO=iiiALLURIScontextFromNCBO,UseBioportalForLinking=True,questionText=row[args.source_column]) | |
if not result: #try annotation without bioportal | |
result, ALLURIScontext, singleContext, globalContext, singleTriples, globalTriples, cache_map_virtuoso, load_map_query_input_output = virtuoso_api_call( | |
row['word'], text_splitter, args, key_virtuoso, cache_map_virtuoso, load_map_query_input_output, | |
id=iiid, iALLURIScontextFromNCBO=iiiALLURIScontextFromNCBO, UseBioportalForLinking=False,questionText=row[args.source_column]) | |
else: | |
# this case is only reached when the row is neither IsBio nor IsGeo (nor in RAG mode, etc.), but there is already an annotation for the NEL coming from NCBO | |
if '@id' in row: | |
# Check if the value is not None or NaN | |
if row['@id'] is not None and not pd.isna(row['@id']): | |
# Assign the value to the variable iiid | |
iiid = row['@id'] | |
iiiALLURIScontextFromNCBO = None | |
if 'ALLURIScontextFromNCBO' in row: | |
if row['ALLURIScontextFromNCBO'] is not None and isinstance( | |
row['ALLURIScontextFromNCBO'], | |
list): # and not pd.isna(row['ALLURIScontextFromNCBO']): | |
iiiALLURIScontextFromNCBO = row['ALLURIScontextFromNCBO'] | |
iiiALLURIScontextFromNCBO = list(set(iiiALLURIScontextFromNCBO)) | |
if strtobool(args.debug): | |
print(f"\n----- It is not IsBio or IsGeo, but it has id from NCBO ...forcing COMPUTING ... {row['word']} IN THE TEXT:") | |
print(row[args.source_column]) | |
result, ALLURIScontext, singleContext, globalContext, singleTriples, globalTriples, cache_map_virtuoso, load_map_query_input_output = virtuoso_api_call( | |
row['word'], text_splitter, args, key_virtuoso, cache_map_virtuoso, | |
load_map_query_input_output, | |
id=iiid, iALLURIScontextFromNCBO=iiiALLURIScontextFromNCBO, UseBioportalForLinking=True, | |
questionText=row[args.source_column]) | |
else: | |
if (row['IsBio'] == 1) or ( (pd.isnull(row["IsBio"]) or row["IsBio"] == '' or row['IsBio'] == 0 or row["IsBio"] is None) and (row['entity_group'] == "MISC") ): | |
if strtobool(args.debug): | |
print(f"\n----- InRagMode ...COMPUTING ... {row['word']} IN THE TEXT:") | |
print(row[args.source_column]) | |
# Check if '@id' column exists in df_Extract | |
iiid = None | |
# Check if the '@id' exists in the Series | |
if '@id' in row: | |
# Check if the value is not None or NaN | |
if row['@id'] is not None and not pd.isna(row['@id']): | |
# Assign the value to the variable iiid | |
iiid = row['@id'] | |
iiiALLURIScontextFromNCBO = None | |
if 'ALLURIScontextFromNCBO' in row: | |
if row['ALLURIScontextFromNCBO'] is not None and isinstance(row['ALLURIScontextFromNCBO'], list): | |
iiiALLURIScontextFromNCBO = row['ALLURIScontextFromNCBO'] | |
iiiALLURIScontextFromNCBO = list(set(iiiALLURIScontextFromNCBO)) | |
result, ALLURIScontext, singleContext, globalContext, singleTriples, globalTriples, cache_map_virtuoso, load_map_query_input_output = virtuoso_api_call( | |
row['word'], text_splitter, args, key_virtuoso, cache_map_virtuoso, load_map_query_input_output, id=iiid, iALLURIScontextFromNCBO=iiiALLURIScontextFromNCBO,UseBioportalForLinking=True,questionText=row[args.source_column]) | |
return result, ALLURIScontext, singleContext, globalContext, singleTriples, globalTriples, cache_map_geonames, cache_map_virtuoso, load_map_query_input_output, row.name | |
except Exception as e: | |
#print(f"Error occurred: {e}") | |
return result, ALLURIScontext, singleContext, globalContext, singleTriples, globalTriples, cache_map_geonames, cache_map_virtuoso, load_map_query_input_output, row.name | |
def parallel_process_Row4Linking(df, text_splitter, args, key_geonames, cache_map_geonames, key_virtuoso, cache_map_virtuoso, load_map_query_input_output): | |
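"""Run process_row4Linking over all rows of df with a ThreadPoolExecutor (args.num_cores_eLinking | |
workers), write the per-row results back into df, and return the dataframe together with the | |
(shared) cache maps.""" | |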
results = [] | |
with ThreadPoolExecutor(max_workers=args.num_cores_eLinking) as executor: | |
# Submit tasks to ThreadPoolExecutor | |
futures = [executor.submit(process_row4Linking, row, text_splitter, args, key_geonames, cache_map_geonames, key_virtuoso, cache_map_virtuoso, load_map_query_input_output) | |
for _, row in df.iterrows()] | |
# Collect results | |
for future in as_completed(futures): | |
try: | |
result, ALLURIScontext, singleContext, globalContext, singleTriples, globalTriples, cache_map_geonames_Inner, cache_map_virtuoso_Inner, load_map_query_input_output_Inner, drm_idx = future.result() | |
df.at[drm_idx,'namedEntity'] = result | |
df.at[drm_idx, 'ALLURIScontext'] = ALLURIScontext | |
df.at[drm_idx,'Context'] = singleContext | |
df.at[drm_idx,'ContextGlobal'] = globalContext | |
df.at[drm_idx, 'Triples'] = singleTriples | |
df.at[drm_idx, 'TriplesGlobal'] = globalTriples | |
# Merge values from the _Inner maps back into the shared caches. In practice these updates rarely trigger: | |
# dictionaries are passed by reference, so changes made inside the worker are already visible here. | |
# cache_map_geonames, cache_map_virtuoso and load_map_query_input_output therefore behave like shared state, | |
# which is convenient for the parallelisation. | |
if cache_map_geonames != cache_map_geonames_Inner: | |
update_nested_dict(cache_map_geonames, cache_map_geonames_Inner) | |
if cache_map_virtuoso != cache_map_virtuoso_Inner: | |
update_nested_dict(cache_map_virtuoso, cache_map_virtuoso_Inner) | |
if load_map_query_input_output != load_map_query_input_output_Inner: | |
update_nested_dict(load_map_query_input_output, load_map_query_input_output_Inner) | |
except Exception as e: | |
print(f"Error occurred: {e}") | |
return df, cache_map_geonames, cache_map_virtuoso, load_map_query_input_output | |
def elinking(df_annotated_combined, text_splitter, args, key_geonames, cache_map_geonames, key_virtuoso, cache_map_virtuoso, load_map_query_input_output, device): | |
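"""Entity-linking driver: dispatches the annotated dataframe either to the parallel | |
(num_cores_eLinking > 1) or to the sequential row-by-row linking, fills the namedEntity, | |
ALLURIScontext, Context(Global) and Triples(Global) columns, and returns the dataframe plus the | |
updated cache maps.""" | |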
if "ALLURIScontext" not in df_annotated_combined.columns: | |
df_annotated_combined["ALLURIScontext"] = None | |
if args.num_cores_eLinking>1: | |
# parallel elinking process | |
#result | |
df_annotated_combined, cache_map_geonames_AFTER, cache_map_virtuoso_AFTER, load_map_query_input_output_AFTER = parallel_process_Row4Linking(df_annotated_combined, text_splitter, args, key_geonames, cache_map_geonames, key_virtuoso, cache_map_virtuoso, load_map_query_input_output) | |
#if isinstance(result, list): | |
# result=pd.Series(result) # I need this after the parallel processing | |
else: | |
# single processing | |
result = df_annotated_combined.apply(lambda row: process_row4Linking(row, text_splitter, args, key_geonames, cache_map_geonames, key_virtuoso, cache_map_virtuoso, load_map_query_input_output), axis=1) | |
# | |
try: | |
df_annotated_combined['namedEntity'] = result.str[0] | |
df_annotated_combined['ALLURIScontext'] = result.str[1] | |
df_annotated_combined['Context'] = result.str[2] | |
df_annotated_combined['ContextGlobal'] = result.str[3] | |
df_annotated_combined['Triples'] = result.str[4] | |
df_annotated_combined['TriplesGlobal'] = result.str[5] | |
cache_map_geonames_AFTER = result.str[6].iloc[-1] | |
cache_map_virtuoso_AFTER = result.str[7].iloc[-1] | |
load_map_query_input_output_AFTER = result.str[8].iloc[-1] # | |
except Exception as e: | |
# print(f"Error occurred: {e}") | |
df_annotated_combined['namedEntity'] = "" | |
df_annotated_combined['ALLURIScontext'] = "" | |
df_annotated_combined['Context'] = "" | |
df_annotated_combined['ContextGlobal'] = "" | |
df_annotated_combined['Triples'] = "" | |
df_annotated_combined['TriplesGlobal'] = "" | |
cache_map_geonames_AFTER = cache_map_geonames | |
cache_map_virtuoso_AFTER = cache_map_virtuoso | |
load_map_query_input_output_AFTER = load_map_query_input_output | |
def fill_alluriscontext(row): | |
if not row['ALLURIScontext'] and pd.notnull(row['namedEntity']): | |
return [row['namedEntity']] | |
return row['ALLURIScontext'] | |
# Apply the function to the DataFrame | |
df_annotated_combined['ALLURIScontext'] = df_annotated_combined.apply(fill_alluriscontext, axis=1) | |
if args.num_cores_eLinking>1: | |
# with parallel entity linking the rows complete out of order, so sort again | |
df_annotated_combined = df_annotated_combined.sort_values(by=['SentenceRef', 'start', 'ToLink', 'word', 'score'], | |
ascending=[True, True, True, True, False]) | |
return df_annotated_combined, cache_map_geonames_AFTER, cache_map_virtuoso_AFTER, load_map_query_input_output_AFTER | |
if __name__ == '__main__': | |
# df_rrr = pd.read_csv("ToAnnotateTrial2_OutputAnnotated-withTriples.csv", sep=',', header=0, dtype=str, encoding='utf-8') | |
# print("try") | |
# accelerator = Accelerator() | |
# # accelerator = Accelerator(mixed_precision='fp16', device_placement=False) #fp16 bf16 | |
# device = accelerator.device | |
# print("Using accelerator device = " + str(device)) | |
start = time.time() | |
encod = encoding_getter('microsoft/deberta-v3-large') | |
text_splitter = TokenTextSplitter( | |
# separators=separators, | |
encoding_name=encod.name, | |
chunk_size=80000, | |
chunk_overlap=50, | |
length_function=len, | |
add_start_index=True, | |
) | |
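# Hedged usage sketch (not executed here): the splitter chunks long inputs by token count, e.g. | |
#   chunks = text_splitter.split_text(long_text)  # list of ~80000-token chunks with 50-token overlap | |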
#models_List = ["FacebookAI/xlm-roberta-large-finetuned-conll03-english", "Babelscape/wikineural-multilingual-ner", "blaze999/Medical-NER", "urchade/gliner_large-v2.1", "urchade/gliner_large_bio-v0.1", "NCBO/BioPortal" ] #"urchade/gliner_large-v2.1", "knowledgator/gliner-multitask-large-v0.5" | |
#models_List = ["urchade/gliner_large_bio-v0.1"] | |
#models_List = [ "Babelscape/wikineural-multilingual-ner", "urchade/gliner_large-v2.1", "NCBO/BioPortal"] | |
models_List = ["NCBO/BioPortal"] | |
# Babelscape/wikineural-multilingual-ner | |
# https://huggingface.co/Babelscape/wikineural-multilingual-ner? | |
# https://aclanthology.org/2021.findings-emnlp.215.pdf | |
# PERSON → PER, ORG → ORG, GPE → LOC, | |
# LOC → LOC, FAC → LOC, PRODUCT → MISC, | |
# EVENT → MISC, WORK_OF_ART → MISC, | |
# LAW → O, LANGUAGE → MISC, NORP → MISC, | |
# DATE → O, TIME → O, PERCENT → O, | |
# MONEY → O, QUANTITY → O, ORDINAL → O, | |
# CARDINAL → O | |
# --> "LOC" "MISC" | |
# blaze999/Medical-NER | |
# https://huggingface.co/blaze999/Medical-NER | |
# 41 recognised entities: https://huggingface.co/blaze999/Medical-NER/blob/main/config.json | |
# | |
# FacebookAI/xlm-roberta-large-finetuned-conll03-english | |
# https://huggingface.co/FacebookAI/xlm-roberta-large-finetuned-conll03-english | |
# urchade/gliner_large-v2.1 | |
# https://huggingface.co/urchade/gliner_large-v2.1 | |
# | |
# urchade/gliner_large_bio-v0.1 | |
# https://huggingface.co/urchade/gliner_large_bio-v0.1 | |
# model = GLiNER.from_pretrained("urchade/gliner_large-v2.1") | |
# model = GLiNER.from_pretrained("urchade/gliner_large_bio-v0.1") | |
# | |
# text = """ | |
# The doctor diagnosed the patient with basal cell carcinoma, a common type of skin cancer. The Health Ministry has detected about 4000 suspected chikungunya cases nationwide this year [2008], Minister Datuk Liow Tiong Lai said Wednesday [17 Dec 2008]. | |
# He said the disease which was 1st detected in Johor had spread to Negeri Sembilan, Melaka, Perak, Selangor and the latest Kedah. He said the disease was 1st detected to have spread to Malaysia from Africa in 1997. | |
# """ | |
# | |
# glinerlabels = ["location", "disease"] | |
# | |
# entities = model.predict_entities(text, glinerlabels) | |
# | |
# for entity in entities: | |
# print(entity["text"], "=>", entity["label"]) | |
df_annotated = pd.DataFrame() | |
for model_id in models_List: | |
parser = argparse.ArgumentParser() | |
parser.add_argument("--model_id", type=str, default=model_id, help="model to use") | |
parser.add_argument("--debug", type=str, default="True", help="set debug mode") | |
# parser.add_argument("--input_dir", type=str, default="/eos/jeodpp/data/products/JRC-OpenData/DIGLIFE/ner2/") #/eos/jeodpp/home/users/consose/PycharmProjects/ner-virtuoso/ | |
# parser.add_argument("--filename", type=str, default="OutputAnnotatedTexts-LLMs-ENSEMBLE.csv") #ToAnnotateTrial.csv | |
# parser.add_argument("--source_column", type=str, default="texts") | |
# # | |
parser.add_argument("--input_dir", type=str, | |
default="/eos/jeodpp/home/users/consose/PycharmProjects/ner-virtuoso/") # | |
parser.add_argument("--filename", type=str, | |
default="ToAnnotateTrial2.csv") #ToAnnotateTrial.csv #ToAnnotateTrial2.csv #ToAnnotateTrialGliner.csv #ToAnnotateTrial3.csv | |
parser.add_argument("--source_column", type=str, default="ContextToAnnotate") # "" | |
parser.add_argument("--entities_filter_threshold", type=float, default=0.7) | |
parser.add_argument("--SEED", type=int, default=41) | |
parser.add_argument("--batch_size", type=int, default=4) # 2 - 4 - 8 - 16 | |
parser.add_argument("--num_cores_Gliner", type=int, default=0, help="parallel processing for Gliner annotation") # 0 means use the GPU for Gliner ! | |
parser.add_argument("--entity_linking", type=str, default="True", help="whether to make entities linking or not") | |
parser.add_argument("--geonameskey_filename", type=str, default="GEONAMES-API.key", help="file where the GeoNames API key is stored") | |
parser.add_argument("--virtuosokey_filename", type=str, default="VIRTUOSO-dba.key", help="file where the Virtuoso endpoint dba password is stored") | |
parser.add_argument("--bioportalkey_filename", type=str, default="NCBO-BioPortal.key", help="file where the NCBO BioPortal API key is stored") | |
# Add the --KG_restriction argument | |
# Since nargs='+' is used, this will accept one or more values from the command line | |
KGchoices = POSSIBLE_KGchoices_List | |
# KGchoices = ['SNOMED', 'LOINC', 'ICD10', 'NCIT'] | |
# KGchoices = ['SNOMED', 'LOINC', 'ICD10', 'MESH', 'NCIT'] # restricts the input to these values only | |
if KGchoices: | |
KGchoices.sort() | |
parser.add_argument("--KG_restriction", nargs='+', choices=KGchoices, default=KGchoices, | |
help="List of ontologies to which restrict the entity linking task.") | |
# consose 20250502: | |
if Counter(KGchoices) == Counter(POSSIBLE_KGchoices_List): | |
parser.add_argument("--USE_CACHE", type=str, default="True", #True | |
help="whether to use cache for the NER and NEL tasks or not") | |
else: | |
# print("Lists do not have the same elements") | |
parser.add_argument("--USE_CACHE", type=str, default="False", | |
help="whether to use cache for the NER and NEL tasks or not") | |
parser.add_argument("--num_cores_eLinking", type=int, default=10, help="parallel processing for the entity linking process") | |
parser.add_argument("--computeEntityContext", type=str, default="False", help="whether to extract a readable context from the extracted triples for the concept") | |
parser.add_argument("--computeEntityGlobalContext", type=str, default="False", help="whether to extract a readable context from the extracted triples of all the entities extracted from the endpoint for the concept") | |
parser.add_argument("--maxTriplesContextComputation", type=int, default=20000, | |
help="maximum number of triples to consider for global context computation") # if 0 or None it is not considered | |
parser.add_argument("--UseRetrieverForContextCreation", type=str, default="True", | |
help="whether to use a retriever for the creation of the context of the entities from the triples coming from the KGs") | |
parser.add_argument("--service_provider", type=str, default="gptjrc", help="llm service provider") # "gptjrc", "HFonPremises" | |
parser.add_argument("--model_name", type=str, default="llama-3.1-70b-instruct-fp8", help="llm to use") # llama-3.1-70b-instruct "llama-3.1-70b-instruct" "llama-3.1-70b-instruct-fp8" | |
parser.add_argument("--tokens_max", type=int, default=80000, help="max number of tokens to supply to the llm") | |
parser.add_argument("--max_new_tokens", type=int, default=4096, help="max number of tokens for the output of the LLM in premises") | |
parser.add_argument("--temperature", type=float, default=0.01) | |
args = parser.parse_args() | |
print("ARGS:") | |
print(args) | |
# %% In machine learning tasks, particularly with models that involve stochasticity (like text generation), set seeds for the random number generators to ensure reproducibility. With the transformers library this means seeding Python's random module, NumPy and PyTorch so the results are the same every time the code runs. | |
# Before you create the pipeline and run the text generation, set the seeds like this: | |
random.seed(args.SEED) | |
np.random.seed(args.SEED) | |
torch.manual_seed(args.SEED) | |
torch.cuda.manual_seed_all(args.SEED) | |
### | |
# cache_map_bioportal = None | |
# if strtobool(args.USE_CACHE): | |
# cachebioportal_filename = "CACHE_bioportal.json" | |
# if os.path.exists(cachebioportal_filename): | |
# with open(cachebioportal_filename) as f: | |
# cache_map_bioportal = json.load(f) | |
# else: | |
# cache_map_bioportal = {} | |
# | |
# key_bioportal = "" | |
# if args.bioportalkey_filename and os.path.exists(args.bioportalkey_filename): | |
# fkeyname = args.bioportalkey_filename | |
# with open(fkeyname) as f: | |
# key_bioportal = f.read() | |
in_filename = args.input_dir + args.filename | |
df_ToAnnotate = pd.read_csv(in_filename, sep=',', header=0, dtype=str, encoding='utf-8') | |
#df_ToAnnotate = df_ToAnnotate.head() #for debugging | |
#df_ToAnnotate = df_ToAnnotate.head(1) # for debugging | |
if "ToLink" not in df_ToAnnotate.columns: | |
df_ToAnnotate["ToLink"] = None | |
if "SentenceRef" not in df_ToAnnotate.columns: | |
df_ToAnnotate["SentenceRef"] = None | |
df_ToAnnotate = df_ToAnnotate[['SentenceRef'] + [col for col in df_ToAnnotate.columns if | |
col != 'SentenceRef']] # this moves it to the first position | |
#df_ToAnnotate['SentenceRef'] = df_ToAnnotate[args.source_column].rank(method='dense', ascending=True).astype(int) | |
# df_ToAnnotate['rank'] = df_ToAnnotate.index + 1 | |
# df_ToAnnotate['rank2'] = df_ToAnnotate['rank'].argsort().groupby(df_ToAnnotate[args.source_column]).transform('min').astype(int) | |
# df_ToAnnotate['rank2'] = df_ToAnnotate['rank2'].rank(method='dense').astype(int) | |
df_ToAnnotate['SentenceRef'] = df_ToAnnotate.index + 1 | |
df_ToAnnotate['SentenceRef'] = df_ToAnnotate['SentenceRef'].argsort().groupby(df_ToAnnotate[args.source_column]).transform('min').astype(int) | |
df_ToAnnotate['SentenceRef'] = df_ToAnnotate['SentenceRef'].rank(method='dense').astype(int) | |
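# SentenceRef is a dense, 1-based rank over identical source texts: rows sharing the same | |
# args.source_column value get the same SentenceRef, numbered by first appearance, | |
# e.g. texts [A, A, B, A, C] -> SentenceRef [1, 1, 2, 1, 3]. | |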
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') | |
# if strtobool(args.debug): | |
# print(f"Device: {device}...") | |
# if device.type == "cuda": | |
# print("GPU number:", torch.cuda.current_device()) | |
tsk = "token-classification" | |
pipe = None | |
tokenizerGliner = None | |
modelGliner = None | |
modelGlinerBio = None | |
if ("/gliner" not in args.model_id) and ("NCBO" not in args.model_id): | |
pipe = pipeline( | |
tsk, | |
model=args.model_id, | |
aggregation_strategy="simple", | |
device=device, | |
) | |
elif ("/gliner" in args.model_id): | |
if not tokenizerGliner: | |
tokenizerGliner = AutoTokenizer.from_pretrained('microsoft/deberta-v3-large') | |
if "_bio-" in args.model_id: | |
if args.num_cores_Gliner > 0: | |
modelGlinerBio = GLiNER.from_pretrained(args.model_id) #"urchade/gliner_large_bio-v0.1") | |
else: | |
modelGlinerBio = GLiNER.from_pretrained(args.model_id, map_location=device) | |
else: | |
if args.num_cores_Gliner > 0: | |
modelGliner = GLiNER.from_pretrained(args.model_id) #"knowledgator/gliner-multitask-large-v0.5" - "urchade/gliner_large-v2.1" | |
else: | |
modelGliner = GLiNER.from_pretrained(args.model_id, map_location=device) | |
new_annotations = annotate(df_ToAnnotate, args, pipe, tokenizerGliner, modelGliner, modelGlinerBio, device) | |
if not new_annotations.empty: | |
if df_annotated.empty: | |
# If df_annotated is empty, just assign new_annotations to it | |
df_annotated = new_annotations | |
else: | |
# If df_annotated is not empty, concatenate new_annotations to it | |
df_annotated = pd.concat([df_annotated, new_annotations], ignore_index=True) | |
if not df_annotated.empty: | |
df_annotated_combined = entitiesFusion(df_annotated,args) | |
if strtobool(args.debug): | |
print("\nStart is_cross_inside function ...") | |
df_annotated_combined = is_cross_inside(df_annotated_combined, args, 0.75) | |
else: | |
df_annotated_combined = df_annotated | |
#### GPT@JRC API | |
if args.service_provider == "gptjrc": | |
key_gptjrc = "" | |
fkeyname = "GPTJRC-APItoken.key" | |
if os.path.exists(fkeyname): | |
with open(fkeyname) as f: | |
key_gptjrc = f.read() | |
else: | |
key_gptjrc = os.environ['key_gptjrc'] | |
if key_gptjrc and key_gptjrc != "": | |
setup_gptjrc(key_gptjrc) | |
##### | |
cache_prefix_fp = "LLMQUERYNER" | |
cache_nameLLMs = cache_prefix_fp + "___" + "__".join( | |
[args.service_provider, args.model_name, str(args.temperature)]).replace( | |
" ", "_") + ".json" | |
load_map_query_input_output = None | |
if strtobool(args.USE_CACHE): | |
if os.path.exists(cache_nameLLMs): | |
with open(cache_nameLLMs) as f: | |
load_map_query_input_output = json.load(f) | |
else: | |
load_map_query_input_output = {} | |
### entity linking part: | |
if strtobool(args.entity_linking): | |
cache_map_geonames = None | |
if strtobool(args.USE_CACHE): | |
cacheGeonames_filename = "CACHE_geonames.json" | |
if os.path.exists(cacheGeonames_filename): | |
with open(cacheGeonames_filename) as f: | |
cache_map_geonames = json.load(f) | |
else: | |
cache_map_geonames = {} | |
key_geonames = "" | |
if args.geonameskey_filename and os.path.exists(args.geonameskey_filename): | |
fkeyname = args.geonameskey_filename | |
with open(fkeyname) as f: | |
key_geonames = f.read() | |
else: | |
key_geonames = os.environ['key_geonames'] | |
cache_map_virtuoso = None | |
if strtobool(args.USE_CACHE): | |
cacheVirtuoso_filename = "CACHE_virtuoso.json" | |
if os.path.exists(cacheVirtuoso_filename): | |
with open(cacheVirtuoso_filename) as f: | |
cache_map_virtuoso = json.load(f) | |
else: | |
cache_map_virtuoso = {} | |
key_virtuoso = "" | |
if args.virtuosokey_filename and os.path.exists(args.virtuosokey_filename): | |
fkeyname = args.virtuosokey_filename | |
with open(fkeyname) as f: | |
key_virtuoso = f.read() | |
else: | |
key_virtuoso = os.environ['key_virtuoso'] | |
# Exact-matching fallback: if a quoted term ("...") was not identified by the NER step, force-add it to the dataframe: | |
# Define the condition to find missing SentenceRefs | |
missing_sentence_refs = ~df_ToAnnotate['SentenceRef'].isin(df_annotated_combined['SentenceRef']) | |
# Define the condition to check if ContextToAnnotate starts and ends with quotes | |
quoted_context = df_ToAnnotate[args.source_column].str.startswith('"') & df_ToAnnotate[ | |
args.source_column].str.endswith('"') | |
# Combine both conditions | |
condition = missing_sentence_refs & quoted_context | |
# Select rows from df_ToAnnotate that meet the condition | |
rows_to_add = df_ToAnnotate[condition].copy() | |
rows_to_add['model'] = "Forced" | |
rows_to_add['entity_group'] = "MISC" | |
rows_to_add['word'] = rows_to_add[args.source_column].apply(strip_quotes) | |
rows_to_add['score'] = 1.0 | |
rows_to_add['start'] = 1 | |
rows_to_add['end'] = rows_to_add['word'].apply(len) + 1 | |
rows_to_add['IsGeo'] = None | |
rows_to_add['IsBio'] = None | |
rows_to_add['IsCrossInside'] = 0.0 | |
# Append these rows to df_annotated_combined | |
df_annotated_combined = pd.concat([df_annotated_combined, rows_to_add], ignore_index=True) | |
df_annotated_combined = df_annotated_combined.sort_values( | |
by=['SentenceRef', 'start', 'ToLink', 'word', 'score'], ascending=[True, True, True, True, False]) | |
# Now df_annotated_combined contains the additional rows | |
df_annotated_combined, cache_map_geonames_AFTER, cache_map_virtuoso_AFTER, load_map_query_input_output_AFTER = elinking(df_annotated_combined, text_splitter, args, key_geonames, cache_map_geonames, key_virtuoso, cache_map_virtuoso, load_map_query_input_output, device) | |
if strtobool(args.USE_CACHE): | |
if cache_map_geonames_AFTER is not None: | |
with open(cacheGeonames_filename, "w") as f: | |
json.dump(cache_map_geonames_AFTER, f) | |
if cache_map_virtuoso_AFTER is not None: | |
with open(cacheVirtuoso_filename, "w") as f: | |
json.dump(cache_map_virtuoso_AFTER, f) | |
if load_map_query_input_output_AFTER is not None: | |
with open(cache_nameLLMs, "w") as f: | |
json.dump(load_map_query_input_output_AFTER, f) | |
### end entity linking part | |
# # delete Triples and TriplesGlobal if not needed, too space | |
all_emptyTriples = None | |
all_emptyGlobalTriples = None | |
if 'Triples' in df_annotated_combined.columns: | |
all_emptyTriples = df_annotated_combined['Triples'].apply(lambda x: pd.isnull(x) or x == '' or x is None).all() | |
#print("All values in 'Triples' are empty (NaN, None, or ''):", all_emptyTriples) | |
if 'TriplesGlobal' in df_annotated_combined.columns: | |
all_emptyGlobalTriples = df_annotated_combined['TriplesGlobal'].apply(lambda x: pd.isnull(x) or x == '' or x is None).all() | |
#print("All values in 'TriplesGlobal' are empty (NaN, None, or ''):", all_emptyGlobalTriples) | |
if '@id' in df_annotated_combined.columns: | |
df_annotated_combined = df_annotated_combined.rename(columns={'@id': '@idfromNCBO'}) | |
# df_annotated_combined.drop('@id', axis=1, inplace=True) | |
# | |
if 'ALLURIScontextFromNCBO' in df_annotated_combined.columns: | |
df_annotated_combined.drop('ALLURIScontextFromNCBO', axis=1, inplace=True) | |
if (all_emptyTriples == False) or (all_emptyGlobalTriples==False): | |
output_texts_filename = args.input_dir + args.filename.replace(".csv", "_OutputAnnotated-withTriples.csv") | |
df_annotated_combined.to_csv(output_texts_filename, sep=',', header=True, index=False, encoding='utf-8') | |
df_annotated_combined.drop('Triples', axis=1, inplace=True, errors='ignore') | |
df_annotated_combined.drop('TriplesGlobal', axis=1, inplace=True, errors='ignore') | |
output_texts_filename = args.input_dir + args.filename.replace(".csv", "_OutputAnnotated.csv") | |
df_annotated_combined.to_csv(output_texts_filename, sep=',', header=True, index=False, encoding='utf-8') | |
print("\nEnd script") | |
# | |
end = time.time() | |
hours, rem = divmod(end - start, 3600) | |
minutes, seconds = divmod(rem, 60) | |
print("Overall Computational Time... {:0>2}:{:0>2}:{:05.2f}\n".format(int(hours), int(minutes), seconds)) | |
# | |